Compare commits

22 Commits
834ddb4438...main-22aug

Commits (SHA1):
8fe2af1876
a9ee8e1f80
e7f4bb6a35
7ce0926d91
3f49993f6b
ed2229b139
79e7e1a89b
a19b22f2c0
25dcd27c69
dcd3a62e3d
de8757d59f
4e1804bb06
e1af79bac9
20a4e9cfd4
a80854253a
1b7795f038
6495b95c8d
63e27f31ac
64d57407e0
e40e14dea5
b988582553
22ce6c07c3
checker.py (227 changed lines)
@@ -135,44 +135,132 @@ def clean_ansi(text):
     return ansi_escape.sub('', text)
 
 def format_number(number_str):
-    number = int(number_str)
-    if number >= 1000:
-        return f"{number//1000}k"
-    return str(number)
+    try:
+        number = int(number_str)
+        if number >= 1000:
+            value_in_k = number / 1000.0
+            # Format to 3 decimal places if needed, remove trailing zeros and potentially the dot
+            formatted_num = f"{value_in_k:.3f}".rstrip('0').rstrip('.')
+            return f"{formatted_num}k"
+        return str(number)
+    except (ValueError, TypeError):
+        return "NaN"  # Or some other indicator of invalid input
 
-def check_logs(logger):
+def check_logs(logger, initial_sync_count, previous_status):
+    """
+    Checks docker logs for node status (Syncing, OK, Idle) and updates sync count.
+
+    Args:
+        logger: The logger instance.
+        initial_sync_count: The sync count read from Grist at the start.
+        previous_status: The last known status read from Grist ('Sync', 'OK', 'Idle', or others).
+
+    Returns:
+        A dictionary containing:
+        - status_message: A string describing the current status (e.g., "Sync: 123k (5)").
+        - current_status_type: The type of the current status ('Sync', 'OK', 'Idle', 'Error').
+        - current_sync_count: The updated sync count.
+    """
+    current_sync_count = initial_sync_count  # Initialize with the value from Grist
+
     try:
         logs = subprocess.run(['docker', 'logs', '--since', '10m', 'infernet-node'], capture_output=True, text=True, check=True)
         log_content = clean_ansi(logs.stdout)
 
-        last_subscription_id = None
-        head_sub_id = None
+        last_checking_info = None
+        last_ignored_id = None
+        last_head_sub_id = None
+
+        # Regex patterns
+        checking_pattern = re.compile(r'Checking subscriptions.*last_sub_id=(\d+).*head_sub_id=(\d+).*num_subs_to_sync=(\d+)')
+        ignored_pattern = re.compile(r'Ignored subscription creation.*id=(\d+)')
+        head_sub_pattern = re.compile(r'head sub id is:\s*(\d+)')
 
+        # Use deque to efficiently get the last few relevant lines if needed,
+        # but processing all lines and keeping the last match is simpler here.
         for line in log_content.splitlines():
-            if "Ignored subscription creation" in line and "id=" in line:
-                id_match = re.search(r'id=(\d+)', line)
-                if id_match:
-                    last_subscription_id = id_match.group(1)
+            match = checking_pattern.search(line)
+            if match:
+                last_checking_info = {
+                    "last_sub_id": match.group(1),
+                    "head_sub_id": match.group(2),
+                    "num_subs_to_sync": int(match.group(3))
+                }
+                continue  # Prioritize checking_info
 
-            if "head sub id is:" in line:
-                id_match = re.search(r'head sub id is:\s*(\d+)', line)
-                if id_match:
-                    head_sub_id = id_match.group(1)
+            match = ignored_pattern.search(line)
+            if match:
+                last_ignored_id = match.group(1)
+                continue
 
-        if head_sub_id:
-            logger.info(f"Head sub id: {head_sub_id}")
-            return {"status": f"OK: {head_sub_id}"}
+            match = head_sub_pattern.search(line)
+            if match:
+                last_head_sub_id = match.group(1)
+                # No continue here, allows checking_info from same timeframe to override
 
-        if last_subscription_id:
-            logger.info(f"Subscription: {last_subscription_id}")
-            return {"status": f"Sync: {format_number(last_subscription_id)}"}
+        current_status_type = "Idle"
+        status_message = "Idle"
 
-        logger.info("Not found subscription")
-        return {"status": "Idle"}
+        if last_checking_info:
+            formatted_id = format_number(last_checking_info["last_sub_id"])
+            if last_checking_info["num_subs_to_sync"] > 0:
+                current_status_type = "Sync"
+                status_message = f"Sync: {formatted_id}"  # Use current_sync_count
+                logger.info(f"Node is syncing. Last sub ID: {last_checking_info['last_sub_id']}, Num subs to sync: {last_checking_info['num_subs_to_sync']}")
+            else:
+                current_status_type = "OK"
+                # Increment count only on transition from Sync to OK
+                if previous_status == "Sync":
+                    current_sync_count += 1  # Increment local count
+                    logger.info(f"Sync completed. Sync count incremented to {current_sync_count}.")
+                status_message = f"OK: {formatted_id}"  # Use current_sync_count
+                logger.info(f"Node is OK. Last sub ID: {last_checking_info['last_sub_id']}")
+
+        elif last_ignored_id:
+            # Fallback to "Ignored" logs if "Checking" is missing
+            formatted_id = format_number(last_ignored_id)
+            current_status_type = "Sync"  # Assume sync if we only see ignored creations recently
+            status_message = f"Sync: {formatted_id}"  # Use current_sync_count
+            logger.info(f"Node possibly syncing (based on ignored logs). Last ignored ID: {last_ignored_id}")
+
+        elif last_head_sub_id:
+            # Fallback to "head sub id" if others are missing
+            formatted_id = format_number(last_head_sub_id)
+            current_status_type = "OK"  # Assume OK if this is the latest relevant info
+            # Don't increment sync count here, only on Sync -> OK transition based on "Checking" logs
+            status_message = f"OK: {formatted_id}"  # Use current_sync_count
+            logger.info(f"Node status based on head sub id. Head sub ID: {last_head_sub_id}")
+
+        else:
+            logger.info("No relevant subscription log entries found in the last 10 minutes. Status: Idle.")
+            status_message = "Idle"
+            current_status_type = "Idle"
+
+        # Return the results instead of writing to a file
+        return {
+            "status_message": status_message,
+            "current_status_type": current_status_type,
+            "current_sync_count": current_sync_count
+        }
 
     except subprocess.CalledProcessError as e:
-        raise RuntimeError(f"Error running docker logs: {e}")
+        error_msg = f"Error: Docker logs failed ({e.returncode})"
+        logger.error(f"Error running docker logs command: {e.stderr or e.stdout or e}")
+        # Return error status and original sync count
+        return {
+            "status_message": error_msg,
+            "current_status_type": "Error",
+            "current_sync_count": initial_sync_count  # Return original count on error
+        }
+    except Exception as e:
+        error_msg = "Error: Log processing failed"
+        logger.error(f"Unexpected error processing logs: {e}", exc_info=True)
+        # Return error status and original sync count
+        return {
+            "status_message": error_msg,
+            "current_status_type": "Error",
+            "current_sync_count": initial_sync_count  # Return original count on error
+        }
 
 if __name__ == "__main__":
     colorama.init(autoreset=True)
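
Note on the reworked format_number above: the old version used integer division, so every ID in the same thousand-block collapsed to one label, while the new version keeps up to three decimals and strips trailing zeros. A quick illustrative check (the sample values are invented, not taken from node logs):

# Illustrative behaviour of the new format_number (made-up inputs).
# Old version would have returned "242k" and "240k" for the first two.
print(format_number("242029"))  # -> "242.029k"
print(format_number("240000"))  # -> "240k" (trailing zeros and dot stripped)
print(format_number("999"))     # -> "999" (values below 1000 unchanged)
print(format_number("oops"))    # -> "NaN" (invalid input no longer raises)
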
@@ -199,15 +287,88 @@ if __name__ == "__main__":
     current_vm = grist.find_record(name=GRIST_ROW_NAME, table=NODES_TABLE)[0]
     def grist_callback(msg): grist.update(current_vm.id, msg, NODES_TABLE)
 
+    # Initialize updates dictionary
+    initial_updates = {}
+    # Check and prepare update for Syncs if it's None or empty
+    if not current_vm.Syncs:  # Handles None, empty string, potentially 0 if that's how Grist stores it
+        initial_updates["Syncs"] = 0
+    # Check and prepare update for Reboots if it's None or empty
+    if not current_vm.Reboots:  # Handles None, empty string, potentially 0
+        initial_updates["Reboots"] = 0
+
+    # If there are updates, send them to Grist
+    if initial_updates:
+        try:
+            logger.info(f"Found empty initial values, updating Grist: {initial_updates}")
+            grist.update(current_vm.id, initial_updates, NODES_TABLE)
+            # Re-fetch the record to ensure subsequent logic uses the updated values
+            current_vm = grist.find_record(name=GRIST_ROW_NAME, table=NODES_TABLE)[0]
+            logger.info("Grist updated successfully with initial zeros.")
+        except Exception as e:
+            logger.error(f"Failed to update Grist with initial zeros: {e}")
+            # Decide how to proceed: maybe exit, maybe continue with potentially incorrect defaults
+            # For now, we'll log the error and continue using the potentially incorrect defaults from the first fetch
+
+    # Get initial state from Grist (now potentially updated)
+    initial_sync_count = int(current_vm.Syncs or 0)  # 'or 0' still useful as fallback
+    reboot_count = int(current_vm.Reboots or 0)  # 'or 0' still useful as fallback
+    # Determine previous status type based on Health string (simplified)
+    previous_health_status = current_vm.Health or "Idle"
+    previous_status_type = "Idle"  # Default
+    if previous_health_status.startswith("Sync"):
+        previous_status_type = "Sync"
+    elif previous_health_status.startswith("OK"):
+        previous_status_type = "OK"
+    elif previous_health_status.startswith("Error"):
+        previous_status_type = "Error"  # Consider error state
+
+    logger.info(f"Initial state from Grist - Syncs: {initial_sync_count}, Health: {previous_health_status}, Reboots: {reboot_count}")
+
     for attempt in range(3):
         try:
-            result = check_logs(logger)
-            grist_callback({ "Health": result["status"] })
-            logger.info(f"Status: {result['status']}")
-            break
+            vm_ip = os.popen("ip -4 addr show eth0 | grep -oP '(?<=inet )[^/]+'").read()
+            vm_ip = vm_ip.strip()
+            if vm_ip == "":
+                logger.error("Failed to get VM IP address")
+            else:
+                logger.info(f"VM IP address: {vm_ip}")
+                grist_callback({"IP": f"{vm_ip}"})
+
+            # Pass initial state to check_logs
+            result = check_logs(logger, initial_sync_count, previous_status_type)
+
+            grist_updates = {"Health": result["status_message"]}
+
+            # Update Syncs count in Grist only if it changed
+            if result["current_sync_count"] != initial_sync_count:
+                grist_updates["Syncs"] = result["current_sync_count"]
+                logger.info(f"Sync count changed from {initial_sync_count} to {result['current_sync_count']}")
+
+            # Send updates to Grist
+            grist_callback(grist_updates)
+            logger.info(f"Status update sent: {grist_updates}")
+
+            # Reboot logic (remains mostly the same, reads Reboots from current_vm)
+            if result["current_status_type"] == "Idle":  # Check type, not message
+                uptime_seconds = os.popen("cat /proc/uptime | cut -d'.' -f1").read()
+                uptime_seconds = int(uptime_seconds)
+                if uptime_seconds > 60*60*4:
+                    reboot_count = int(current_vm.Reboots or 0)
+                    reboot_count += 1
+                    # Include reboot count in the final Grist update before rebooting
+                    grist_updates = { "Health": "Rebooting", "Reboots": reboot_count }
+                    grist_callback(grist_updates)
+                    logger.info(f"Idle detected for >4 hours (uptime: {uptime_seconds}s). Rebooting. Reboot count: {reboot_count}")
+                    os.system("reboot")
+            break  # Exit loop on success
         except Exception as e:
-            logger.error(f"Error on attempt {attempt+1}/3: {e}")
+            logger.error(f"Error in main loop, attempt {attempt+1}/3: {e}", exc_info=True)
             if attempt == 2:
-                grist_callback({ "Health": f"Error: {e}" })
-            if attempt < 2:
-                time.sleep(5)
+                # Log final error to Grist on last attempt
+                try:
+                    grist_updates = { "Health": f"Error: Main loop failed - {e}" }
+                    grist_callback(grist_updates)
+                except Exception as grist_e:
+                    logger.error(f"Failed to log final error to Grist: {grist_e}")
+            time.sleep(5)  # Wait before retrying
@@ -1,4 +1,3 @@
-
 services:
   node:
     image: ritualnetwork/infernet-node:1.4.0
@@ -1,323 +0,0 @@
-import threading
-from collections import deque
-from datetime import datetime, timedelta, timezone
-import random
-import logging
-import time
-import json
-import socket
-
-import requests
-from grist_api import GristDocAPI
-from flask import Flask, request, Response
-
-logging.basicConfig(level=logging.INFO)
-app = Flask(__name__)
-
-BACKEND_SERVERS = []
-SERVER_STATS = {}
-STATS_LOCK = threading.Lock()
-ADDRESS_STATS = {}
-ADDRESS_STATS_LOCK = threading.Lock()
-STATISTICS_THRESHOLD = 10
-STATISTICS_WINDOW = timedelta(minutes=10)
-MAX_WORKERS = 500
-MAX_ERROR_RATE = 0.7
-PORT = 5000
-
-HOP_BY_HOP_HEADERS = {
-    'connection',
-    'keep-alive',
-    'proxy-authenticate',
-    'proxy-authorization',
-    'te',
-    'trailers',
-    'transfer-encoding',
-    'upgrade',
-}
-
-class GRIST:
-    def __init__(self, server, doc_id, api_key, logger):
-        self.server = server
-        self.doc_id = doc_id
-        self.api_key = api_key
-        self.logger = logger
-        self.grist = GristDocAPI(doc_id, server=server, api_key=api_key)
-
-    def table_name_convert(self, table_name):
-        return table_name.replace(" ", "_")
-
-    def to_timestamp(self, dtime: datetime) -> int:
-        if dtime.tzinfo is None:
-            dtime = dtime.replace(tzinfo=timezone(timedelta(hours=3)))
-        return int(dtime.timestamp())
-
-    def insert_row(self, data, table):
-        data = {key.replace(" ", "_"): value for key, value in data.items()}
-        row_id = self.grist.add_records(self.table_name_convert(table), [data])
-        return row_id
-
-    def update_column(self, row_id, column_name, value, table):
-        if isinstance(value, datetime):
-            value = self.to_timestamp(value)
-        column_name = column_name.replace(" ", "_")
-        self.grist.update_records(self.table_name_convert(table), [{ "id": row_id, column_name: value }])
-
-    def delete_row(self, row_id, table):
-        self.grist.delete_records(self.table_name_convert(table), [row_id])
-
-    def update(self, row_id, updates, table):
-        for column_name, value in updates.items():
-            if isinstance(value, datetime):
-                updates[column_name] = self.to_timestamp(value)
-        updates = {column_name.replace(" ", "_"): value for column_name, value in updates.items()}
-        self.grist.update_records(self.table_name_convert(table), [{"id": row_id, **updates}])
-
-    def fetch_table(self, table):
-        return self.grist.fetch_table(self.table_name_convert(table))
-
-    def find_record(self, id=None, state=None, name=None, table=None):
-        if table is None:
-            raise ValueError("Table is not specified")
-        table_data = self.grist.fetch_table(self.table_name_convert(table))
-        if id is not None:
-            record = [row for row in table_data if row.id == id]
-            return record
-        if state is not None and name is not None:
-            record = [row for row in table_data if row.State == state and row.name == name]
-            return record
-        if state is not None:
-            record = [row for row in table_data if row.State == state]
-            return record
-        if name is not None:
-            record = [row for row in table_data if row.Name == name]
-            return record
-
-    def find_settings(self, key, table):
-        table = self.fetch_table(self.table_name_convert(table))
-        for record in table:
-            if record.Setting == key:
-                if record.Value is None or record.Value == "":
-                    raise ValueError(f"Setting {key} blank")
-                return record.Value
-        raise ValueError(f"Setting {key} not found")
-
-
-@app.route('/', methods=['POST'])
-def proxy():
-    data = request.get_data()
-    headers = dict(request.headers)
-    headers.pop('Accept-Encoding', None)
-
-    try:
-        data_json = json.loads(data.decode('utf-8'))
-    except json.JSONDecodeError:
-        logging.warning(f'Invalid JSON from {request.remote_addr}: {data}')
-        return Response('Invalid JSON', status=400)
-
-    # Update per-address request statistics
-    def update_address_stats(from_address):
-        now = datetime.now(timezone.utc)
-        with ADDRESS_STATS_LOCK:
-            if from_address not in ADDRESS_STATS:
-                ADDRESS_STATS[from_address] = deque()
-            ADDRESS_STATS[from_address].append(now)
-            # Drop requests that fall outside the statistics window
-            while ADDRESS_STATS[from_address] and ADDRESS_STATS[from_address][0] < now - STATISTICS_WINDOW:
-                ADDRESS_STATS[from_address].popleft()
-
-    # Extract the 'from' address from a request
-    def extract_from_address(req):
-        params = req.get("params", [])
-        if isinstance(params, list) and len(params) > 0 and isinstance(params[0], dict):
-            return params[0].get("from")
-        return None
-
-    # Check whether the request is an array (batch request)
-    if isinstance(data_json, list):
-        for req in data_json:
-            from_address = extract_from_address(req)
-            if from_address:
-                update_address_stats(from_address)
-    elif isinstance(data_json, dict):
-        from_address = extract_from_address(data_json)
-        if from_address:
-            update_address_stats(from_address)
-
-    if data_json.get("method") == "eth_chainId":
-        response_json = {
-            "jsonrpc": "2.0",
-            "id": data_json.get("id"),
-            "result": "0x2105" #base
-        }
-        response_str = json.dumps(response_json)
-        return Response(response_str, status=200, mimetype='application/json')
-
-    selected_servers = select_servers()
-    for server in selected_servers:
-        server_url = server['url']
-        server_id = server['id']
-        try:
-            headers['Host'] = server_url.split('//')[-1].split('/')[0]
-            #logging.info(f'Proxying request to {server_url}: {data}')
-            response = requests.post(server_url, data=data, headers=headers, timeout=5)
-            if response.status_code == 200:
-                print(".", end="", flush=True)
-                #MAX_DATA_LENGTH = 20
-                #data_str = data.decode('utf-8')
-                #data_json = json.loads(data_str)
-                #if "jsonrpc" in data_json: data_json.pop("jsonrpc")
-                #if 'params' in data_json and isinstance(data_json['params'], list):
-                #    for idx, param in enumerate(data_json['params']):
-                #        if isinstance(param, dict) and 'data' in param:
-                #            original_data = param['data']
-                #            if isinstance(original_data, str) and len(original_data) > MAX_DATA_LENGTH:
-                #                truncated_data = original_data[:MAX_DATA_LENGTH - len("....SKIPPED")] + "....SKIPPED"
-                #                data_json['params'][idx]['data'] = truncated_data
-                #truncated_data_str = json.dumps(data_json)
-
-                #response_str = response.content.decode('utf-8')
-                #response_json = json.loads(response_str)
-                #if "jsonrpc" in response_json: response_json.pop("jsonrpc")
-                #if 'result' in response_json:
-                #    original_result = response_json['result']
-                #    if isinstance(original_result, str) and len(original_result) > MAX_DATA_LENGTH:
-                #        truncated_result = original_result[:MAX_DATA_LENGTH - len("....SKIPPED")] + "....SKIPPED"
-                #        response_json['result'] = truncated_result
-                #truncated_response_str = json.dumps(response_json)
-
-                #logging.info(f'OK: {request.remote_addr}: {truncated_data_str} -> {server_url}: {response.status_code}/{truncated_response_str}')
-                with STATS_LOCK:
-                    SERVER_STATS[server_id].append((datetime.now(timezone.utc), True))
-                filtered_headers = {
-                    k: v for k, v in response.headers.items()
-                    if k.lower() not in HOP_BY_HOP_HEADERS
-                }
-                filtered_headers.pop('Content-Encoding', None)
-                connection_header = response.headers.get('Connection', '')
-                connection_tokens = [token.strip().lower() for token in connection_header.split(',')]
-                for token in connection_tokens:
-                    filtered_headers.pop(token, None)
-                return Response(response.content, status=response.status_code, headers=filtered_headers)
-            else:
-                logging.warning(f'Failed to proxy request to {server_url}: {response.status_code}/{response.content}')
-                with STATS_LOCK:
-                    SERVER_STATS[server_id].append((datetime.now(timezone.utc), False))
-                continue
-        except requests.exceptions.RequestException as e:
-            logging.error(f'Exception while proxying to {server_url}: {e}')
-            with STATS_LOCK:
-                SERVER_STATS[server_id].append((datetime.now(timezone.utc), False))
-            continue
-    return Response('All backend servers are unavailable', status=503)
-
-
-def select_servers():
-    now = datetime.now(timezone.utc)
-    with STATS_LOCK:
-        for server in BACKEND_SERVERS:
-            server_id = server['id']
-            stats = SERVER_STATS[server_id]
-            while stats and stats[0][0] < now - STATISTICS_WINDOW:
-                stats.popleft()
-        total_requests = sum(len(SERVER_STATS[server['id']]) for server in BACKEND_SERVERS)
-
-    if total_requests < STATISTICS_THRESHOLD:
-        servers = BACKEND_SERVERS.copy()
-        random.shuffle(servers)
-        #logging.info("Total requests below threshold. Shuffled servers: %s", servers)
-        return servers
-
-    server_scores = []
-    with STATS_LOCK:
-        for server in BACKEND_SERVERS:
-            server_id = server['id']
-            stats = SERVER_STATS[server_id]
-            failures = sum(1 for t, success in stats if not success)
-            successes = len(stats) - failures
-            total = successes + failures
-            error_rate = failures / total if total > 0 else 0
-            server_scores.append({
-                'server': server,
-                'failures': failures,
-                'successes': successes,
-                'error_rate': error_rate
-            })
-            #logging.info(f"Server {server_id}: Failures={failures}, Successes={successes}, Error Rate={error_rate:.2f}")
-
-    healthy_servers = [s for s in server_scores if s['error_rate'] <= MAX_ERROR_RATE]
-
-    if not healthy_servers:
-        logging.warning("No healthy servers available.")
-        return BACKEND_SERVERS.copy()
-
-    healthy_servers.sort(key=lambda x: x['error_rate'])
-
-    total_weight = sum(1 - s['error_rate'] for s in healthy_servers)
-    if total_weight == 0:
-        weights = [1 for _ in healthy_servers]
-    else:
-        weights = [(1 - s['error_rate']) / total_weight for s in healthy_servers]
-
-    selected_server = random.choices( [s['server'] for s in healthy_servers], weights=weights, k=1 )[0]
-    selected_servers = [selected_server] + [s['server'] for s in healthy_servers if s['server'] != selected_server]
-    return selected_servers
-
-
-def upload_stats_to_grist(update_row):
-    while True:
-        try:
-            total_stats = {
-                'failures': 0,
-                'successes': 0,
-                'rps': 0
-            }
-
-            with STATS_LOCK:
-                for server in BACKEND_SERVERS:
-                    server_id = server['id']
-                    server_stats = SERVER_STATS[server_id]
-                    failures = sum(1 for t, success in server_stats if not success)
-                    successes = len(server_stats) - failures
-                    total_stats['failures'] += failures
-                    total_stats['successes'] += successes
-                    total_stats['rps'] += len(server_stats)/STATISTICS_WINDOW.total_seconds()
-
-            health = f"{total_stats['successes']}/{total_stats['failures']}/{total_stats['rps']:.2f}"
-            update_row({"Health": health})
-        except Exception as e:
-            logging.error(f"Failed to upload stats to Grist: {str(e)}")
-        time.sleep(30)
-
-
-if __name__ == '__main__':
-    GRIST_ROW_NAME = socket.gethostname()
-    NODES_TABLE = "Nodes"
-    RPC_TABLE = "RPC_list"
-
-    with open('/root/node/grist.json', 'r', encoding='utf-8') as f:
-        grist_data = json.loads(f.read())
-
-    host = grist_data.get('grist_server')
-    doc_id = grist_data.get('grist_doc_id')
-    api_key = grist_data.get('grist_api_key')
-    grist = GRIST(host, doc_id, api_key, logging)
-    current_vm = grist.find_record(name=GRIST_ROW_NAME, table=NODES_TABLE)[0]
-    def grist_callback(msg): grist.update(current_vm.id, msg, NODES_TABLE)
-
-    BACKEND_SERVERS = []
-    SERVER_STATS = {}
-    table = grist.fetch_table(RPC_TABLE)
-    for row in table:
-        if row.URL:
-            server_info = {'id': row.id, 'url': row.URL}
-            BACKEND_SERVERS.append(server_info)
-            SERVER_STATS[row.id] = deque()
-
-    upload_thread = threading.Thread(target=upload_stats_to_grist, daemon=True, args=(grist_callback,))
-    upload_thread.start()
-
-    from waitress import serve
-    logging.info(f"Starting server on port {PORT}")
-    serve(app, host='0.0.0.0', port=PORT, threads=MAX_WORKERS, connection_limit=1000)
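
The deleted balancer above ordered backends with a weighted random pick proportional to 1 - error_rate across the healthy set. A small worked sketch of that selection step, with made-up server entries and error rates:

import random

# Hypothetical healthy servers with error rates observed in the stats window.
healthy_servers = [
    {'server': {'id': 1, 'url': 'https://rpc-a.example'}, 'error_rate': 0.1},
    {'server': {'id': 2, 'url': 'https://rpc-b.example'}, 'error_rate': 0.5},
]

total_weight = sum(1 - s['error_rate'] for s in healthy_servers)          # 0.9 + 0.5 = 1.4
weights = [(1 - s['error_rate']) / total_weight for s in healthy_servers]  # ~[0.643, 0.357]

# First try goes to the weighted random pick; the remaining servers follow as fallbacks.
selected = random.choices([s['server'] for s in healthy_servers], weights=weights, k=1)[0]
ordered = [selected] + [s['server'] for s in healthy_servers if s['server'] != selected]
print(ordered)
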
@@ -1,15 +0,0 @@
-[Unit]
-Description=GRPC Balancer Service
-After=network.target
-
-[Service]
-Type=simple
-User=root
-Group=root
-Environment=PYTHONUNBUFFERED=1
-ExecStart=/usr/bin/python3 /usr/local/bin/grpc-balancer.py
-Restart=always
-RestartSec=2
-
-[Install]
-WantedBy=multi-user.target
playbook.yml (52 changed lines)
@@ -24,6 +24,9 @@
         docker logs infernet-node -f
         docker logs --since 10m infernet-node -f
         journalctl -u node-checker.service
+        journalctl -u grpcbalancer.service
+        nano ~/node/deploy/config.json
+        docker compose -f deploy/docker-compose.yaml down; docker compose -f deploy/docker-compose.yaml up -d
       marker: ""
       mode: '0644'
 
@@ -119,7 +122,6 @@
         name: web3
         extra_args: --break-system-packages
 
-
     # - name: Install Docker
     #   ansible.builtin.shell: curl -sL https://get.docker.com | sudo sh -
     #
@@ -137,9 +139,6 @@
         name: docker
         state: restarted
 
-    #- name: Docker login
-    #  ansible.builtin.shell: docker login -u {{ docker_username }} -p {{ docker_password }}
-
     - name: Docker pull hello-world
       ansible.builtin.shell: docker pull ritualnetwork/hello-world-infernet:latest
 
@@ -194,44 +193,14 @@
       changed_when: false
 
-
-    - name: Install grpcbalancer dependencies
-      ansible.builtin.pip:
-        name:
-          - grist-api
-          - flask
-          - requests
-          - waitress
-        extra_args: --break-system-packages
-
-    - name: Copy grpcbalancer files
-      ansible.builtin.shell: |
-        cp {{ ansible_env.HOME }}/node/grpcbalancer/grpc-balancer.py /usr/local/bin/
-        chmod 755 /usr/local/bin/grpc-balancer.py
-      args:
-        executable: /bin/bash
-
-    #- name: Install grpcbalancer service
-    #  ansible.builtin.shell: |
-    #    cp {{ ansible_env.HOME }}/node/grpcbalancer/grpc-balancer.service /etc/systemd/system/
-    #    chmod 644 /etc/systemd/system/grpc-balancer.service
-    #  args:
-    #    executable: /bin/bash
-
-    #- name: Start and enable grpcbalancer service
-    #  ansible.builtin.systemd:
-    #    name: grpc-balancer
-    #    state: started
-    #    enabled: yes
-    #    daemon_reload: yes
-
     - name: Install Forge and Infernet SDK
       ansible.builtin.shell: |
         rm -rf {{ ansible_env.HOME }}/node/projects/hello-world/contracts/lib/forge-std
         rm -rf {{ ansible_env.HOME }}/node/projects/hello-world/contracts/lib/infernet-sdk
         cd {{ ansible_env.HOME }}/foundry && source {{ ansible_env.HOME }}/.bashrc && foundryup
         cd {{ ansible_env.HOME }}/node/projects/hello-world/contracts
-        forge install --no-commit foundry-rs/forge-std
-        forge install --no-commit ritual-net/infernet-sdk
+        forge install foundry-rs/forge-std
+        forge install ritual-net/infernet-sdk
       args:
         executable: /bin/bash
 
@@ -271,14 +240,6 @@
       until: '"ONCHAIN EXECUTION COMPLETE & SUCCESSFUL" in contract_call_output.stdout'
       failed_when: false
 
-    # - name: Set Docker containers to restart unless stopped
-    #   ansible.builtin.shell: |
-    #     docker update --restart unless-stopped hello-world
-    #     docker update --restart unless-stopped infernet-node
-    #     docker update --restart unless-stopped deploy-redis-1
-    #     docker update --restart unless-stopped infernet-anvil
-    #     docker update --restart unless-stopped deploy-fluentbit-1
-
     - name: Copy checker service file
       ansible.builtin.copy:
         dest: /etc/systemd/system/node-checker.service
@@ -305,6 +266,3 @@
         enabled: yes
         state: started
         daemon_reload: yes
-
-    #- name: Remove docker login credentials
-    #  ansible.builtin.shell: rm -rf /root/.docker/config.json
@@ -15,7 +15,7 @@
     "snapshot_sync": {
       "sleep": 3,
       "batch_size": 800,
-      "starting_sub_id": 210000,
+      "starting_sub_id": 242029,
      "sync_period": 30
     }
   },
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 set -e
-cd ~/ritual
+cd ~/node
 project=hello-world make deploy-container
 project=hello-world make deploy-contracts
 bash update_contracts.sh
ws.code-workspace (new file, 28 lines)
@@ -0,0 +1,28 @@
+{
+    "folders": [
+        {
+            "path": "."
+        },
+        {
+            "path": "../ritual-git"
+        }
+    ],
+    "settings": {
+        "workbench.colorCustomizations": {
+            "activityBar.activeBackground": "#fb94f8",
+            "activityBar.background": "#fb94f8",
+            "activityBar.foreground": "#15202b",
+            "activityBar.inactiveForeground": "#15202b99",
+            "activityBarBadge.background": "#777b05",
+            "activityBarBadge.foreground": "#e7e7e7",
+            "commandCenter.border": "#15202b99",
+            "sash.hoverBorder": "#fb94f8",
+            "titleBar.activeBackground": "#f963f5",
+            "titleBar.activeForeground": "#15202b",
+            "titleBar.inactiveBackground": "#f963f599",
+            "titleBar.inactiveForeground": "#15202b99"
+        },
+        "peacock.color": "#f963f5",
+        "makefile.configureOnOpen": false
+    }
+}