commit 825a51e00d68a9b7a34b3dc6ff067b2e085a0f0e
Author: vvzvlad
Date:   Thu Dec 12 01:53:03 2024 +0300

    init

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..f43d574
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,27 @@
+# Use a minimal base image (Debian)
+FROM debian:bookworm-slim
+
+# Set environment variables and install dependencies
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    pkg-config \
+    libssl-dev \
+    git \
+    curl \
+    protobuf-compiler \
+    cargo \
+    && apt-get clean
+
+# Install Rust (needed for Nexus node)
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+
+# Set Rust environment path
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+RUN echo "curl https://cli.nexus.xyz/ > nexus.sh" > run.sh
+RUN echo "sed -i 's|cargo run --release --bin prover -- beta.orchestrator.nexus.xyz|cargo run --config net.git-fetch-with-cli=true --release --bin prover -- beta.orchestrator.nexus.xyz|g' nexus.sh" >> run.sh
+RUN echo "sh -c echo Y | cat nexus.sh | sh & tail -f /dev/null" >> run.sh
+RUN chmod +x run.sh
+
+# Set the default command to run the Nexus node setup script, automatically agreeing to the terms
+CMD ["sh", "-c", "sh run.sh"]
\ No newline at end of file
diff --git a/checker.py b/checker.py
new file mode 100644
index 0000000..02d0a0b
--- /dev/null
+++ b/checker.py
@@ -0,0 +1,171 @@
+# flake8: noqa
+# pylint: disable=broad-exception-raised, raise-missing-from, too-many-arguments, redefined-outer-name
+# pylance: disable=reportMissingImports, reportMissingModuleSource, reportGeneralTypeIssues
+# type: ignore
+
+import re
+from datetime import datetime, timedelta, timezone
+import subprocess
+import os
+import time
+import random
+import sys
+import pkg_resources
+import requests
+import json
+from collections import deque
+required_packages = ['grist-api', 'colorama']
+installed_packages = [pkg.key for pkg in pkg_resources.working_set]
+
+for package in required_packages:
+    if package not in installed_packages:
+        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '--break-system-packages'])
+
+from grist_api import GristDocAPI
+import colorama
+
+import logging
+import socket
+
+
+class GRIST:
+    def __init__(self, server, doc_id, api_key, logger):
+        self.server = server
+        self.doc_id = doc_id
+        self.api_key = api_key
+        self.logger = logger
+        self.grist = GristDocAPI(doc_id, server=server, api_key=api_key)
+
+    def table_name_convert(self, table_name):
+        return table_name.replace(" ", "_")
+
+    def to_timestamp(self, dtime: datetime) -> int:
+        if dtime.tzinfo is None:
+            dtime = dtime.replace(tzinfo=timezone(timedelta(hours=3)))
+        return int(dtime.timestamp())
+
+    def insert_row(self, data, table):
+        data = {key.replace(" ", "_"): value for key, value in data.items()}
+        row_id = self.grist.add_records(self.table_name_convert(table), [data])
+        return row_id
+
+    def update_column(self, row_id, column_name, value, table):
+        if isinstance(value, datetime):
+            value = self.to_timestamp(value)
+        column_name = column_name.replace(" ", "_")
+        self.grist.update_records(self.table_name_convert(table), [{ "id": row_id, column_name: value }])
+
+    def delete_row(self, row_id, table):
+        self.grist.delete_records(self.table_name_convert(table), [row_id])
+
+    def update(self, row_id, updates, table):
+        for column_name, value in updates.items():
+            if isinstance(value, datetime):
+                updates[column_name] = self.to_timestamp(value)
+        updates = {column_name.replace(" ", "_"): value for column_name, value in updates.items()}
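+        # Grist column IDs use underscores, so spaces in incoming column names are normalized before the API call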
+        self.grist.update_records(self.table_name_convert(table), [{"id": row_id, **updates}])
+
+    def fetch_table(self, table):
+        return self.grist.fetch_table(self.table_name_convert(table))
+
+    def find_record(self, id=None, state=None, name=None, table=None):
+        if table is None:
+            raise ValueError("Table is not specified")
+        table_data = self.grist.fetch_table(self.table_name_convert(table))
+        if id is not None:
+            record = [row for row in table_data if row.id == id]
+            return record
+        if state is not None and name is not None:
+            record = [row for row in table_data if row.State == state and row.name == name]
+            return record
+        if state is not None:
+            record = [row for row in table_data if row.State == state]
+            return record
+        if name is not None:
+            record = [row for row in table_data if row.Name == name]
+            return record
+
+    def find_settings(self, key, table):
+        table = self.fetch_table(self.table_name_convert(table))
+        for record in table:
+            if record.Setting == key:
+                if record.Value is None or record.Value == "":
+                    raise ValueError(f"Setting {key} blank")
+                return record.Value
+        raise ValueError(f"Setting {key} not found")
+
+
+def check_logs(log_handler):
+    error_count = 0
+    proved_count = 0
+    proof_speeds = deque(maxlen=100)
+
+    try:
+        logs = subprocess.run(['docker', 'compose', 'logs', '--since', '24h'], cwd='/root/node/', capture_output=True, text=True, check=True)
+        log_content = logs.stdout
+    except subprocess.CalledProcessError as e:
+        raise RuntimeError(f"Error running docker compose logs: {e}") from e
+
+    for line in log_content.split('\n'):
+        if "error" in line.lower():
+            log_handler.error(f"Error: {line}")
+            error_count += 1
+        if "Proved step" in line:
+            proved_count += 1
+            log_handler.info(f"Proved step: {line}")
+
+            proof_speed_match = re.search(r'Proved step \d+ at (\d+\.\d+) proof cycles/sec', line)
+            if proof_speed_match:
+                current_speed = float(proof_speed_match.group(1))
+                proof_speeds.append(current_speed)
+                log_handler.info(f"Current proof speed: {current_speed} proof cycles/sec")
+
+    # Calculate average proof speed from the collected values
+    avg_proof_speed = sum(proof_speeds) / len(proof_speeds) if proof_speeds else 0
+    log_handler.info(f"Average proof speed (last {len(proof_speeds)} values): {avg_proof_speed:.2f} proof cycles/sec")
+
+    data = {
+        "errors": error_count,
+        "proved_steps": proved_count/10,
+        "proof_speed": avg_proof_speed
+    }
+    log_handler.info(f"Result: {data}")
+    return data
+
+if __name__ == "__main__":
+    colorama.init(autoreset=True)
+    logger = logging.getLogger("Checker")
+    logger.setLevel(logging.INFO)
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    ch = logging.StreamHandler()
+    ch.setFormatter(formatter)
+    logger.addHandler(ch)
+
+    logger.info("Checker started")
+    random_sleep = random.randint(1, 600)
+    logger.info(f"Sleeping for {random_sleep} seconds")
+    time.sleep(random_sleep)
+
+    grist_data = {}
+    with open('/root/node/grist.json', 'r', encoding='utf-8') as f:
+        grist_data = json.loads(f.read())
+
+    GRIST_ROW_NAME = socket.gethostname()
+    NODES_TABLE = "Nodes"
+    grist = GRIST(grist_data.get('grist_server'), grist_data.get('grist_doc_id'), grist_data.get('grist_api_key'), logger)
+    current_vm = grist.find_record(name=GRIST_ROW_NAME, table=NODES_TABLE)[0]
+    def grist_callback(msg): grist.update(current_vm.id, msg, NODES_TABLE)
+
+    for attempt in range(3):
+        try:
+            result = check_logs(logger)
+            data = f"{result['proved_steps']}/{result['proof_speed']}/{result['errors']}"  # proved/proof_speed/Errors
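+            # Push the combined health string to this node's row in the Grist "Nodes" table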
grist_callback({ "Health": data }) + print(result) + break + except Exception as e: + logger.error(f"Error on attempt {attempt+1}/3: {e}") + if attempt == 2: + grist_callback({ "Health": f"Error: {e}" }) + if attempt < 2: + time.sleep(5) \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..68c3410 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,11 @@ +#docker build -t nexus-image . + +services: + nexus-prover: + image: nexus-image + container_name: nexus-prover + restart: unless-stopped + volumes: + - /root/prover-id:/root/.nexus/prover-id + logging: + driver: "none" \ No newline at end of file diff --git a/grist.json b/grist.json new file mode 100644 index 0000000..31c6266 --- /dev/null +++ b/grist.json @@ -0,0 +1,5 @@ +{ + "grist_server": "###GRIST_SERVER###", + "grist_doc_id": "###GRIST_DOC_ID###", + "grist_api_key": "###GRIST_API_KEY###" +} diff --git a/playbook.yml b/playbook.yml new file mode 100644 index 0000000..e352994 --- /dev/null +++ b/playbook.yml @@ -0,0 +1,361 @@ +- name: Nillion deployment playbook + hosts: all + become: true + vars: + ansible_python_interpreter: /usr/bin/python3.11 + + tasks: + - name: Append command to .bash_history + blockinfile: + path: "~/.bash_history" + create: yes + block: | + #1724983098 + cd /root/node/ ; docker compose logs -f + marker: "" + + - name: Set locale to C.UTF-8 + ansible.builtin.command: + cmd: localectl set-locale LANG=C.UTF-8 + changed_when: false + + - name: Create APT configuration file to assume yes + ansible.builtin.copy: + dest: /etc/apt/apt.conf.d/90forceyes + content: | + APT::Get::Assume-Yes "true"; + mode: '0644' + + - name: Update /etc/bash.bashrc + ansible.builtin.blockinfile: + path: /etc/bash.bashrc + block: | + export HISTTIMEFORMAT='%F, %T ' + export HISTSIZE=10000 + export HISTFILESIZE=10000 + shopt -s histappend + export PROMPT_COMMAND='history -a' + export HISTCONTROL=ignoredups + export LANG=C.UTF-8 + export LC_ALL=C.UTF-8 + alias ls='ls --color=auto' + shopt -s cmdhist + + - name: Ensure ~/.inputrc exists + ansible.builtin.file: + path: /root/.inputrc + state: touch + mode: '0644' + + - name: Update ~/.inputrc + ansible.builtin.blockinfile: + path: ~/.inputrc + block: | + "\e[A": history-search-backward + "\e[B": history-search-forward + + - name: Ensure ~/.nanorc exists + ansible.builtin.file: + path: /root/.nanorc + state: touch + mode: '0644' + + - name: Update ~/.nanorc + ansible.builtin.blockinfile: + path: ~/.nanorc + block: | + set nohelp + set tabsize 4 + set tabstospaces + set autoindent + set positionlog + set backup + set backupdir /tmp/ + set locking + include /usr/share/nano/*.nanorc + + - name: Set hostname + ansible.builtin.shell: | + hostnamectl set-hostname {{ serverid }} + echo "127.0.1.1 {{ serverid }}" >> /etc/hosts + changed_when: false + + - name: Update and upgrade apt + ansible.builtin.apt: + update_cache: true + upgrade: dist + force_apt_get: true + autoremove: true + register: apt_update_result + retries: 5 + delay: 50 + until: apt_update_result is succeeded + async: "{{ 60 * 20 }}" + poll: 30 + + - name: Install packages + ansible.builtin.apt: + name: + - ca-certificates + - zlib1g-dev + - libncurses5-dev + - libgdbm-dev + - libnss3-dev + - curl + - jq + - git + - zip + - wget + - make + - python3 + - python3-pip + - iftop + state: present + update_cache: true + async: "{{ 60 * 20 }}" + poll: 30 + + - name: Install Docker + ansible.builtin.shell: curl -fsSL https://get.docker.com | bash + changed_when: false + async: "{{ 
60 * 5 }}" + poll: 30 + + - name: Update Docker daemon journald logging + ansible.builtin.copy: + dest: /etc/docker/daemon.json + content: | + { "log-driver": "journald" } + mode: '0644' + + - name: Restart Docker + ansible.builtin.service: + name: docker + state: restarted + + - name: Update journald log SystemMaxUse=2G configuration + ansible.builtin.lineinfile: + path: /etc/systemd/journald.conf + line: 'SystemMaxUse=2G' + insertafter: EOF + create: true + mode: '0644' + + - name: Restart journald + ansible.builtin.service: + name: systemd-journald + state: restarted + + - name: Docker login + ansible.builtin.shell: docker login -u "{{ docker_username }}" -p "{{ docker_password }}" + register: docker_login_result + changed_when: false + failed_when: "'Login Succeeded' not in docker_login_result.stdout" + + - name: Clone repository + ansible.builtin.git: + repo: https://gitea.vvzvlad.xyz/vvzvlad/nillion + dest: "{{ ansible_env.HOME }}/node" + version: "{{ git_version }}" + force: true + async: "{{ 60 * 15 }}" + poll: 30 + + - name: Make update.sh executable + ansible.builtin.shell: | + chmod +x ./update.sh + args: + chdir: "{{ ansible_env.HOME }}/node" + changed_when: false + + - name: Update environment variables + ansible.builtin.shell: | + ./update.sh ADDRESS "{{ address }}" + ./update.sh PRIVATE "{{ private_key }}" + ./update.sh PUBLIC "{{ public_key }}" + ./update.sh RPC "{{ rpc_url }}" + ./update.sh GRIST_SERVER "{{ grist_server }}" + ./update.sh GRIST_DOC_ID "{{ grist_doc_id }}" + ./update.sh GRIST_API_KEY "{{ grist_api_key }}" + args: + chdir: "{{ ansible_env.HOME }}/node" + changed_when: false + + - name: Download dockers images + ansible.builtin.command: docker compose pull + args: + chdir: "{{ ansible_env.HOME }}/node" + environment: + COMPOSE_INTERACTIVE_NO_CLI: 'true' + changed_when: false + async: "{{ 60 * 45 }}" + poll: "{{ 60 * 5 }}" + + - name: Check external IP before + ansible.builtin.command: curl https://ifconfig.me + register: ip_before + changed_when: false + + - name: Validate IP address + ansible.builtin.assert: + that: + - ip_before.stdout | ansible.utils.ipaddr + fail_msg: "The returned value is not a valid IP address." + success_msg: "The returned value is a valid IP address." 
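+
+    # The tasks below route outbound traffic through the SOCKS proxy via tun2socks;
+    # the external IP is checked again afterwards to confirm the tunnel is in use.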
+    - name: Download tun2socks
+      ansible.builtin.get_url:
+        url: https://github.com/xjasonlyu/tun2socks/releases/download/v2.5.2/tun2socks-linux-amd64.zip
+        dest: /tmp/tun2socks-linux-amd64.zip
+        mode: '0644'
+      async: "{{ 60 * 5 }}"
+      poll: 30
+
+    - name: Unzip tun2socks
+      ansible.builtin.unarchive:
+        src: /tmp/tun2socks-linux-amd64.zip
+        dest: /usr/local/sbin/
+        remote_src: true
+        mode: '0755'
+
+    - name: Create proxy file
+      ansible.builtin.copy:
+        content: "{{ proxy }}"
+        dest: /root/proxy
+        mode: '0644'
+
+    - name: Create tun2socks systemd service
+      ansible.builtin.copy:
+        dest: /etc/systemd/system/tun2socks.service
+        content: |
+          [Unit]
+          Description=Tun2Socks gateway
+          After=network.target
+          Wants=network.target
+
+          [Service]
+          User=root
+          Type=simple
+          RemainAfterExit=true
+          ExecStartPre=/bin/sh -c 'ip route add $(cat /root/proxy | grep -oP "(?<=@)[0-9.]+(?=:)" )/32 via $(ip route | grep -oP "(?<=default via )[0-9.]+")'
+          ExecStart=/bin/sh -c '/usr/local/sbin/tun2socks-linux-amd64 --device tun0 --proxy $(cat /root/proxy)'
+          ExecStopPost=/bin/sh -c 'ip route del $(cat /root/proxy | grep -oP "(?<=@)[0-9.]+(?=:)" )/32 via $(ip route | grep -oP "(?<=default via )[0-9.]+")'
+          Restart=always
+
+          [Install]
+          WantedBy=multi-user.target
+        mode: '0644'
+
+    - name: Create network configuration for tun0
+      ansible.builtin.copy:
+        dest: /etc/systemd/network/10-proxy.network
+        content: |
+          [Match]
+          Name=tun0
+
+          [Network]
+          Address=10.20.30.1/24
+
+          [Route]
+          Gateway=0.0.0.0
+        mode: '0644'
+
+    - name: Enable and start tun2socks service
+      ansible.builtin.systemd:
+        name: tun2socks
+        enabled: true
+        state: started
+
+    - name: Reload network configuration
+      ansible.builtin.command: networkctl reload
+      changed_when: false
+
+    - name: Restart tun2socks service
+      ansible.builtin.systemd:
+        name: tun2socks
+        state: restarted
+
+    - name: Check API availability for RPC URL
+      ansible.builtin.uri:
+        url: "{{ rpc_url }}/health?"
+        method: GET
+        return_content: true
+        timeout: 30
+      register: rpc_url_response
+      retries: 3
+      delay: 60
+      failed_when:
+        - rpc_url_response.status != 200
+        - rpc_url_response.json is not none and rpc_url_response.json is not defined
+
+    - name: Check external IP after
+      ansible.builtin.command: curl https://ifconfig.me
+      register: ip_after
+      changed_when: false
+
+    - name: Validate IP address
+      ansible.builtin.assert:
+        that:
+          - ip_after.stdout | ansible.utils.ipaddr
+        fail_msg: "The returned value is not a valid IP address."
+        success_msg: "The returned value is a valid IP address."
+
+    - name: Show IPs
+      ansible.builtin.debug:
+        msg: "External IP before: {{ ip_before.stdout }}, External IP after: {{ ip_after.stdout }}"
+
+    - name: Compare external IPs
+      ansible.builtin.fail:
+        msg: "External IP before and after should not be the same"
+      when: ip_before.stdout == ip_after.stdout
+
+    - name: Up docker compose stack
+      ansible.builtin.command: docker compose up -d
+      args:
+        chdir: "{{ ansible_env.HOME }}/node"
+      environment:
+        COMPOSE_INTERACTIVE_NO_CLI: 'true'
+      changed_when: false
+      async: "{{ 60 * 80 }}"
+      poll: "{{ 60 }}"
+
+    - name: Install grist-api and colorama
+      ansible.builtin.command: pip3 install grist-api colorama --break-system-packages
+      args:
+        chdir: "{{ ansible_env.HOME }}/node"
+      changed_when: false
+
+    - name: Copy checker service file
+      ansible.builtin.copy:
+        dest: /etc/systemd/system/nillion-checker.service
+        content: |
+          [Unit]
+          Description=Nillion Checker Service
+          After=network.target
+
+          [Service]
+          Type=simple
+          User=root
+          WorkingDirectory={{ ansible_env.HOME }}/node
+          ExecStart=/usr/bin/bash {{ ansible_env.HOME }}/node/update-and-run-checker.sh
+          Restart=always
+          RestartSec=1800
+
+          [Install]
+          WantedBy=multi-user.target
+        mode: '0644'
+
+    - name: Reload systemd
+      ansible.builtin.systemd:
+        daemon_reload: yes
+
+    - name: Enable and start nillion-checker service
+      ansible.builtin.systemd:
+        name: nillion-checker
+        enabled: yes
+        state: started
+
+    - name: Remove docker login credentials
+      ansible.builtin.file:
+        path: /root/.docker/config.json
+        state: absent
diff --git a/prover-id b/prover-id
new file mode 100644
index 0000000..eb8341c
--- /dev/null
+++ b/prover-id
@@ -0,0 +1 @@
+###ID###
diff --git a/update-and-run-checker.sh b/update-and-run-checker.sh
new file mode 100644
index 0000000..7ece524
--- /dev/null
+++ b/update-and-run-checker.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+curl -o /root/node/checker.py https://gitea.vvzvlad.xyz/vvzvlad/nillion/raw/branch/main/checker.py
+python3 /root/node/checker.py
diff --git a/update.sh b/update.sh
new file mode 100644
index 0000000..8ce0a97
--- /dev/null
+++ b/update.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+if [ "$#" -ne 2 ]; then
+  echo "Usage: $0 <PARAMETER> <NEW_VALUE>"
+  exit 1
+fi
+
+PARAMETER=$1
+NEW_VALUE=$2
+
+# List of files
+FILES=(
+  "credentials.json"
+  "docker-compose.yml"
+  "grist.json"
+)
+
+for FILE in "${FILES[@]}"; do
+  EXPANDED_FILE=$(eval echo "$FILE")
+  sed -i "s|###$PARAMETER###|$NEW_VALUE|g" "$EXPANDED_FILE"
+done
\ No newline at end of file
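
Note on usage: update.sh only substitutes ###KEY### tokens in the files listed in FILES, so the values the playbook injects in its "Update environment variables" task can also be filled in by hand. A minimal sketch, assuming the repository is checked out to /root/node; the server URL, document id and API key are illustrative placeholders, and sed will warn about credentials.json because that file is not part of this commit:

    cd /root/node
    # Fill the ###GRIST_*### placeholders in grist.json that checker.py reads at startup
    ./update.sh GRIST_SERVER "https://grist.example.com"
    ./update.sh GRIST_DOC_ID "exampleDocId"
    ./update.sh GRIST_API_KEY "example-api-key"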