From d2b1ff68f99b4de93d7cf832899ea69dc8ddb45a Mon Sep 17 00:00:00 2001 From: conache Date: Wed, 7 Aug 2024 19:27:04 +0300 Subject: [PATCH 1/8] Remove b7s and add support for offchain-node --- .env.offchain-node.example | 37 +++++++++++++ .gitignore | 8 +-- Dockerfile_b7s | 7 --- Dockerfile => Dockerfile_inference | 0 docker-compose.yml | 88 ++++-------------------------- main.py | 31 ----------- 6 files changed, 53 insertions(+), 118 deletions(-) create mode 100644 .env.offchain-node.example delete mode 100644 Dockerfile_b7s rename Dockerfile => Dockerfile_inference (100%) delete mode 100644 main.py diff --git a/.env.offchain-node.example b/.env.offchain-node.example new file mode 100644 index 0000000..7425362 --- /dev/null +++ b/.env.offchain-node.example @@ -0,0 +1,37 @@ +ALLORA_OFFCHAIN_NODE_CONFIG_JSON='{ + "wallet": { + "addressKeyName": "test", + "addressRestoreMnemonic": "", + "addressAccountPassphrase": "", + "alloraHomeDir": "", + "gas": "1000000", + "gasAdjustment": 1.0, + "nodeRpc": "http://localhost:26657", + "maxRetries": 1, + "delay": 1, + "submitTx": false + }, + "worker": [ + { + "topicId": 1, + "inferenceEntrypointName": "api-worker-reputer", + "loopSeconds": 5, + "parameters": { + "InferenceEndpoint": "http://source:8000/inference/{Token}", + "Token": "ETH" + } + } + ], + "reputer": [ + { + "topicId": 1, + "reputerEntrypointName": "api-worker-reputer", + "loopSeconds": 30, + "minStake": 100000, + "parameters": { + "SourceOfTruthEndpoint": "http://source:8888/truth/{Token}/{BlockHeight}", + "Token": "ethereum" + } + } + ] +}' \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0bb4c44..4a971f2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,12 @@ .DS_Store __pycache__ *.pyc -.lake_cache/* logs/* .env -keys -data inference-data worker-data head-data -lib \ No newline at end of file +offchain-node-data + +.env.* +!.env.*.example diff --git a/Dockerfile_b7s b/Dockerfile_b7s deleted file mode 100644 index 3cfab5a..0000000 --- a/Dockerfile_b7s +++ /dev/null @@ -1,7 +0,0 @@ -FROM alloranetwork/allora-inference-base:latest - -USER root -RUN pip install requests - -USER appuser -COPY main.py /app/ diff --git a/Dockerfile b/Dockerfile_inference similarity index 100% rename from Dockerfile rename to Dockerfile_inference diff --git a/docker-compose.yml b/docker-compose.yml index a96f9be..2e6f837 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,14 +3,10 @@ services: container_name: inference-basic-eth-pred build: context: . + dockerfile: Dockerfile_inference command: python -u /app/app.py ports: - "8000:8000" - networks: - eth-model-local: - aliases: - - inference - ipv4_address: 172.22.0.4 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/inference/ETH"] interval: 10s @@ -21,7 +17,9 @@ services: updater: container_name: updater-basic-eth-pred - build: . + build: + context: . + dockerfile: Dockerfile_inference environment: - INFERENCE_API_ADDRESS=http://inference:8000 command: > @@ -34,78 +32,17 @@ services: depends_on: inference: condition: service_healthy - networks: - eth-model-local: - aliases: - - updater - ipv4_address: 172.22.0.5 - - head: - container_name: head-basic-eth-pred - image: alloranetwork/allora-inference-base-head:latest - environment: - - HOME=/data - entrypoint: - - "/bin/bash" - - "-c" - - | - if [ ! -f /data/keys/priv.bin ]; then - echo "Generating new private keys..." 
- mkdir -p /data/keys - cd /data/keys - allora-keys - fi - allora-node --role=head --peer-db=/data/peerdb --function-db=/data/function-db \ - --runtime-path=/app/runtime --runtime-cli=bls-runtime --workspace=/data/workspace \ - --private-key=/data/keys/priv.bin --log-level=debug --port=9010 --rest-api=:6000 - ports: - - "6000:6000" - volumes: - - ./head-data:/data - working_dir: /data - networks: - eth-model-local: - aliases: - - head - ipv4_address: 172.22.0.100 - worker: - container_name: worker-basic-eth-pred - environment: - - INFERENCE_API_ADDRESS=http://inference:8000 - - HOME=/data - build: - context: . - dockerfile: Dockerfile_b7s - entrypoint: - - "/bin/bash" - - "-c" - - | - if [ ! -f /data/keys/priv.bin ]; then - echo "Generating new private keys..." - mkdir -p /data/keys - cd /data/keys - allora-keys - fi - # Change boot-nodes below to the key advertised by your head - allora-node --role=worker --peer-db=/data/peerdb --function-db=/data/function-db \ - --runtime-path=/app/runtime --runtime-cli=bls-runtime --workspace=/data/workspace \ - --private-key=/data/keys/priv.bin --log-level=debug --port=9011 \ - --boot-nodes=/ip4/172.22.0.100/tcp/9010/p2p/{HEAD-ID} \ - --topic=allora-topic-1-worker + node: + container_name: offchain_node_test + image: allora-offchain-node:latest volumes: - - ./worker-data:/data - working_dir: /data + - ./offchain-node-data:/data depends_on: - - inference - - head - networks: - eth-model-local: - aliases: - - worker - ipv4_address: 172.22.0.10 - - + inference: + condition: service_healthy + env_file: + - ./env.offchain-node networks: eth-model-local: @@ -117,4 +54,3 @@ networks: volumes: inference-data: worker-data: - head-data: diff --git a/main.py b/main.py deleted file mode 100644 index 85b6611..0000000 --- a/main.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import requests -import sys -import json - -INFERENCE_ADDRESS = os.environ["INFERENCE_API_ADDRESS"] - - -def process(token_name): - response = requests.get(f"{INFERENCE_ADDRESS}/inference/{token_name}") - content = response.text - return content - - -if __name__ == "__main__": - # Your code logic with the parsed argument goes here - try: - if len(sys.argv) < 5: - value = json.dumps({"error": f"Not enough arguments provided: {len(sys.argv)}, expected 4 arguments: topic_id, blockHeight, blockHeightEval, default_arg"}) - else: - topic_id = sys.argv[1] - blockHeight = sys.argv[2] - blockHeightEval = sys.argv[3] - default_arg = sys.argv[4] - - response_inference = process(token_name=default_arg) - response_dict = {"infererValue": response_inference} - value = json.dumps(response_dict) - except Exception as e: - value = json.dumps({"error": {str(e)}}) - print(value) From 2f97575d52ba5b966b2e1557c24bd55cc4f4aedc Mon Sep 17 00:00:00 2001 From: conache Date: Wed, 7 Aug 2024 19:32:35 +0300 Subject: [PATCH 2/8] Fix inference endpoint --- .env.offchain-node.example | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/.env.offchain-node.example b/.env.offchain-node.example index 7425362..1f0a878 100644 --- a/.env.offchain-node.example +++ b/.env.offchain-node.example @@ -17,21 +17,9 @@ ALLORA_OFFCHAIN_NODE_CONFIG_JSON='{ "inferenceEntrypointName": "api-worker-reputer", "loopSeconds": 5, "parameters": { - "InferenceEndpoint": "http://source:8000/inference/{Token}", + "InferenceEndpoint": "http://inference:8000/inference/{Token}", "Token": "ETH" } } - ], - "reputer": [ - { - "topicId": 1, - "reputerEntrypointName": "api-worker-reputer", - "loopSeconds": 30, - "minStake": 100000, - 
"parameters": { - "SourceOfTruthEndpoint": "http://source:8888/truth/{Token}/{BlockHeight}", - "Token": "ethereum" - } - } ] }' \ No newline at end of file From 9db05c71407b7c8d805e4f74be930ba0a67e7933 Mon Sep 17 00:00:00 2001 From: conache Date: Thu, 8 Aug 2024 14:12:31 +0300 Subject: [PATCH 3/8] Add init scripts --- .gitignore | 13 ++++---- ...fchain-node.example => config.example.json | 7 ++-- docker-compose.yml | 8 ++--- init.offchain-node | 29 ++++++++++++++++ offchain-node-data/env_file | 4 +++ offchain-node-data/scripts/init.sh | 33 +++++++++++++++++++ 6 files changed, 79 insertions(+), 15 deletions(-) rename .env.offchain-node.example => config.example.json (76%) create mode 100755 init.offchain-node create mode 100644 offchain-node-data/env_file create mode 100644 offchain-node-data/scripts/init.sh diff --git a/.gitignore b/.gitignore index 4a971f2..c1d351e 100644 --- a/.gitignore +++ b/.gitignore @@ -2,11 +2,10 @@ __pycache__ *.pyc logs/* -.env -inference-data -worker-data -head-data -offchain-node-data -.env.* -!.env.*.example +.allorad +.cache +inference-data + +config.json +env diff --git a/.env.offchain-node.example b/config.example.json similarity index 76% rename from .env.offchain-node.example rename to config.example.json index 1f0a878..e22ab8b 100644 --- a/.env.offchain-node.example +++ b/config.example.json @@ -1,8 +1,7 @@ -ALLORA_OFFCHAIN_NODE_CONFIG_JSON='{ +{ "wallet": { "addressKeyName": "test", "addressRestoreMnemonic": "", - "addressAccountPassphrase": "", "alloraHomeDir": "", "gas": "1000000", "gasAdjustment": 1.0, @@ -17,9 +16,9 @@ ALLORA_OFFCHAIN_NODE_CONFIG_JSON='{ "inferenceEntrypointName": "api-worker-reputer", "loopSeconds": 5, "parameters": { - "InferenceEndpoint": "http://inference:8000/inference/{Token}", + "InferenceEndpoint": "http://source:8000/inference/{Token}", "Token": "ETH" } } ] -}' \ No newline at end of file +} \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 2e6f837..b85357a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,7 +6,7 @@ services: dockerfile: Dockerfile_inference command: python -u /app/app.py ports: - - "8000:8000" + - "8002:8000" healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/inference/ETH"] interval: 10s @@ -34,7 +34,7 @@ services: condition: service_healthy node: - container_name: offchain_node_test + container_name: offchain_node image: allora-offchain-node:latest volumes: - ./offchain-node-data:/data @@ -42,7 +42,7 @@ services: inference: condition: service_healthy env_file: - - ./env.offchain-node + - ./offchain-node-data/env_file networks: eth-model-local: @@ -53,4 +53,4 @@ networks: volumes: inference-data: - worker-data: + offchain-node-data: diff --git a/init.offchain-node b/init.offchain-node new file mode 100755 index 0000000..023c379 --- /dev/null +++ b/init.offchain-node @@ -0,0 +1,29 @@ +#!/bin/bash + +set -e + +if [ ! -f config.json ]; then + echo "Error: config.json file not found, please provide one" + exit 1 +fi + +nodeName=$(jq -r '.wallet.addressKeyName' config.json) +if [ -z "$nodeName" ]; then + echo "No name was provided for the node, please provide value for wallet.addressKeyName in the config.json" + exit 1 +fi + +if [ ! -f ./offchain-node-data/env_file ]; then + echo "ENV_LOADED=false" > ./offchain-node-data/env_file +fi + +ENV_LOADED=$(grep '^ENV_LOADED=' ./offchain-node-data/env_file | cut -d '=' -f 2) +if [ "$ENV_LOADED" = "false" ]; then + json_content=$(cat ./config.json) + stringified_json=$(echo "$json_content" | jq -c .) 
+ + docker run -it --entrypoint=bash -v $(pwd)/offchain-node-data:/data -e NAME="${nodeName}" -e ALLORA_OFFCHAIN_NODE_CONFIG_JSON="${stringified_json}" alloranetwork/allora-chain:latest -c "bash /data/scripts/init.sh" + echo "config.json saved to ./offchain-node-data/env_file" +else + echo "config.json is already loaded, skipping the operation. You can set ENV_LOADED variable to false in ./offchain-node-data/env_file to reload the config.json" +fi \ No newline at end of file diff --git a/offchain-node-data/env_file b/offchain-node-data/env_file new file mode 100644 index 0000000..5285859 --- /dev/null +++ b/offchain-node-data/env_file @@ -0,0 +1,4 @@ +ALLORA_OFFCHAIN_NODE_CONFIG_JSON='{"wallet":{"addressKeyName":"basic-coin-prediction-offchain-node","addressRestoreMnemonic":"rich note fetch lava bless snake delay theme era anger ritual sea pluck neck hazard dish talk ranch trophy clap fancy human divide gun","addressAccountPassphrase":"secret","alloraHomeDir":"","gas":"1000000","gasAdjustment":1,"nodeRpc":"https://allora-rpc.devnet.behindthecurtain.xyz","maxRetries":1,"minDelay":1,"maxDelay":2,"submitTx":false},"worker":[{"topicId":1,"inferenceEntrypointName":"api-worker-reputer","loopSeconds":5,"parameters":{"InferenceEndpoint":"http://inference:8000/inference/{Token}","Token":"ETH"}}]}' +ALLORA_OFFCHAIN_ACCOUNT_ADDRESS=allo14wkkdeg93mdc0sd770z9p4mpjz7w9mysz328um +NAME=basic-coin-prediction-offchain-node +ENV_LOADED=true diff --git a/offchain-node-data/scripts/init.sh b/offchain-node-data/scripts/init.sh new file mode 100644 index 0000000..0e6af84 --- /dev/null +++ b/offchain-node-data/scripts/init.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e + +if allorad keys --home=/data/.allorad --keyring-backend test show $NAME > /dev/null 2>&1 ; then + echo "allora account: $NAME already imported" +else + echo "creating allora account: $NAME" + output=$(allorad keys add $NAME --home=/data/.allorad --keyring-backend test 2>&1) + address=$(echo "$output" | grep 'address:' | sed 's/.*address: //') + mnemonic=$(echo "$output" | tail -n 1) + + # Parse and update the JSON string + updated_json=$(echo "$ALLORA_OFFCHAIN_NODE_CONFIG_JSON" | jq --arg name "$NAME" --arg mnemonic "$mnemonic" ' + .wallet.addressKeyName = $name | + .wallet.addressRestoreMnemonic = $mnemonic + ') + + stringified_json=$(echo "$updated_json" | jq -c .) 
+ + echo "ALLORA_OFFCHAIN_NODE_CONFIG_JSON='$stringified_json'" > /data/env_file + echo ALLORA_OFFCHAIN_ACCOUNT_ADDRESS=$address >> /data/env_file + echo "NAME=$NAME" >> /data/env_file + + echo "Updated ALLORA_OFFCHAIN_NODE_CONFIG_JSON saved to /data/env_file" +fi + + +if grep -q "ENV_LOADED=false" /data/env_file; then + sed -i 's/ENV_LOADED=false/ENV_LOADED=true/' /data/env_file +else + echo "ENV_LOADED=true" >> /data/env_file +fi From 9198fd4fb1157a3b991d0aa797c76798e0943c32 Mon Sep 17 00:00:00 2001 From: conache Date: Thu, 8 Aug 2024 14:50:17 +0300 Subject: [PATCH 4/8] Cleanup and update readme --- .gitignore | 1 + README.md | 191 ++++++------------ docker-compose.yml | 12 +- init.docker | 29 +++ init.offchain-node | 29 --- offchain-node-data/env_file | 4 - .../scripts/init.sh | 0 7 files changed, 94 insertions(+), 172 deletions(-) create mode 100755 init.docker delete mode 100755 init.offchain-node delete mode 100644 offchain-node-data/env_file rename {offchain-node-data => worker-data}/scripts/init.sh (100%) diff --git a/.gitignore b/.gitignore index c1d351e..5db6f87 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ inference-data config.json env +env_file \ No newline at end of file diff --git a/README.md b/README.md index 5767f9a..832c3a4 100644 --- a/README.md +++ b/README.md @@ -1,150 +1,75 @@ -# Basic ETH price prediction node +# Basic ETH Price Prediction Node -Example Allora network worker node: a node to provide price predictions of ETH. +This repository provides an example Allora network worker node, designed to offer price predictions for ETH. The primary objective is to demonstrate the use of a basic inference model running within a dedicated container, showcasing its integration with the Allora network infrastructure to contribute valuable inferences. -One of the primary objectives is to demonstrate the utilization of a basic inference model operating within a dedicated container. The purpose is to showcase its seamless integration with the Allora network infrastructure, enabling it to contribute with valuable inferences. +## Components -### Components +- **Worker**: The node that publishes inferences to the Allora chain. +- **Inference**: A container that conducts inferences, maintains the model state, and responds to internal inference requests via a Flask application. This node operates with a basic linear regression model for price predictions. +- **Updater**: A cron-like container designed to update the inference node's data by daily fetching the latest market information from Binance, ensuring the model stays current with new market trends. -* **Head**: An Allora network head node. This is not required for running your node in the Allora network, but it will help for testing your node emulating a network. -* **Worker**: The node that will respond to inference requests from the Allora network heads. -* **Inference**: A container that conducts inferences, maintains the model state, and responds to internal inference requests via a Flask application. The node operates with a basic linear regression model for price predictions. -* **Updater**: An example of a cron-like container designed to update the inference node's data by daily fetching the latest market information from Binance, ensuring the model is kept current with new market trends. +Check the `docker-compose.yml` file for the detailed setup of each component. -Check the `docker-compose.yml` file to see the separate components. 
+## Docker-Compose Setup -### Inference request flow +A complete working example is provided in the `docker-compose.yml` file. -When a request is made to the head, it relays this request to several workers associated with this head. The request specifies a function to run which will execute a wasm code that will call the `main.py` file in the worker. The worker will check the argument (the coin to predict for), make a request to the `inference` node, and return this value to the `head`, which prepares the response from all of its nodes and sends it back to the requestor. +### Steps to Setup -# Docker Setup +1. **Clone the Repository** +2. **Copy and Populate Configuration** + Copy the example configuration file and populate it with your variables: + ```sh + cp config.example.json config.json + ``` -- head and worker nodes are built upon `Dockerfile_b7s` file. This file is functional but simple, so you may want to change it to fit your needs, if you attempt to expand upon the current setup. -For further details, please check the base repo [allora-inference-base](https://github.com/allora-network/allora-inference-base). -- inference and updater nodes are built with `Dockerfile`. This works as an example of how to reuse your current model containers, just by setting up a Flask web application in front with minimal integration work with the Allora network nodes. +3. **Initialize Worker** + Run the following commands from the project's root directory to initialize the worker: + ```sh + chmod +x init.docker + ./init.docker + ``` + These commands will: + - Automatically create Allora keys for your worker. + - Export the needed variables from the created account to be used by the worker node, bundle them with your provided `config.json`, and pass them to the node as environment variables. -### Application path +4. **Faucet Your Worker Node** + You can find the offchain worker node's address in `./worker-data/env_file` under `ALLORA_OFFCHAIN_ACCOUNT_ADDRESS`. Request some tokens from the faucet to register your worker. -By default, the application runtime lives under `/app`, as well as the Python code the worker provides (`/app/main.py`). The current user needs to have write permissions on `/app/runtime`. +5. **Start the Services** + Run the following command to start the worker node, inference, and updater nodes: + ```sh + docker compose up --build + ``` + To confirm that the worker successfully sends the inferences to the chain, look for the following log: + ``` + {"level":"debug","msg":"Send Worker Data to chain","txHash":,"time":,"message":"Success"} + ``` -### Data volume and permissions +## Testing Inference Only -It is recommended to mount the `/worker-data` and `/head-data` folders as volumes, to persist the node databases of peers, functions, etc. which are defined in the flags passed to the worker. -You can create two different `/data` volumes. It is suggested to use `worker-data` for the worker, `head-data` for the head: -`mkdir worker-data && mkdir heaed-data`. +This setup allows you to develop your model without the need to bring up the head and worker. To test the inference model only: -Troubleshooting: A conflict may happen between the uid/gid of the user inside the container(1001) with the permissions of your own user. -To make the container user have permissions to write on the `/data` volume, you may need to set the UID/GID from the user running the container. You can get those in linux/osx via `id -u` and `id -g`. 
-The current `docker-compose.yml` file shows the `worker` service setting UID and GID. As well, the `Dockerfile` also sets UID/GID values. +1. Run the following command to start the inference node: + ```sh + docker compose up --build inference + ``` + Wait for the initial data load. - -# Docker-Compose Setup -A full working example is provided in the `docker-compose.yml` file. - -1. **Generate keys**: Create a set of keys for your head and worker nodes. These keys will be used in the configuration of the head and worker nodes. - -**Create head keys:** -``` -docker run -it --entrypoint=bash -v ./head-data:/data alloranetwork/allora-inference-base:latest -c "mkdir -p /data/keys && (cd /data/keys && allora-keys)" -``` - -**Create worker keys** -``` -docker run -it --entrypoint=bash -v ./worker-data:/data alloranetwork/allora-inference-base:latest -c "mkdir -p /data/keys && (cd /data/keys && allora-keys)" -``` - -Important note: If no keys are specified in the volumes, new keys will be automatically created inside `head-data/keys` and `worker-data/keys` when first running step 3. - -2. **Connect the worker node to the head node**: - -At this step, both worker and head nodes identities are generated inside `head-data/keys` and `worker-data/keys`. -To instruct the worker node to connect to the head node: -- run `cat head-data/keys/identity` to extract the head node's peer_id specified in the `head-data/keys/identity` -- use the printed peer_id to replace the `{HEAD-ID}` placeholder value specified inside the docker-compose.yml file when running the worker service: `--boot-nodes=/ip4/172.22.0.100/tcp/9010/p2p/{HEAD-ID}` - -3. **Run setup** -Once all the above is set up, run `docker compose up --build` -This will bring up the head, the worker and the inference nodes (which will run an initial update). The `updater` node is a companion for updating the inference node state and it's meant to hit the /update endpoint on the inference service. It is expected to run periodically, being crucial for maintaining the accuracy of the inferences. - -## Testing docker-compose setup - -The head node has the only open port and responds to requests in port 6000. - -Example request: -``` -curl --location 'http://127.0.0.1:6000/api/v1/functions/execute' \ ---header 'Content-Type: application/json' \ ---data '{ - "function_id": "bafybeigpiwl3o73zvvl6dxdqu7zqcub5mhg65jiky2xqb4rdhfmikswzqm", - "method": "allora-inference-function.wasm", - "parameters": null, - "topic": "1", - "config": { - "env_vars": [ - { - "name": "BLS_REQUEST_PATH", - "value": "/api" - }, - { - "name": "ALLORA_ARG_PARAMS", - "value": "ETH" - } - ], - "number_of_nodes": -1, - "timeout": 5 - } -}' -``` -Response: -``` -{ - "code": "200", - "request_id": "14be2a82-432c-4bae-bc1a-20c7627e0ebc", - "results": [ - { - "result": { - "stdout": "{\"infererValue\": \"2946.450220116334\"}\n\n", - "stderr": "", - "exit_code": 0 - }, - "peers": [ - "12D3KooWGHYZAR5YBgJHvG8o8GxBJpV5ANLUfL1UReX5Lizg5iKf" - ], - "frequency": 100 - } - ], - "cluster": { - "peers": [ - "12D3KooWGHYZAR5YBgJHvG8o8GxBJpV5ANLUfL1UReX5Lizg5iKf" - ] - } -} -``` - -## Testing inference only -This setup allows to develop your model without the need for bringing up the head and worker. -To only test the inference model, you can just: -- Run `docker compose up --build inference` and wait for the initial data load. -- Requests can now be sent, e.g. request ETH price inferences as in: - ``` - $ curl http://127.0.0.1:8000/inference/ETH +2. Send requests to the inference model. 
For example, request ETH price inferences: + ```sh + curl http://127.0.0.1:8000/inference/ETH + ``` + Expected response: + ```json {"value":"2564.021586281073"} - ``` - or update the node's internal state (download pricing data, train and update the model): - ``` - $ curl http://127.0.0.1:8000/update + ``` + +3. Update the node's internal state (download pricing data, train, and update the model): + ```sh + curl http://127.0.0.1:8000/update + ``` + Expected response: + ```sh 0 - ``` - -## Connecting to the Allora network - To connect to the Allora network to provide inferences, both the head and the worker need to register against it. More details on [allora-inference-base](https://github.com/allora-network/allora-inference-base) repo. -The following optional flags are used in the `command:` section of the `docker-compose.yml` file to define the connectivity with the Allora network. - -``` ---allora-chain-key-name=index-provider # your local key name in your keyring ---allora-chain-restore-mnemonic='pet sock excess ...' # your node's Allora address mnemonic ---allora-node-rpc-address= # RPC address of a node in the chain ---allora-chain-topic-id= # The topic id from the chain that you want to provide predictions for -``` -For the nodes to register with the chain, a funded address is needed first. -If these flags are not provided, the nodes will not register to the appchain and will not attempt to connect to the appchain. + ``` diff --git a/docker-compose.yml b/docker-compose.yml index b85357a..7da3e65 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -33,16 +33,16 @@ services: inference: condition: service_healthy - node: - container_name: offchain_node - image: allora-offchain-node:latest + worker: + container_name: worker + image: alloranetwork/allora-offchain-node:latest volumes: - - ./offchain-node-data:/data + - ./worker-data:/data depends_on: inference: condition: service_healthy env_file: - - ./offchain-node-data/env_file + - ./worker-data/env_file networks: eth-model-local: @@ -53,4 +53,4 @@ networks: volumes: inference-data: - offchain-node-data: + worker-data: diff --git a/init.docker b/init.docker new file mode 100755 index 0000000..ee4d658 --- /dev/null +++ b/init.docker @@ -0,0 +1,29 @@ +#!/bin/bash + +set -e + +if [ ! -f config.json ]; then + echo "Error: config.json file not found, please provide one" + exit 1 +fi + +nodeName=$(jq -r '.wallet.addressKeyName' config.json) +if [ -z "$nodeName" ]; then + echo "No name was provided for the node, please provide value for wallet.addressKeyName in the config.json" + exit 1 +fi + +if [ ! -f ./worker-data/env_file ]; then + echo "ENV_LOADED=false" > ./worker-data/env_file +fi + +ENV_LOADED=$(grep '^ENV_LOADED=' ./worker-data/env_file | cut -d '=' -f 2) +if [ "$ENV_LOADED" = "false" ]; then + json_content=$(cat ./config.json) + stringified_json=$(echo "$json_content" | jq -c .) + + docker run -it --entrypoint=bash -v $(pwd)/worker-data:/data -e NAME="${nodeName}" -e ALLORA_OFFCHAIN_NODE_CONFIG_JSON="${stringified_json}" alloranetwork/allora-chain:latest -c "bash /data/scripts/init.sh" + echo "config.json saved to ./worker-data/env_file" +else + echo "config.json is already loaded, skipping the operation. You can set ENV_LOADED variable to false in ./worker-data/env_file to reload the config.json" +fi \ No newline at end of file diff --git a/init.offchain-node b/init.offchain-node deleted file mode 100755 index 023c379..0000000 --- a/init.offchain-node +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -set -e - -if [ ! 
-f config.json ]; then - echo "Error: config.json file not found, please provide one" - exit 1 -fi - -nodeName=$(jq -r '.wallet.addressKeyName' config.json) -if [ -z "$nodeName" ]; then - echo "No name was provided for the node, please provide value for wallet.addressKeyName in the config.json" - exit 1 -fi - -if [ ! -f ./offchain-node-data/env_file ]; then - echo "ENV_LOADED=false" > ./offchain-node-data/env_file -fi - -ENV_LOADED=$(grep '^ENV_LOADED=' ./offchain-node-data/env_file | cut -d '=' -f 2) -if [ "$ENV_LOADED" = "false" ]; then - json_content=$(cat ./config.json) - stringified_json=$(echo "$json_content" | jq -c .) - - docker run -it --entrypoint=bash -v $(pwd)/offchain-node-data:/data -e NAME="${nodeName}" -e ALLORA_OFFCHAIN_NODE_CONFIG_JSON="${stringified_json}" alloranetwork/allora-chain:latest -c "bash /data/scripts/init.sh" - echo "config.json saved to ./offchain-node-data/env_file" -else - echo "config.json is already loaded, skipping the operation. You can set ENV_LOADED variable to false in ./offchain-node-data/env_file to reload the config.json" -fi \ No newline at end of file diff --git a/offchain-node-data/env_file b/offchain-node-data/env_file deleted file mode 100644 index 5285859..0000000 --- a/offchain-node-data/env_file +++ /dev/null @@ -1,4 +0,0 @@ -ALLORA_OFFCHAIN_NODE_CONFIG_JSON='{"wallet":{"addressKeyName":"basic-coin-prediction-offchain-node","addressRestoreMnemonic":"rich note fetch lava bless snake delay theme era anger ritual sea pluck neck hazard dish talk ranch trophy clap fancy human divide gun","addressAccountPassphrase":"secret","alloraHomeDir":"","gas":"1000000","gasAdjustment":1,"nodeRpc":"https://allora-rpc.devnet.behindthecurtain.xyz","maxRetries":1,"minDelay":1,"maxDelay":2,"submitTx":false},"worker":[{"topicId":1,"inferenceEntrypointName":"api-worker-reputer","loopSeconds":5,"parameters":{"InferenceEndpoint":"http://inference:8000/inference/{Token}","Token":"ETH"}}]}' -ALLORA_OFFCHAIN_ACCOUNT_ADDRESS=allo14wkkdeg93mdc0sd770z9p4mpjz7w9mysz328um -NAME=basic-coin-prediction-offchain-node -ENV_LOADED=true diff --git a/offchain-node-data/scripts/init.sh b/worker-data/scripts/init.sh similarity index 100% rename from offchain-node-data/scripts/init.sh rename to worker-data/scripts/init.sh From a4a708c7d45df2c8263ed3d671ebd9abc830febc Mon Sep 17 00:00:00 2001 From: conache Date: Thu, 8 Aug 2024 14:53:15 +0300 Subject: [PATCH 5/8] Add newlines --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 832c3a4..b9fe843 100644 --- a/README.md +++ b/README.md @@ -18,12 +18,14 @@ A complete working example is provided in the `docker-compose.yml` file. 1. **Clone the Repository** 2. **Copy and Populate Configuration** + Copy the example configuration file and populate it with your variables: ```sh cp config.example.json config.json ``` 3. **Initialize Worker** + Run the following commands from the project's root directory to initialize the worker: ```sh chmod +x init.docker @@ -34,9 +36,11 @@ A complete working example is provided in the `docker-compose.yml` file. - Export the needed variables from the created account to be used by the worker node, bundle them with your provided `config.json`, and pass them to the node as environment variables. 4. **Faucet Your Worker Node** + You can find the offchain worker node's address in `./worker-data/env_file` under `ALLORA_OFFCHAIN_ACCOUNT_ADDRESS`. Request some tokens from the faucet to register your worker. 5. 
**Start the Services** + Run the following command to start the worker node, inference, and updater nodes: ```sh docker compose up --build @@ -57,6 +61,7 @@ This setup allows you to develop your model without the need to bring up the hea Wait for the initial data load. 2. Send requests to the inference model. For example, request ETH price inferences: + ```sh curl http://127.0.0.1:8000/inference/ETH ``` @@ -66,6 +71,7 @@ This setup allows you to develop your model without the need to bring up the hea ``` 3. Update the node's internal state (download pricing data, train, and update the model): + ```sh curl http://127.0.0.1:8000/update ``` From b58d5275019d96449d688afd6f90e15e60ac81e9 Mon Sep 17 00:00:00 2001 From: conache Date: Thu, 8 Aug 2024 16:05:38 +0300 Subject: [PATCH 6/8] Remove redundant network --- docker-compose.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 7da3e65..7c7d08d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -43,13 +43,6 @@ services: condition: service_healthy env_file: - ./worker-data/env_file - -networks: - eth-model-local: - driver: bridge - ipam: - config: - - subnet: 172.22.0.0/24 volumes: inference-data: From 3dfa8ca2e62ab77596ff7ad24ca5bba7c42cf5c3 Mon Sep 17 00:00:00 2001 From: conache Date: Thu, 8 Aug 2024 16:08:44 +0300 Subject: [PATCH 7/8] Cleanup docker-compose --- Dockerfile_inference => Dockerfile | 0 docker-compose.yml | 8 ++------ 2 files changed, 2 insertions(+), 6 deletions(-) rename Dockerfile_inference => Dockerfile (100%) diff --git a/Dockerfile_inference b/Dockerfile similarity index 100% rename from Dockerfile_inference rename to Dockerfile diff --git a/docker-compose.yml b/docker-compose.yml index 7c7d08d..21d2bed 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,9 +1,7 @@ services: inference: container_name: inference-basic-eth-pred - build: - context: . - dockerfile: Dockerfile_inference + build: . command: python -u /app/app.py ports: - "8002:8000" @@ -17,9 +15,7 @@ services: updater: container_name: updater-basic-eth-pred - build: - context: . - dockerfile: Dockerfile_inference + build: . environment: - INFERENCE_API_ADDRESS=http://inference:8000 command: > From ce719ae34ee400c2d48ea4039f5b00c6a54592d2 Mon Sep 17 00:00:00 2001 From: conache Date: Thu, 8 Aug 2024 16:19:06 +0300 Subject: [PATCH 8/8] Cleanup --- README.md | 2 +- docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b9fe843..5180403 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ A complete working example is provided in the `docker-compose.yml` file. 4. **Faucet Your Worker Node** - You can find the offchain worker node's address in `./worker-data/env_file` under `ALLORA_OFFCHAIN_ACCOUNT_ADDRESS`. Request some tokens from the faucet to register your worker. + You can find the offchain worker node's address in `./worker-data/env_file` under `ALLORA_OFFCHAIN_ACCOUNT_ADDRESS`. [Add faucet funds](https://docs.allora.network/devs/get-started/setup-wallet#add-faucet-funds) to your worker's wallet before starting it. 5. **Start the Services** diff --git a/docker-compose.yml b/docker-compose.yml index 21d2bed..5d856e3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,7 +4,7 @@ services: build: . command: python -u /app/app.py ports: - - "8002:8000" + - "8000:8000" healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/inference/ETH"] interval: 10s
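
---

Taken together, the patches above replace the b7s head/worker containers with a single `alloranetwork/allora-offchain-node` worker configured through `config.json`. The following is a minimal sketch of the resulting workflow as described in the patched README, assuming the placeholder values from `config.example.json` have been adjusted for your own wallet:

```sh
# Provide the node configuration (set wallet.addressKeyName and other fields as needed)
cp config.example.json config.json

# Create the Allora account for the worker and generate ./worker-data/env_file
chmod +x init.docker
./init.docker

# Fund the address recorded as ALLORA_OFFCHAIN_ACCOUNT_ADDRESS in ./worker-data/env_file,
# then start the inference, updater and worker services
docker compose up --build

# Sanity-check the inference container directly
curl http://127.0.0.1:8000/inference/ETH
```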