---
# Docker Compose stack for a basic ETH price-prediction node:
#   inference — HTTP model server (port 8080), health-checked via its /inference/ETH endpoint
#   updater   — sidecar that refreshes model data every 24h once inference is healthy
#   worker    — Allora off-chain node that consumes the inference service
services:
  inference:
    container_name: inference-basic-eth-pred
    build: .
    command: python -u /app/app.py
    ports:
      # Quoted to avoid YAML sexagesimal parsing of colon-separated digits.
      - "8080:8080"
    healthcheck:
      # Container is "healthy" only once the ETH inference endpoint responds;
      # up to 12 retries x 10s interval (~2 min) allows for model warm-up.
      test: ["CMD", "curl", "-f", "http://localhost:8080/inference/ETH"]
      interval: 10s
      timeout: 5s
      retries: 12
    volumes:
      # Host bind mount for model/inference data (not the named volume below).
      - ./inference-data:/app/data
    restart: always

  updater:
    container_name: updater-basic-eth-pred
    build: .
    environment:
      # Service-name DNS resolves on the default Compose network.
      - INFERENCE_API_ADDRESS=http://inference:8080
    # Folded scalar: runs as one shell line — update, then sleep 24h, forever.
    command: >
      sh -c "
      while true; do
        python -u /app/update_app.py;
        sleep 24h;
      done
      "
    depends_on:
      inference:
        # Waits for the healthcheck above, not just container start.
        condition: service_healthy
    restart: always

  worker:
    container_name: worker
    image: alloranetwork/allora-offchain-node:latest
    volumes:
      # Host bind mount shared with env_file below.
      - ./worker-data:/data
    depends_on:
      inference:
        condition: service_healthy
    env_file:
      - ./worker-data/env_file
    restart: always

# NOTE(review): these named volumes are declared but never referenced — every
# service mounts a host directory (./inference-data, ./worker-data) instead.
# Kept for backward compatibility; confirm whether they can be removed or
# whether the services were meant to use them.
volumes:
  inference-data:
  worker-data: