---
# Docker Compose stack for a basic ETH price-prediction worker node:
#   inference — HTTP model server; updater — periodic retrain loop;
#   worker    — Allora off-chain node that queries the inference service.
services:
  inference:
    container_name: inference-basic-eth-pred
    build:
      context: .
      dockerfile: Dockerfile_inference
    command: python -u /app/app.py
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits.
      - "8000:8000"
    healthcheck:
      # Service is healthy once the inference endpoint answers; dependents
      # (updater, worker) wait on this via `condition: service_healthy`.
      test: ["CMD", "curl", "-f", "http://localhost:8000/inference/ETH"]
      interval: 10s
      timeout: 5s
      retries: 12
    volumes:
      # Bind mount for model/inference data persisted on the host.
      - ./inference-data:/app/data

  updater:
    container_name: updater-basic-eth-pred
    build:
      context: .
      dockerfile: Dockerfile_inference
    environment:
      - INFERENCE_API_ADDRESS=http://inference:8000
    # Run the update script once per day, forever. Folded scalar (>) joins
    # these lines into a single `sh -c "..."` command string.
    command: >
      sh -c "
      while true; do
        python -u /app/update_app.py;
        sleep 24h;
      done
      "
    depends_on:
      inference:
        condition: service_healthy

  worker:
    container_name: worker
    # `image` names the tag; `build` lets `docker compose build` produce it
    # locally from Dockerfile_worker when no prebuilt image is present.
    image: alloranetwork/allora-offchain-node:latest
    build:
      context: .
      dockerfile: Dockerfile_worker
    volumes:
      - ./worker-data:/data
    depends_on:
      inference:
        condition: service_healthy

# NOTE(review): these named volumes are declared but never referenced — the
# services above use host bind mounts (./inference-data, ./worker-data).
# Presumably leftover from an earlier revision; confirm before removing.
volumes:
  inference-data:
  worker-data: