diff --git a/.github/workflows/docker_ci.yaml b/.github/workflows/docker_ci.yaml new file mode 100644 index 0000000..5f911f1 --- /dev/null +++ b/.github/workflows/docker_ci.yaml @@ -0,0 +1,30 @@ +# pre-commit workflow +# +# Ensures the codebase passes the pre-commit stack. + +name: Build Docker Images + +on: + pull_request: + branches: + - main + - dev + +jobs: + docker: + runs-on: ubuntu-latest + strategy: + matrix: + project: [ "hello-world", "gpt4", "onnx-iris", "prompt-to-nft", "tgi-llm", "torch-iris"] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Run build example container + env: + project: ${{ matrix.project }} + CI: true + run: make build-container diff --git a/.github/workflows/python_ci.yaml b/.github/workflows/python_ci.yaml new file mode 100644 index 0000000..e8c6def --- /dev/null +++ b/.github/workflows/python_ci.yaml @@ -0,0 +1,30 @@ +# pre-commit workflow +# +# Ensures the codebase passes the pre-commit stack. + +name: pre-commit ci + +on: [push] + +jobs: + python_ci: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install UV + run: python -m pip install uv + + - name: Create virtual environment + run: uv venv + + - name: Activate virtual environment + run: | + . .venv/bin/activate + echo PATH=$PATH >> $GITHUB_ENV + + - name: Install dependencies + run: uv pip install -r requirements.txt + + - name: Run pre-commit hooks + run: pre-commit run --all-files --show-diff-on-failure diff --git a/.gitignore b/.gitignore index 2efde62..fc98d44 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# OS +**/.DS_Store + # Byte-compiled / optimized / DLL files deploy/config.json @@ -5,6 +8,7 @@ __pycache__/ *.py[cod] *$py + # C extensions *.so @@ -14,10 +18,23 @@ build/ *.egg-info/ # IDE specific files -.vscode/ -.idea/ +**/.vscode +**/.idea # Virtual environment -venv/ +venv +.venv/ +*.env -**/.idea + +# env files +**/*.env + +# OS Files +**/.DS_Store + +# Multi-deploykey CI +**/root-config + +# sync scripts +remote_sync diff --git a/.gitmodules b/.gitmodules index 6650627..89b8d8b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,36 @@ [submodule "projects/hello-world/contracts/lib/infernet-sdk"] path = projects/hello-world/contracts/lib/infernet-sdk url = https://github.com/ritual-net/infernet-sdk +[submodule "projects/torch-iris/contracts/lib/infernet-sdk"] + path = projects/torch-iris/contracts/lib/infernet-sdk + url = https://github.com/ritual-net/infernet-sdk +[submodule "projects/torch-iris/contracts/lib/forge-std"] + path = projects/torch-iris/contracts/lib/forge-std + url = https://github.com/foundry-rs/forge-std +[submodule "projects/onnx-iris/contracts/lib/infernet-sdk"] + path = projects/onnx-iris/contracts/lib/infernet-sdk + url = https://github.com/ritual-net/infernet-sdk +[submodule "projects/onnx-iris/contracts/lib/forge-std"] + path = projects/onnx-iris/contracts/lib/forge-std + url = https://github.com/foundry-rs/forge-std +[submodule "projects/prompt-to-nft/contracts/lib/forge-std"] + path = projects/prompt-to-nft/contracts/lib/forge-std + url = https://github.com/foundry-rs/forge-std +[submodule "projects/prompt-to-nft/contracts/lib/infernet-sdk"] + path = projects/prompt-to-nft/contracts/lib/infernet-sdk + url = https://github.com/ritual-net/infernet-sdk +[submodule "projects/prompt-to-nft/contracts/lib/solmate"] + path = projects/prompt-to-nft/contracts/lib/solmate + url = https://github.com/transmissions11/solmate +[submodule 
"projects/gpt4/contracts/lib/infernet-sdk"] + path = projects/gpt4/contracts/lib/infernet-sdk + url = https://github.com/ritual-net/infernet-sdk +[submodule "projects/gpt4/contracts/lib/forge-std"] + path = projects/gpt4/contracts/lib/forge-std + url = https://github.com/foundry-rs/forge-std +[submodule "projects/tgi-llm/contracts/lib/forge-std"] + path = projects/tgi-llm/contracts/lib/forge-std + url = https://github.com/foundry-rs/forge-std +[submodule "projects/tgi-llm/contracts/lib/infernet-sdk"] + path = projects/tgi-llm/contracts/lib/infernet-sdk + url = https://github.com/ritual-net/infernet-sdk diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..5dd9174 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,87 @@ +repos: + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.289 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + + - repo: https://github.com/psf/black + rev: 23.9.1 + hooks: + - id: black + + - repo: local + hooks: + - id: mypy-hello-world + name: mypy hello-world + entry: mypy --strict + files: ^projects/hello-world/container/ + language: system + types: [python] + + - repo: local + hooks: + - id: mypy-torch-iris + name: mypy torch-iris + entry: mypy --strict + files: ^projects/torch-iris/container/ + language: system + types: [python] + + - repo: local + hooks: + - id: mypy-onnx-iris + name: mypy onnx-iris + entry: mypy --strict + files: ^projects/onnx-iris/container/ + language: system + types: [python] + + + - repo: local + hooks: + - id: mypy-tgi-llm-container + name: mypy tgi-llm container + entry: mypy --strict + files: ^projects/tgi-llm/container + language: system + types: [python] + + - repo: local + hooks: + - id: mypy-tgi-llm-ui + name: mypy tgi-llm ui + entry: mypy --strict + files: ^projects/tgi-llm/ui + language: system + types: [python] + + - repo: local + hooks: + - id: mypy-gpt4 + name: mypy gpt4 + entry: mypy --strict + files: ^projects/gpt4/container + language: system + types: [python] + + - repo: local + hooks: + - id: mypy-prompt-to-nft + name: mypy prompt-to-nft + entry: mypy --strict + files: ^projects/prompt-to-nft/container + language: system + types: [python] + + # Default pre-commit hooks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + # Ensure EOF exists + - id: end-of-file-fixer + # Prevent adding large files + - id: check-added-large-files + args: ["--maxkb=5000"] + # Newline at end of file + - id: trailing-whitespace diff --git a/Makefile b/Makefile index e2bb22d..f9730cc 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,27 @@ +build-container: + $(MAKE) -C ./projects/$(project)/container build + +remove-containers: + docker compose -f deploy/docker-compose.yaml down || true + docker stop $(project) anvil-node && docker rm $(project) anvil-node || true + +build-multiplatform: + $(MAKE) -C ./projects/$(project)/container build-multiplatform + deploy-container: + $(MAKE) remove-containers cp ./projects/$(project)/container/config.json deploy/config.json - cd deploy && docker-compose up + docker compose -f deploy/docker-compose.yaml up -d + docker compose -f deploy/docker-compose.yaml logs -f deploy-contracts: $(MAKE) -C ./projects/$(project)/contracts deploy call-contract: $(MAKE) -C ./projects/$(project)/contracts call-contract + +build-service: + $(MAKE) -C ./projects/$(project)/$(service) build + +run-service: + $(MAKE) -C ./projects/$(project)/$(service) run diff --git a/README.md b/README.md index afcd900..e374c87 100644 --- 
a/README.md +++ b/README.md @@ -1,221 +1,19 @@ # infernet-container-starter -Starter examples for deploying to infernet. - -# Getting Started - -To interact with infernet, one could either create a job by accessing an infernet -node directly through it's API (we'll refer to this as an off-chain job), or by -creating a subscription on-chain (we'll refer to this as an on-chain job). - -## Requesting an off-chain job: Hello World! - -The easiest way to get started is to run our hello-world container. -This is a simple [flask-app](projects/hello-world/container/src/app.py) that -is compatible with `infernet`, and simply -[echoes what you send to it](./projects/hello-world/container/src/app.py#L16). - -We already have it [hosted on docker hub](https://hub.docker.com/r/ritualnetwork/hello-world-infernet) . -If you're curious how it's made, you can -follow the instructions [here](projects/hello-world/container/README.md) to build your own infernet-compatible -container. - -### Install Docker - -To run this, you'll need to have docker installed. You can find instructions -for installing docker [here](https://docs.docker.com/install/). - -### Running Locally - -First, ensure that the docker daemon is running. - -Then, from the top-level project directory, Run the following make command: - -``` -project=hello-world make deploy-container -``` - -This will deploy an infernet node along with the `hello-world` image. - -### Creating an off-chain job through the API - -You can create an off-chain job by posting to the `node` directly. - -```bash -curl -X POST http://127.0.0.1:4000/api/jobs \ - -H "Content-Type: application/json" \ - -d '{"containers":["hello-world"], "data": {"some": "input"}}' -# returns -{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007"} -``` - -This will return the id of that job. - -### Getting the status/result/errors of a job - -You can check the status of a job like so: - -```bash -curl -X GET http://127.0.0.1:4000/api/jobs?id=d5281dd5-c4f4-4523-a9c2-266398e06007 -# returns -[{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007", "result":{"container":"hello-world","output": {"output":"hello, world!, your input was: {'source': 1, 'data': {'some': 'input'}}"}} ,"status":"success"}] -``` - -### Configuration - -This project already comes with a pre-filled config file. The config -file for the hello-world project is located [here](projects/hello-world/container/config.json): - -```bash -projects/hello-world/config.json -``` - -## Requesting an on-chain job - -In this section we'll go over how to request an on-chain job in a local testnet. - -### Infernet's Anvil Testnet - -To request an on-chain job, you'll need to deploy contracts using the infernet sdk. -We already have a public [anvil node](https://hub.docker.com/r/ritualnetwork/infernet-anvil) docker image which has the -corresponding infernet sdk contracts deployed, along with a node that has -registered itself to listen to on-chain subscription events. - -* Coordinator Address: `0x5FbDB2315678afecb367f032d93F642f64180aa3` -* Node Address: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8` (This is the second account in the anvil's accounts.) 
- -### Deploying Infernet Node & Infernet's Anvil Testnet - -This step is similar to the section above: - -```bash -project=hello-world make deploy-container -``` - -In another terminal, run `docker container ls`, you should see something like this - -```bash -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c2ca0ffe7817 ritualnetwork/infernet-anvil:0.0.0 "anvil --host 0.0.0.…" 9 seconds ago Up 8 seconds 0.0.0.0:8545->3000/tcp anvil-node -0b686a6a0e5f ritualnetwork/hello-world-infernet:0.0.2 "gunicorn app:create…" 9 seconds ago Up 8 seconds 0.0.0.0:3000->3000/tcp hello-world -28b2e5608655 ritualnetwork/infernet-node:0.1.1 "/app/entrypoint.sh" 10 seconds ago Up 10 seconds 0.0.0.0:4000->4000/tcp deploy-node-1 -03ba51ff48b8 fluent/fluent-bit:latest "/fluent-bit/bin/flu…" 10 seconds ago Up 10 seconds 2020/tcp, 0.0.0.0:24224->24224/tcp deploy-fluentbit-1 -a0d96f29a238 redis:latest "docker-entrypoint.s…" 10 seconds ago Up 10 seconds 0.0.0.0:6379->6379/tcp deploy-redis-1 -``` - -You can see that the anvil node is running on port `8545`, and the infernet -node is running on port `4000`. Same as before. - -### Deploying Consumer Contracts - -We have a [sample forge project](./projects/hello-world/contracts) which contains -a simple consumer contract, [`SaysGM`](./projects/hello-world/contracts/src/SaysGM.sol). -All this contract does is to request a job from the infernet node, and upon receiving -the result, it will use the `forge` console to print the result. - -**Anvil Logs**: First, it's useful to look at the logs of the anvil node to see what's going on. In -a new terminal, run `docker logs -f anvil-node`. - -**Deploying the contracts**: In another terminal, run the following command: - -```bash -project=hello-world make deploy-contracts -``` - -You should be able to see the following logs in the anvil logs: - -```bash -eth_sendRawTransaction -eth_getTransactionReceipt - - Transaction: 0x23ca6b1d1823ad5af175c207c2505112f60038fc000e1e22509816fa29a3afd6 - Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602 - Gas used: 476669 - - Block Number: 1 - Block Hash: 0x6b026b70fbe97b4a733d4812ccd6e8e25899a1f6c622430c3fb07a2e5c5c96b7 - Block Time: "Wed, 17 Jan 2024 22:17:31 +0000" - -eth_getTransactionByHash -eth_getTransactionReceipt -eth_blockNumber -``` - -We can see that a new contract has been created at `0x663f3ad617193148711d28f5334ee4ed07016602`. -That's the address of the `SaysGM` contract. - -### Calling the contract - -Now, let's call the contract. 
In the same terminal, run the following command: - -```bash -project=hello-world make call-contract -``` - -You should first see that a transaction was sent to the `SaysGm` contract: - -```bash -eth_getTransactionReceipt - - Transaction: 0xe56b5b6ac713a978a1631a44d6a0c9eb6941dce929e1b66b4a2f7a61b0349d65 - Gas used: 123323 - - Block Number: 2 - Block Hash: 0x3d6678424adcdecfa0a8edd51e014290e5f54ee4707d4779e710a2a4d9867c08 - Block Time: "Wed, 17 Jan 2024 22:18:39 +0000" -eth_getTransactionByHash - -``` - -Then, right after that you should see another transaction submitted by the `node`, -which is the result of the job request: - -```bash -eth_chainId -eth_sendRawTransaction - - -_____ _____ _______ _ _ _ -| __ \|_ _|__ __| | | | /\ | | -| |__) | | | | | | | | | / \ | | -| _ / | | | | | | | |/ /\ \ | | -| | \ \ _| |_ | | | |__| / ____ \| |____ -|_| \_\_____| |_| \____/_/ \_\______| - - -subscription Id 1 -interval 1 -redundancy 1 -node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8 -input: -0x -output: -0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000607b276f7574707574273a202268656c6c6f2c20776f726c64212c20796f757220696e707574207761733a207b27736f75726365273a20302c202764617461273a20273437366636663634323036643666373236653639366536373231277d227d -proof: -0x - - Transaction: 0x949351d02e2c7f50ced2be06d14ca4311bd470ec80b135a2ce78a43f43e60d3d - Gas used: 94275 - - Block Number: 3 - Block Hash: 0x57ed0cf39e3fb3a91a0d8baa5f9cb5d2bdc1875f2ad5d6baf4a9466f522df354 - Block Time: "Wed, 17 Jan 2024 22:18:40 +0000" - - -eth_blockNumber -eth_newFilter - -``` - -We can see that the address of the `node` matches the address of the node in -our ritual anvil node. - -### Next Steps - -To learn more about on-chain requests, check out the following resources: - -1. [Tutorial](./projects/hello-world/contracts/Tutorial.md) on this project's consumer smart contracts. -2. [Infernet Callback Consumer Tutorial](https://docs.ritual.net/infernet/sdk/consumers/Callback) -3. [Infernet Nodes Docoumentation](https://docs.ritual.net/infernet/nodes) - - +Welcome to this repository! 🎉 This repo contains a series of examples that demonstrate +the true power of infernet, and the wide range of applications that can be built using +it: + +## Examples +1. [Hello World](projects/hello-world/hello-world.md): Infernet's version of a `hello-world` program. Here, we deploy +a container that simply echoes back the input to us. +2. [Running a Torch Model on Infernet](projects/torch-iris/torch-iris.md): This example shows you how to deploy a pre-trained [pytorch](https://pytorch.org/) +model to infernet. Using this example will make it easier for you to deploy your own models to infernet. +3. [Running an ONNX Model on Infernet](projects/onnx-iris/onnx-iris.md): Same as the previous example, but this time we deploy + an ONNX model to infernet. +4. [Prompt to NFT](projects/prompt-to-nft/prompt-to-nft.md): In this example, we use [stablediffusion](https://github.com/Stability-AI/stablediffusion) to + mint NFTs on-chain using a prompt. +5. [TGI Inference with Mistral-7b](projects/tgi-llm/tgi-llm.md): This example shows you how to deploy an arbitrary +LLM model using [Huggingface's TGI](https://huggingface.co/docs/text-generation-inference/en/index), and use it with an infernet node. +6. [Running OpenAI's GPT-4 on Infernet](projects/gpt4/gpt4.md): This example shows you how to deploy OpenAI's GPT-4 model +to infernet. 
diff --git a/deploy/docker-compose.yaml b/deploy/docker-compose.yaml index 1deb426..6cc532c 100644 --- a/deploy/docker-compose.yaml +++ b/deploy/docker-compose.yaml @@ -6,9 +6,7 @@ services: ports: - "0.0.0.0:4000:4000" volumes: - - type: bind - source: ./config.json - target: /app/config.json + - ./config.json:/app/config.json - node-logs:/logs - /var/run/docker.sock:/var/run/docker.sock networks: @@ -20,11 +18,12 @@ services: extra_hosts: - "host.docker.internal:host-gateway" stop_grace_period: 1m + tty: true redis: image: redis:latest - ports: - - "6379:6379" + expose: + - "6379" networks: - network volumes: @@ -35,9 +34,8 @@ services: fluentbit: image: fluent/fluent-bit:latest - ports: - - "24224:24224" - + expose: + - "24224" environment: - FLUENTBIT_CONFIG_PATH=/fluent-bit/etc/fluent-bit.conf volumes: diff --git a/deploy/fluent-bit.conf b/deploy/fluent-bit.conf index c165afa..2ea8ac9 100644 --- a/deploy/fluent-bit.conf +++ b/deploy/fluent-bit.conf @@ -35,4 +35,4 @@ User append_only_user Password ogy29Z4mRCLfpup*9fn6 Database postgres - Table live_stats \ No newline at end of file + Table live_stats diff --git a/projects/gpt4/container/.gitignore b/projects/gpt4/container/.gitignore new file mode 100644 index 0000000..e9cebaa --- /dev/null +++ b/projects/gpt4/container/.gitignore @@ -0,0 +1,2 @@ +sample-gpt3.env +config.json diff --git a/projects/gpt4/container/Dockerfile b/projects/gpt4/container/Dockerfile new file mode 100644 index 0000000..57add3f --- /dev/null +++ b/projects/gpt4/container/Dockerfile @@ -0,0 +1,25 @@ +FROM python:3.11-slim as builder + +WORKDIR /app + +ENV PYTHONUNBUFFERED 1 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PIP_NO_CACHE_DIR 1 +ENV RUNTIME docker +ENV PYTHONPATH src + +RUN apt-get update +RUN apt-get install -y git curl + +# install uv +ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh +RUN /install.sh && rm /install.sh + +COPY src/requirements.txt . + +RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt + +COPY src src + +ENTRYPOINT ["hypercorn", "app:create_app()"] +CMD ["-b", "0.0.0.0:3000"] diff --git a/projects/gpt4/container/Makefile b/projects/gpt4/container/Makefile new file mode 100644 index 0000000..0f0c698 --- /dev/null +++ b/projects/gpt4/container/Makefile @@ -0,0 +1,18 @@ +DOCKER_ORG := ritualnetwork +EXAMPLE_NAME := gpt4 +TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest + +.phony: build run build-multiplatform try-prompt + +build: + mkdir -p root-config + @docker build -t $(TAG) . + +run: build + @docker run --env-file $(EXAMPLE_NAME).env -p 3000:3000 $(TAG) + +# You may need to set up a docker builder, to do so run: +# docker buildx create --name mybuilder --bootstrap --use +# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info +build-multiplatform: + docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push . diff --git a/projects/gpt4/container/README.md b/projects/gpt4/container/README.md new file mode 100644 index 0000000..f6912d5 --- /dev/null +++ b/projects/gpt4/container/README.md @@ -0,0 +1,20 @@ +# GPT 4 +In this example, we run a minimalist container that makes use of our closed-source model +workflow: `CSSInferenceWorkflow`. Refer to [src/app.py](src/app.py) for the +implementation of the quart application. + +## Requirements +To use the model you'll need to have an OpenAI api key. Get one at +[OpenAI](https://openai.com/)'s website. 
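The container reads the key from its environment at runtime. The `make run` target below passes it in via an env file named `gpt4.env` (see the [Makefile](./Makefile) and `gpt4.env.sample`). As an optional sanity check — a minimal sketch, assuming Python 3 and that you have copied `gpt4.env.sample` to `gpt4.env` and filled in your key — you can fail fast before starting the container:

```python
# check_env.py -- optional helper, not part of this container
# Verifies that gpt4.env defines a non-empty OPENAI_API_KEY before `make run`.
from pathlib import Path

env = {}
for line in Path("gpt4.env").read_text().splitlines():
    if "=" in line and not line.lstrip().startswith("#"):
        key, _, value = line.partition("=")
        env[key.strip()] = value.strip()

assert env.get("OPENAI_API_KEY"), "OPENAI_API_KEY is empty -- add your key to gpt4.env"
print("OPENAI_API_KEY is set; `make run` will pass it to the container via --env-file.")
```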
+ +## Run the Container + +```bash +make run +``` + +## Test the Container +```bash +curl -X POST localhost:3000/service_output -H "Content-Type: application/json" \ + -d '{"source": 1, "data": {"text": "can shrimps actually fry rice?"}}' +``` diff --git a/projects/gpt4/container/config.sample.json b/projects/gpt4/container/config.sample.json new file mode 100644 index 0000000..151e5ad --- /dev/null +++ b/projects/gpt4/container/config.sample.json @@ -0,0 +1,52 @@ +{ + "log_path": "infernet_node.log", + "server": { + "port": 4000 + }, + "chain": { + "enabled": true, + "trail_head_blocks": 0, + "rpc_url": "http://host.docker.internal:8545", + "coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "wallet": { + "max_gas_limit": 4000000, + "private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" + } + }, + "startup_wait": 1.0, + "docker": { + "username": "your-username", + "password": "" + }, + "redis": { + "host": "redis", + "port": 6379 + }, + "forward_stats": true, + "containers": [ + { + "id": "gpt4", + "image": "ritualnetwork/example-gpt4-infernet:latest", + "external": true, + "port": "3000", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "--bind=0.0.0.0:3000 --workers=2", + "env": { + "OPENAI_API_KEY": "barabeem baraboom" + } + }, + { + "id": "anvil-node", + "image": "ritualnetwork/infernet-anvil:0.0.0", + "external": true, + "port": "8545", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "", + "env": {} + } + ] +} diff --git a/projects/gpt4/container/gpt4.env.sample b/projects/gpt4/container/gpt4.env.sample new file mode 100644 index 0000000..e570b8b --- /dev/null +++ b/projects/gpt4/container/gpt4.env.sample @@ -0,0 +1 @@ +OPENAI_API_KEY= diff --git a/projects/gpt4/container/src/app.py b/projects/gpt4/container/src/app.py new file mode 100644 index 0000000..17e4694 --- /dev/null +++ b/projects/gpt4/container/src/app.py @@ -0,0 +1,90 @@ +import logging +from typing import Any, cast + +from eth_abi import decode, encode # type: ignore +from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource +from infernet_ml.workflows.inference.css_inference_workflow import CSSInferenceWorkflow +from quart import Quart, request + +log = logging.getLogger(__name__) + + +def create_app() -> Quart: + app = Quart(__name__) + + workflow = CSSInferenceWorkflow(provider="OPENAI", endpoint="completions") + + workflow.setup() + + @app.route("/") + def index() -> str: + """ + Utility endpoint to check if the service is running. + """ + return "GPT4 Example Program" + + @app.route("/service_output", methods=["POST"]) + async def inference() -> dict[str, Any]: + req_data = await request.get_json() + """ + InfernetInput has the format: + source: (0 on-chain, 1 off-chain) + data: dict[str, Any] + """ + infernet_input: InfernetInput = InfernetInput(**req_data) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + prompt = cast(dict[str, Any], infernet_input.data).get("prompt") + else: + # On-chain requests are sent as a generalized hex-string which we will + # decode to the appropriate format. 
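+            # Note: for the on-chain path, `infernet_input.data` is the hex string of the
+            # ABI-encoded request payload -- the consumer contract passes `abi.encode(prompt)`
+            # to `_requestCompute` (see PromptsGPT.sol), so a single `string` is decoded here.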
+ (prompt,) = decode( + ["string"], bytes.fromhex(cast(str, infernet_input.data)) + ) + + result: dict[str, Any] = workflow.inference( + { + "model": "gpt-4-0613", + "params": { + "endpoint": "completions", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": prompt}, + ], + }, + } + ) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + """ + In case of an off-chain request, the result is returned as is. + """ + return {"message": result} + else: + """ + In case of an on-chain request, the result is returned in the format: + { + "raw_input": str, + "processed_input": str, + "raw_output": str, + "processed_output": str, + "proof": str, + } + refer to: https://docs.ritual.net/infernet/node/containers for more info. + """ + return { + "raw_input": "", + "processed_input": "", + "raw_output": encode(["string"], [result]).hex(), + "processed_output": "", + "proof": "", + } + + return app + + +if __name__ == "__main__": + """ + Utility to run the app locally. For development purposes only. + """ + create_app().run(port=3000) diff --git a/projects/gpt4/container/src/requirements.txt b/projects/gpt4/container/src/requirements.txt new file mode 100644 index 0000000..12cb6d3 --- /dev/null +++ b/projects/gpt4/container/src/requirements.txt @@ -0,0 +1,5 @@ +quart==0.19.4 +infernet_ml==0.1.0 +PyArweave @ git+https://github.com/ritual-net/pyarweave.git +web3==6.15.0 +retry2==0.9.5 diff --git a/projects/gpt4/contracts/.github/workflows/test.yml b/projects/gpt4/contracts/.github/workflows/test.yml new file mode 100644 index 0000000..9282e82 --- /dev/null +++ b/projects/gpt4/contracts/.github/workflows/test.yml @@ -0,0 +1,34 @@ +name: test + +on: workflow_dispatch + +env: + FOUNDRY_PROFILE: ci + +jobs: + check: + strategy: + fail-fast: true + + name: Foundry project + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly + + - name: Run Forge build + run: | + forge --version + forge build --sizes + id: build + + - name: Run Forge tests + run: | + forge test -vvv + id: test diff --git a/projects/gpt4/contracts/.gitignore b/projects/gpt4/contracts/.gitignore new file mode 100644 index 0000000..85198aa --- /dev/null +++ b/projects/gpt4/contracts/.gitignore @@ -0,0 +1,14 @@ +# Compiler files +cache/ +out/ + +# Ignores development broadcast logs +!/broadcast +/broadcast/*/31337/ +/broadcast/**/dry-run/ + +# Docs +docs/ + +# Dotenv file +.env diff --git a/projects/gpt4/contracts/Makefile b/projects/gpt4/contracts/Makefile new file mode 100644 index 0000000..f93baf1 --- /dev/null +++ b/projects/gpt4/contracts/Makefile @@ -0,0 +1,14 @@ +# phony targets are targets that don't actually create a file +.phony: deploy call-contract + +# anvil's third default address +sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a +RPC_URL := http://localhost:8545 + +# deploying the contract +deploy: + @PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL) + +# calling promptGPT() +call-contract: + @PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL) diff --git a/projects/gpt4/contracts/README.md b/projects/gpt4/contracts/README.md new file mode 100644 index 0000000..b36189f --- /dev/null +++ b/projects/gpt4/contracts/README.md @@ -0,0 +1,27 @@ +# GPT4 Example Contracts + +This is a minimalist foundry project 
that implements a [callback consumer](https://docs.ritual.net/infernet/sdk/consumers/Callback) +that makes a prompt to the [container](../container/README.md), which then makes a call to OpenAI's GPT4. For an +end-to-end flow of how this works, follow the [guide here](../gpt4.md). + +## Deploying + +The [`Deploy.s.sol`](./script/Deploy.s.sol) deploys the contracts. +The [Makefile](./Makefile) in this project containes +a utility deploy target. + +```bash +make deploy +``` + +## Prompting + +The [`CallContract.s.sol`](./script/CallContract.s.sol) calls +the [`promptGPT`](./src/PromptsGPT.sol#L10) function. +The [Makefile](./Makefile) in this project contains a utility call target. You'll need +to pass in the prompt as an +env var. + +```bash +make call-contract prompt="What is 2 * 3?" +``` diff --git a/projects/gpt4/contracts/foundry.toml b/projects/gpt4/contracts/foundry.toml new file mode 100644 index 0000000..83816a2 --- /dev/null +++ b/projects/gpt4/contracts/foundry.toml @@ -0,0 +1,7 @@ +[profile.default] +src = "src" +out = "out" +libs = ["lib"] +via_ir = true + +# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options diff --git a/projects/gpt4/contracts/remappings.txt b/projects/gpt4/contracts/remappings.txt new file mode 100644 index 0000000..c788350 --- /dev/null +++ b/projects/gpt4/contracts/remappings.txt @@ -0,0 +1,2 @@ +forge-std/=lib/forge-std/src +infernet-sdk/=lib/infernet-sdk/src diff --git a/projects/gpt4/contracts/script/CallContract.s.sol b/projects/gpt4/contracts/script/CallContract.s.sol new file mode 100644 index 0000000..a6cf018 --- /dev/null +++ b/projects/gpt4/contracts/script/CallContract.s.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.0; + +import {Script, console2} from "forge-std/Script.sol"; +import {PromptsGPT} from "../src/PromptsGPT.sol"; + +contract CallContract is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + PromptsGPT promptsGpt = PromptsGPT(0x663F3ad617193148711d28f5334eE4Ed07016602); + + promptsGpt.promptGPT(vm.envString("prompt")); + + vm.stopBroadcast(); + } +} diff --git a/projects/gpt4/contracts/script/Deploy.s.sol b/projects/gpt4/contracts/script/Deploy.s.sol new file mode 100644 index 0000000..98e086d --- /dev/null +++ b/projects/gpt4/contracts/script/Deploy.s.sol @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {Script, console2} from "forge-std/Script.sol"; +import {PromptsGPT} from "../src/PromptsGPT.sol"; + +contract Deploy is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + // Log address + address deployerAddress = vm.addr(deployerPrivateKey); + console2.log("Loaded deployer: ", deployerAddress); + + address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3; + + // Create consumer + PromptsGPT promptsGPT = new PromptsGPT(coordinator); + console2.log("Deployed PromptsGPT: ", address(promptsGPT)); + + // Execute + vm.stopBroadcast(); + vm.broadcast(); + } +} diff --git a/projects/gpt4/contracts/src/PromptsGPT.sol b/projects/gpt4/contracts/src/PromptsGPT.sol new file mode 100644 index 0000000..3676f72 --- /dev/null +++ b/projects/gpt4/contracts/src/PromptsGPT.sol @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {console2} 
from "forge-std/console2.sol"; +import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol"; + +contract PromptsGPT is CallbackConsumer { + string private EXTREMELY_COOL_BANNER = "\n\n" + "_____ _____ _______ _ _ _ \n" + "| __ \\|_ _|__ __| | | | /\\ | | \n" + "| |__) | | | | | | | | | / \\ | | \n" + "| _ / | | | | | | | |/ /\\ \\ | | \n" + "| | \\ \\ _| |_ | | | |__| / ____ \\| |____ \n" + "|_| \\_\\_____| |_| \\____/_/ \\_\\______| \n\n"; + constructor(address coordinator) CallbackConsumer(coordinator) {} + + function promptGPT(string calldata prompt) public { + _requestCompute( + "gpt4", + abi.encode(prompt), + 20 gwei, + 1_000_000, + 1 + ); + } + + function _receiveCompute( + uint32 subscriptionId, + uint32 interval, + uint16 redundancy, + address node, + bytes calldata input, + bytes calldata output, + bytes calldata proof + ) internal override { + console2.log(EXTREMELY_COOL_BANNER); + (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes)); + (string memory outputStr) = abi.decode(raw_output, (string)); + + console2.log("subscription Id", subscriptionId); + console2.log("interval", interval); + console2.log("redundancy", redundancy); + console2.log("node", node); + console2.log("output:", outputStr); + } +} diff --git a/projects/gpt4/gpt4.md b/projects/gpt4/gpt4.md new file mode 100644 index 0000000..5389c55 --- /dev/null +++ b/projects/gpt4/gpt4.md @@ -0,0 +1,206 @@ +# Running OpenAI's GPT-4 on Infernet + +In this tutorial we are going to integrate [OpenAI's GPT-4](https://openai.com/gpt-4) into infernet. We will: + +1. Obtain an API key from OpenAI +2. Configure the `gpt4` service, build & deploy it with Infernet +3. Make a web-2 request by directly prompting the [gpt4 service](./container) +4. Make a web-3 request by integrating a sample [`PromptsGPT.sol`](./contracts/src/PromptsGPT.sol) smart contract. This +contract will make a request to Infernet with their prompt, and receive the result of the request. + +## Install Pre-requisites + +For this tutorial you'll need to have the following installed. + +1. [Docker](https://docs.docker.com/engine/install/) +2. [Foundry](https://book.getfoundry.sh/getting-started/installation) + +### Get an API key from OpenAI + +First, you'll need to get an API key from OpenAI. You can do this by making +an [OpenAI](https://openai.com/) account. +After signing in, head over to [their platform](https://platform.openai.com/api-keys) to +make an API key. + +> [!NOTE] +> You will need a paid account to use the GPT-4 API. + +### Ensure `docker` & `foundry` exist + +To check for `docker`, run the following command in your terminal: +```bash copy +docker --version +# Docker version 25.0.2, build 29cf629 (example output) +``` + +You'll also need to ensure that docker-compose exists in your terminal: +```bash copy +which docker-compose +# /usr/local/bin/docker-compose (example output) +``` + +To check for `foundry`, run the following command in your terminal: +```bash copy +forge --version +# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output) +``` + +### Clone the starter repository +Just like our other examples, we're going to clone this repository. +All of the code and instructions for this tutorial can be found in the +[`projects/gpt4`](https://github.com/ritual-net/infernet-container-starter/tree/main/projects/gpt4) +directory of the repository. 
+ +```bash copy +# Clone locally +git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter +# Navigate to the repository +cd infernet-container-starter +``` + +### Configure the `gpt4` container + +#### Configure API key in `config.json` +This is where we'll use the API key we obtained from OpenAI. + +```bash +cd projects/gpt4/container +cp config.sample.json config.json +``` + +In the `containers` field, you will see the following. Replace `your-openai-key` with your OpenAI API key. + +```json +"containers": [ + { + // etc. etc. + "env": { + "OPENAI_API_KEY": "your-openai-key" // replace with your OpenAI API key + } + } +], +``` + +### Build the `gpt4` container + +First, navigate back to the root of the repository. Then simply run the following command to build the `gpt4` +container: + +```bash copy +cd ../../.. +make build-container project=gpt4 +``` + +### Deploy infernet node locally + +Much like our [hello world](../hello-world/hello-world.md) project, deploying the infernet node is as +simple as running: + +```bash copy +make deploy-container project=gpt4 +``` + +## Making a Web2 Request + +From here, you can directly make a request to the infernet node: + +```bash +curl -X POST http://127.0.0.1:4000/api/jobs \ + -H "Content-Type: application/json" \ + -d '{"containers":["gpt4"], "data": {"prompt": "Hello, can shrimp actually fry rice?"}}' +# {"id":"cab6eea8-8b1e-4144-9a70-f905c5ef375b"} +``` + +If you have `jq` installed, you can pipe the output of the last command to a file: + +```bash copy +curl -X POST http://127.0.0.1:4000/api/jobs \ + -H "Content-Type: application/json" \ + -d '{"containers":["gpt4"], "data": {"prompt": "Hello, can shrimp actually fry rice?"}}' | jq -r ".id" > last-job.uuid +``` + +You can then check the status of the job by running: + +```bash copy +curl -X GET http://127.0.0.1:4000/api/jobs\?id\=cab6eea8-8b1e-4144-9a70-f905c5ef375b +# response [{"id":"07026571-edc8-42ab-b38c-6b3cf19971b6","result":{"container":"gpt4","output":{"message":"No, shrimps cannot fry rice by themselves. However, in culinary terms, shrimp fried rice is a popular dish in which cooked shrimp are added to fried rice along with other ingredients. Cooks or chefs prepare it by frying the rice and shrimps together usually in a wok or frying pan."}},"status":"success"}] +``` + +And if you have `jq` installed and piped the last output to a file, you can instead run: + +```bash +curl -X GET "http://127.0.0.1:4000/api/jobs?id=$(cat last-request.uuid)" | jq . +# returns something like: +[ + { + "id": "1b50e85b-2295-44eb-9c85-40ae5331bd14", + "result": { + "container": "gpt4", + "output": { + "output": "Yes, shrimp can be used to make fried rice. In many Asian cuisines, shrimp is a popular ingredient in fried rice dishes. The shrimp adds flavor and protein to the dish, and can be cooked along with the rice and other ingredients such as vegetables, eggs, and seasonings." + } + }, + "status": "success" + } +] +``` + +## Making a Web3 Request + +Now let's bring this service onchain! First we'll have to deploy the contracts. +The [contracts](contracts) +directory contains a simple foundry project with a simple contract called `PromptsGpt`. +This contract exposes a single +function `function promptGPT(string calldata prompt)`. Using this function you'll be +able to make an infernet request. + +**Anvil Logs**: First, it's useful to look at the logs of the anvil node to see what's +going on. In a new terminal, run +`docker logs -f anvil-node`. 
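For context, the `make call-contract` target you will run below boils down to broadcasting a single `promptGPT(prompt)` transaction against the deployed consumer contract. A rough, purely illustrative Python equivalent — a sketch assuming `web3.py` is installed, the local anvil RPC is on port `8545`, the funded key from this project's contracts Makefile is used, and the contract address is whatever `make deploy-contracts` prints (shown here with the address used by `CallContract.s.sol`) — looks like this:

```python
# prompt_gpt.py -- illustrative sketch only; the supported path is `make call-contract`
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))  # local anvil node

# Funded anvil key, same one the contracts Makefile uses
account = w3.eth.account.from_key(
    "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a"
)

# Minimal ABI for the one function we call; replace the address with the one
# printed by `make deploy-contracts project=gpt4` if it differs
prompts_gpt = w3.eth.contract(
    address="0x663F3ad617193148711d28f5334eE4Ed07016602",
    abi=[{
        "type": "function",
        "name": "promptGPT",
        "inputs": [{"name": "prompt", "type": "string"}],
        "outputs": [],
        "stateMutability": "nonpayable",
    }],
)

tx = prompts_gpt.functions.promptGPT("Can shrimps actually fry rice?").build_transaction({
    "from": account.address,
    "nonce": w3.eth.get_transaction_count(account.address),
    "chainId": w3.eth.chain_id,
})
signed = account.sign_transaction(tx)
tx_hash = w3.eth.send_raw_transaction(signed.rawTransaction)
print("promptGPT tx sent:", tx_hash.hex())
```

The Infernet node then picks up the resulting subscription event and delivers GPT-4's output back to `_receiveCompute`, which is what you will see printed in the anvil logs.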
+ +**Deploying the contracts**: In another terminal, run the following command: + +```bash +make deploy-contracts project=gpt4 +``` + +### Calling the contract + +Now, let's call the contract. So far everything's been identical to +the [hello world](projects/hello-world/README.mdllo-world/README.md) project. The only +difference here is that calling the contract requires an input. We'll pass that input in +using an env var named +`prompt`: + +```bash copy +make call-contract project=gpt4 prompt="Can shrimps actually fry rice" +``` + +On your anvil logs, you should see something like this: + +```bash +eth_sendRawTransaction + +_____ _____ _______ _ _ _ +| __ \|_ _|__ __| | | | /\ | | +| |__) | | | | | | | | | / \ | | +| _ / | | | | | | | |/ /\ \ | | +| | \ \ _| |_ | | | |__| / ____ \| |____ +|_| \_\_____| |_| \____/_/ \_\______| + + +subscription Id 1 +interval 1 +redundancy 1 +node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8 +output: {'output': 'Yes, shrimps can be used to make fried rice. Fried rice is a versatile dish that can be made with various ingredients, including shrimp. Shrimp fried rice is a popular dish in many cuisines, especially in Asian cuisine.'} + + Transaction: 0x9bcab42cf7348953eaf107ca0ca539cb27f3843c1bb08cf359484c71fcf44d2b + Gas used: 93726 + + Block Number: 3 + Block Hash: 0x1cc39d03bb1d69ea7f32db85d2ee684071e28b6d6de9eab6f57e011e11a7ed08 + Block Time: "Fri, 26 Jan 2024 02:30:37 +0000" +``` + +beautiful, isn't it? 🥰 diff --git a/projects/hello-world/container/Dockerfile b/projects/hello-world/container/Dockerfile index ea15020..9a143fd 100644 --- a/projects/hello-world/container/Dockerfile +++ b/projects/hello-world/container/Dockerfile @@ -4,15 +4,20 @@ WORKDIR /app ENV PYTHONUNBUFFERED 1 ENV PYTHONDONTWRITEBYTECODE 1 +ENV PIP_NO_CACHE_DIR 1 +ENV RUNTIME docker ENV PYTHONPATH src -WORKDIR /app - RUN apt-get update +RUN apt-get install -y git curl + +# install uv +ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh +RUN /install.sh && rm /install.sh COPY src/requirements.txt . -RUN pip install --upgrade pip && pip install -r requirements.txt +RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt COPY src src diff --git a/projects/hello-world/container/README.md b/projects/hello-world/container/README.md index c76ce4e..4772e92 100644 --- a/projects/hello-world/container/README.md +++ b/projects/hello-world/container/README.md @@ -3,8 +3,8 @@ In this tutorial, we'll create a simple hello-world container that can be used with infernet. -> [!NOTE] -> This directory `containers/hello-world` already includes the final result +> [!NOTE] +> This directory `containers/hello-world` already includes the final result > of this tutorial. Run the following tutorial in a new directory. Let's get started! 🎉 @@ -88,7 +88,7 @@ This is a simple Dockerfile that: 3. Copies the source code 4. Runs the app on port `3000` -> [!IMPORTANT] +> [!IMPORTANT] > App must be exposed on port `3000`. Infernet's orchestrator > will always assume that the container apps are exposed on that port within the container. > Users can then remap this port to any port that they want on the host machine @@ -127,7 +127,7 @@ docker run --rm -p 3000:3000 --name hello hello-world In another terminal, run: ``` -curl localhost:3000 +curl "localhost:3000" ``` It should return something like: @@ -159,5 +159,5 @@ The output should be something like: Your users will never call this endpoint directly. Instead, they will: -1. 
Either [create an off-chain job request](../../../README.md#L36) through the node API +1. Either [create an off-chain job request](../hello-world#L36) through the node API 2. Or they will make a subscription on their contracts diff --git a/projects/hello-world/container/scripts/request_node.py b/projects/hello-world/container/scripts/request_node.py index 143321d..4edcea6 100644 --- a/projects/hello-world/container/scripts/request_node.py +++ b/projects/hello-world/container/scripts/request_node.py @@ -1,9 +1,10 @@ from time import sleep +from typing import Any import requests -def hit_server_directly(): +def hit_server_directly() -> None: print("hello") r = requests.get("http://localhost:3000/") print(r.status_code) @@ -11,7 +12,7 @@ def hit_server_directly(): print("server response", r.text) -def poll_until_complete(id: str): +def poll_until_complete(id: str) -> Any: status = "running" r = None while status == "running": @@ -24,11 +25,12 @@ def poll_until_complete(id: str): status = r.get("status") print("status", status) if status != "running": - return r + break sleep(1) + return r -def create_job_through_node(): +def create_job_through_node() -> None: r = requests.post( "http://localhost:4000/api/jobs", json={ diff --git a/projects/hello-world/container/src/requirements.txt b/projects/hello-world/container/src/requirements.txt index c080975..5d42a79 100644 --- a/projects/hello-world/container/src/requirements.txt +++ b/projects/hello-world/container/src/requirements.txt @@ -1,2 +1,2 @@ Flask>=3.0.0,<4.0.0 -gunicorn>=21.2.0,<22.0.0 \ No newline at end of file +gunicorn>=21.2.0,<22.0.0 diff --git a/projects/hello-world/contracts/Makefile b/projects/hello-world/contracts/Makefile index e71bd45..2af9de7 100644 --- a/projects/hello-world/contracts/Makefile +++ b/projects/hello-world/contracts/Makefile @@ -11,4 +11,4 @@ deploy: # calling sayGM() call-contract: - @PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL) \ No newline at end of file + @PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL) diff --git a/projects/hello-world/contracts/README.md b/projects/hello-world/contracts/README.md index 45bd826..932e8b1 100644 --- a/projects/hello-world/contracts/README.md +++ b/projects/hello-world/contracts/README.md @@ -1,15 +1,15 @@ # `Hello-World` Consumer Contracts This is a [foundry](https://book.getfoundry.sh/) project that implements a simple Consumer -contract, [`SaysGm`](./src/SaysGM.sol). +contract, [`SaysGm`](./src/SaysGM.sol). -This readme explains how to compile and deploy the contract to the Infernet Anvil Testnet network. +This readme explains how to compile and deploy the contract to the Infernet Anvil Testnet network. For a detailed tutorial on how to write a consumer contract, refer to the [tutorial doc](./Tutorial.md). -> [!IMPORTANT] -> Ensure that you are running the following scripts with the Infernet Anvil Testnet network. -> The [tutorial](./../../../README.md) at the root of this repository explains how to +> [!IMPORTANT] +> Ensure that you are running the following scripts with the Infernet Anvil Testnet network. +> The [tutorial](../hello-world) at the root of this repository explains how to > bring up an infernet node. ### Installing the libraries @@ -27,7 +27,7 @@ forge compile ### Deploying the contracts The deploy script at `script/Deploy.s.sol` deploys the `SaysGM` contract to the Infernet Anvil Testnet network. 
-We have the [following make target](./Makefile#L9) to deploy the contract. Refer to the Makefile +We have the [following make target](./Makefile#L9) to deploy the contract. Refer to the Makefile for more understanding around the deploy scripts. ```bash make deploy @@ -35,10 +35,9 @@ make deploy ### Requesting a job We also have a script called `CallContract.s.sol` that requests a job to the `SaysGM` contract. -Refer to the [script](./script/CallContract.s.sol) for more details. Similar to deployment, +Refer to the [script](./script/CallContract.s.sol) for more details. Similar to deployment, you can run that script using the following convenience make target. ```bash make call-contract ``` Refer to the [Makefile](./Makefile#L14) for more details. - diff --git a/projects/hello-world/contracts/Tutorial.md b/projects/hello-world/contracts/Tutorial.md index a24783a..174d3b7 100644 --- a/projects/hello-world/contracts/Tutorial.md +++ b/projects/hello-world/contracts/Tutorial.md @@ -218,7 +218,7 @@ PRIVATE_KEY=0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a \ ``` ### Using a `Makefile` -To make running these commands easier, we can add them to a `Makefile`. This allows +To make running these commands easier, we can add them to a `Makefile`. This allows us to run `make deploy` and `make call` instead of typing out the full command every time. Refer to [this project's Makefile](./Makefile) for an example. @@ -226,4 +226,4 @@ Refer to [this project's Makefile](./Makefile) for an example. ### 🎉 Done! Congratulations! You've successfully created a contract that requests compute from -our `hello-world` container. +our `hello-world` container. diff --git a/projects/hello-world/hello-world.md b/projects/hello-world/hello-world.md new file mode 100644 index 0000000..74626c9 --- /dev/null +++ b/projects/hello-world/hello-world.md @@ -0,0 +1,231 @@ +# Hello, World! + +Welcome to the first tutorial of Infernet! In this tutorial we will guide you through the process of setting up and +running an Infernet Node, and then demonstrate how to create and monitor off-chain compute jobs and on-chain subscriptions. + +To interact with infernet, one could either create a job by accessing an infernet node directly through it's API (we'll +refer to this as an off-chain job), or by creating a subscription on-chain (we'll refer to this as an on-chain job). + +## Requesting an off-chain job: Hello World! + +This project is a simple [flask-app](container/src/app.py) that is compatible with `infernet`, and simply +[echoes what you send to it](container/src/app.py#L16). + +### Install Docker & Verify Installation + +To run this, you'll need to have docker installed. You can find instructions for installing docker [here](https://docs.docker.com/install/). 
+ +After installing & running docker, you can verify that the docker daemon is running by running the following command: + +```bash copy +docker --version +# Docker version 25.0.2, build 29cf629 +``` + +### Clone the starter repository + +```bash copy +# Clone locally +git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter +# Navigate to the repository +cd infernet-container-starter +``` + +### Build the `hello-world` container +Once inside the repository directory, you can run a simple command to build the `hello-world` container: + +```bash copy +make build-container project=hello-world +``` + +### Running Locally + +Then, from the top-level project directory, Run the following make command: + +``` +make deploy-container project=hello-world +``` + +This will deploy an infernet node along with the `hello-world` image. + +### Creating an off-chain job through the API + +You can create an off-chain job by posting to the `node` directly. + +```bash +curl -X POST "http://127.0.0.1:4000/api/jobs" \ + -H "Content-Type: application/json" \ + -d '{"containers":["hello-world"], "data": {"some": "input"}}' +# returns +{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007"} +``` + +This will return the id of that job. + +### Getting the status/result/errors of a job + +You can check the status of a job like so: + +```bash +curl -X GET "http://127.0.0.1:4000/api/jobs?id=d5281dd5-c4f4-4523-a9c2-266398e06007" +# returns +[{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007", "result":{"container":"hello-world","output": {"output":"hello, world!, your input was: {'source': 1, 'data': {'some': 'input'}}"}} ,"status":"success"}] +``` + +### Configuration + +This project already comes with a pre-filled config file. The config file for the hello-world project is located +[here](container/config.json): + +```bash +projects/hello-world/config.json +``` + +## Requesting an on-chain job + +In this section we'll go over how to request an on-chain job in a local anvil node. + +### Infernet's Anvil Testnet + +To request an on-chain job, you'll need to deploy contracts using the infernet sdk. +We already have a public [anvil node](https://hub.docker.com/r/ritualnetwork/infernet-anvil) docker image which has the +corresponding infernet sdk contracts deployed, along with a node that has +registered itself to listen to on-chain subscription events. + +* Coordinator Address: `0x5FbDB2315678afecb367f032d93F642f64180aa3` +* Node Address: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8` (This is the second account in the anvil's accounts.) 
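If you want to convince yourself that the bundled anvil image really ships with these pre-deployed contracts, a small sketch like the one below — assuming `web3.py` is installed and the anvil container from the next step is up on port `8545` — checks that contract bytecode exists at the coordinator address and that the node account is funded:

```python
# verify_anvil.py -- optional sanity check, not part of this repo
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))  # the anvil-node container

COORDINATOR = "0x5FbDB2315678afecb367f032d93F642f64180aa3"
NODE = "0x70997970C51812dc3A010C7d01b50e0d17dc79C8"

# Deployed contracts have non-empty bytecode at their address; EOAs do not.
code = w3.eth.get_code(COORDINATOR)
assert len(code) > 0, "no contract code found at the coordinator address"

print("coordinator bytecode size:", len(code), "bytes")
print("node balance:", w3.from_wei(w3.eth.get_balance(NODE), "ether"), "ETH")
```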
+ +### Deploying Infernet Node & Infernet's Anvil Testnet + +This step is similar to the section above: + +```bash +project=hello-world make deploy-container +``` + +In another terminal, run `docker container ls`, you should see something like this + +```bash +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c2ca0ffe7817 ritualnetwork/infernet-anvil:0.0.0 "anvil --host 0.0.0.…" 9 seconds ago Up 8 seconds 0.0.0.0:8545->3000/tcp anvil-node +0b686a6a0e5f ritualnetwork/hello-world-infernet:0.0.2 "gunicorn app:create…" 9 seconds ago Up 8 seconds 0.0.0.0:3000->3000/tcp hello-world +28b2e5608655 ritualnetwork/infernet-node:0.1.1 "/app/entrypoint.sh" 10 seconds ago Up 10 seconds 0.0.0.0:4000->4000/tcp deploy-node-1 +03ba51ff48b8 fluent/fluent-bit:latest "/fluent-bit/bin/flu…" 10 seconds ago Up 10 seconds 2020/tcp, 0.0.0.0:24224->24224/tcp deploy-fluentbit-1 +a0d96f29a238 redis:latest "docker-entrypoint.s…" 10 seconds ago Up 10 seconds 0.0.0.0:6379->6379/tcp deploy-redis-1 +``` + +You can see that the anvil node is running on port `8545`, and the infernet +node is running on port `4000`. Same as before. + +### Deploying Consumer Contracts + +We have a [sample forge project](./contracts) which contains +a simple consumer contract, [`SaysGM`](contracts/src/SaysGM.sol). +All this contract does is to request a job from the infernet node, and upon receiving +the result, it will use the `forge` console to print the result. + +**Anvil Logs**: First, it's useful to look at the logs of the anvil node to see what's going on. In +a new terminal, run `docker logs -f anvil-node`. + +**Deploying the contracts**: In another terminal, run the following command: + +```bash +project=hello-world make deploy-contracts +``` + +You should be able to see the following logs in the anvil logs: + +```bash +eth_sendRawTransaction +eth_getTransactionReceipt + + Transaction: 0x23ca6b1d1823ad5af175c207c2505112f60038fc000e1e22509816fa29a3afd6 + Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602 + Gas used: 476669 + + Block Number: 1 + Block Hash: 0x6b026b70fbe97b4a733d4812ccd6e8e25899a1f6c622430c3fb07a2e5c5c96b7 + Block Time: "Wed, 17 Jan 2024 22:17:31 +0000" + +eth_getTransactionByHash +eth_getTransactionReceipt +eth_blockNumber +``` + +We can see that a new contract has been created at `0x663f3ad617193148711d28f5334ee4ed07016602`. +That's the address of the `SaysGM` contract. + +### Calling the contract + +Now, let's call the contract. 
In the same terminal, run the following command: + +```bash +project=hello-world make call-contract +``` + +You should first see that a transaction was sent to the `SaysGm` contract: + +```bash +eth_getTransactionReceipt + + Transaction: 0xe56b5b6ac713a978a1631a44d6a0c9eb6941dce929e1b66b4a2f7a61b0349d65 + Gas used: 123323 + + Block Number: 2 + Block Hash: 0x3d6678424adcdecfa0a8edd51e014290e5f54ee4707d4779e710a2a4d9867c08 + Block Time: "Wed, 17 Jan 2024 22:18:39 +0000" +eth_getTransactionByHash + +``` + +Then, right after that you should see another transaction submitted by the `node`, +which is the result of the job request: + +```bash +eth_chainId +eth_sendRawTransaction + + +_____ _____ _______ _ _ _ +| __ \|_ _|__ __| | | | /\ | | +| |__) | | | | | | | | | / \ | | +| _ / | | | | | | | |/ /\ \ | | +| | \ \ _| |_ | | | |__| / ____ \| |____ +|_| \_\_____| |_| \____/_/ \_\______| + + +subscription Id 1 +interval 1 +redundancy 1 +node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8 +input: +0x +output: +0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000607b276f7574707574273a202268656c6c6f2c20776f726c64212c20796f757220696e707574207761733a207b27736f75726365273a20302c202764617461273a20273437366636663634323036643666373236653639366536373231277d227d +proof: +0x + + Transaction: 0x949351d02e2c7f50ced2be06d14ca4311bd470ec80b135a2ce78a43f43e60d3d + Gas used: 94275 + + Block Number: 3 + Block Hash: 0x57ed0cf39e3fb3a91a0d8baa5f9cb5d2bdc1875f2ad5d6baf4a9466f522df354 + Block Time: "Wed, 17 Jan 2024 22:18:40 +0000" + + +eth_blockNumber +eth_newFilter + +``` + +We can see that the address of the `node` matches the address of the node in +our ritual anvil node. + +### Next Steps + +To learn more about on-chain requests, check out the following resources: + +1. [Tutorial](contracts/Tutorial.md) on this project's consumer smart contracts. +2. [Infernet Callback Consumer Tutorial](https://docs.ritual.net/infernet/sdk/consumers/Callback) +3. [Infernet Nodes Docoumentation](https://docs.ritual.net/infernet/node/introduction) +4. [Infernet-Compatible Containers](https://docs.ritual.net/infernet/node/containers) diff --git a/projects/onnx-iris/container/Dockerfile b/projects/onnx-iris/container/Dockerfile new file mode 100644 index 0000000..57add3f --- /dev/null +++ b/projects/onnx-iris/container/Dockerfile @@ -0,0 +1,25 @@ +FROM python:3.11-slim as builder + +WORKDIR /app + +ENV PYTHONUNBUFFERED 1 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PIP_NO_CACHE_DIR 1 +ENV RUNTIME docker +ENV PYTHONPATH src + +RUN apt-get update +RUN apt-get install -y git curl + +# install uv +ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh +RUN /install.sh && rm /install.sh + +COPY src/requirements.txt . + +RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt + +COPY src src + +ENTRYPOINT ["hypercorn", "app:create_app()"] +CMD ["-b", "0.0.0.0:3000"] diff --git a/projects/onnx-iris/container/Makefile b/projects/onnx-iris/container/Makefile new file mode 100644 index 0000000..f392f14 --- /dev/null +++ b/projects/onnx-iris/container/Makefile @@ -0,0 +1,17 @@ +DOCKER_ORG := ritualnetwork +EXAMPLE_NAME := onnx-iris +TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest + +.phony: build run build-multiplatform + +build: + @docker build -t $(TAG) . 
+ +run: + docker run -p 3000:3000 $(TAG) + +# You may need to set up a docker builder, to do so run: +# docker buildx create --name mybuilder --bootstrap --use +# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info +build-multiplatform: + docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push . diff --git a/projects/onnx-iris/container/README.md b/projects/onnx-iris/container/README.md new file mode 100644 index 0000000..753d4ec --- /dev/null +++ b/projects/onnx-iris/container/README.md @@ -0,0 +1,96 @@ +# Iris Classification via ONNX Runtime + +This example uses a pre-trained model to classify iris flowers. The code for the model +is located at +our [simple-ml-models](https://github.com/ritual-net/simple-ml-models/tree/main/iris_classification) +repository. + +## Overview + +We're making use of +the [ONNXInferenceWorkflow](https://github.com/ritual-net/infernet-ml-internal/blob/main/src/ml/workflows/inference/onnx_inference_workflow.py) +class to run the model. This is one of many workflows that we currently support in our +[infernet-ml](https://github.com/ritual-net/infernet-ml-internal). Consult the library's +documentation for more info on workflows that +are supported. + +## Building & Running the Container in Isolation + +Note that this container is meant to be started by the infernet-node. For development & +Testing purposes, you can run the container in isolation using the following commands. + +### Building the Container + +Simply run the following command to build the container. + +```bash +make build +``` + +Consult the [Makefile](./Makefile) for the build command. + +### Running the Container + +To run the container, you can use the following command: + +```bash +make run +``` + +## Testing the Container + +Run the following command to run an inference: + +```bash +curl -X POST http://127.0.0.1:3000/service_output \ + -H "Content-Type: application/json" \ + -d '{"source":1, "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}}' +``` + +#### Note Regarding the Input + +The inputs provided above correspond to an iris flower with the following +characteristics. Refer to the + +1. Sepal Length: `5.5cm` +2. Sepal Width: `2.4cm` +3. Petal Length: `3.8cm` +4. Petal Width: `1.1cm` + +Putting this input into a vector and scaling it, we get the following scaled input: + +```python +[1.0380048, 0.5586108, 1.1037828, 1.712096] +``` + +Refer +to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13) +for more information on how the input is scaled. + +For more context on the Iris dataset, refer to +the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/iris). + +### Output + +By running the above command, you should get a response similar to the following: + +```json +[ + [ + [ + 0.0010151526657864451, + 0.014391022734344006, + 0.9845937490463257 + ] + ] +] +``` + +The response corresponds to the model's prediction for each of the classes: + +```python +['setosa', 'versicolor', 'virginica'] +``` + +In this case, the model predicts that the input corresponds to the class `virginica`with +a probability of `0.9845937490463257`(~98.5%). 
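To tie the pieces above together, here is a small end-to-end client sketch — assuming the container is running locally via `make run` and that the `requests` and `numpy` packages are available — that sends the already-scaled input from this README to `/service_output` and maps the returned probabilities back to class labels:

```python
# classify_iris.py -- illustrative client sketch, not part of this container
import numpy as np
import requests

CLASSES = ["setosa", "versicolor", "virginica"]

# Scaled features for a flower with 5.5cm x 2.4cm sepals and 3.8cm x 1.1cm petals
scaled_input = [[1.0380048, 0.5586108, 1.1037828, 1.712096]]

resp = requests.post(
    "http://127.0.0.1:3000/service_output",
    json={"source": 1, "data": {"input": scaled_input}},
)
resp.raise_for_status()

# Off-chain responses nest the class probabilities as [[[p_setosa, p_versicolor, p_virginica]]]
probabilities = np.array(resp.json())[0][0]
predicted = CLASSES[int(np.argmax(probabilities))]
print(f"predicted class: {predicted} ({probabilities.max():.1%})")
```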
diff --git a/projects/onnx-iris/container/config.json b/projects/onnx-iris/container/config.json new file mode 100644 index 0000000..aa9856a --- /dev/null +++ b/projects/onnx-iris/container/config.json @@ -0,0 +1,50 @@ +{ + "log_path": "infernet_node.log", + "server": { + "port": 4000 + }, + "chain": { + "enabled": true, + "trail_head_blocks": 0, + "rpc_url": "http://host.docker.internal:8545", + "coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "wallet": { + "max_gas_limit": 4000000, + "private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" + } + }, + "startup_wait": 1.0, + "docker": { + "username": "your-username", + "password": "" + }, + "redis": { + "host": "redis", + "port": 6379 + }, + "forward_stats": true, + "containers": [ + { + "id": "onnx-iris", + "image": "ritualnetwork/example-onnx-iris-infernet:latest", + "external": true, + "port": "3000", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "--bind=0.0.0.0:3000 --workers=2", + "env": {} + }, + { + "id": "anvil-node", + "image": "ritualnetwork/infernet-anvil:0.0.0", + "external": true, + "port": "8545", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "", + "env": {} + } + ] +} diff --git a/projects/onnx-iris/container/scripts/sample_endpoints.py b/projects/onnx-iris/container/scripts/sample_endpoints.py new file mode 100644 index 0000000..37f9571 --- /dev/null +++ b/projects/onnx-iris/container/scripts/sample_endpoints.py @@ -0,0 +1,52 @@ +import asyncio + +import aiohttp +from eth_abi import encode, decode # type: ignore + + +async def ping(session: aiohttp.ClientSession) -> None: + async with session.get("http://127.0.0.1:3000/") as response: + print(await response.text()) + + +async def post_directly_web2(session: aiohttp.ClientSession) -> None: + async with session.post( + "http://127.0.0.1:3000/service_output", + json={ + "source": 1, + "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}, + }, + ) as response: + print(await response.json()) + + +async def post_directly_web3(session: aiohttp.ClientSession) -> None: + async with session.post( + "http://127.0.0.1:3000/service_output", + json={ + "source": 0, + "data": encode( + ["uint256[]"], [[1_038_004, 558_610, 1_103_782, 1_712_096]] + ).hex(), + }, + ) as response: + print(await response.text()) + result = await response.json() + output = result["raw_output"] + result = decode(["uint256[]"], bytes.fromhex(output))[0] + print(f"result: {result}") + + +# async maine +async def main(session: aiohttp.ClientSession) -> None: + await post_directly_web3(session) + + +if __name__ == "__main__": + # run main async + + async def provide_session() -> None: + async with aiohttp.ClientSession() as session: + await main(session) + + asyncio.run(provide_session()) diff --git a/projects/onnx-iris/container/src/app.py b/projects/onnx-iris/container/src/app.py new file mode 100644 index 0000000..46cfde9 --- /dev/null +++ b/projects/onnx-iris/container/src/app.py @@ -0,0 +1,107 @@ +import logging +from typing import Any, cast, List + +import numpy as np +from eth_abi import decode, encode # type: ignore +from infernet_ml.utils.model_loader import ModelSource +from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource +from infernet_ml.workflows.inference.onnx_inference_workflow import ( + ONNXInferenceWorkflow, +) +from quart import Quart, request +from quart.json.provider import DefaultJSONProvider + +log = 
logging.getLogger(__name__) + + +class NumpyJsonEncodingProvider(DefaultJSONProvider): + @staticmethod + def default(obj: Any) -> Any: + if isinstance(obj, np.ndarray): + # Convert NumPy arrays to list + return obj.tolist() + # fallback to default JSON encoding + return DefaultJSONProvider.default(obj) + + +def create_app() -> Quart: + Quart.json_provider_class = NumpyJsonEncodingProvider + app = Quart(__name__) + # we are downloading the model from the hub. + # model repo is located at: https://huggingface.co/Ritual-Net/iris-dataset + model_source = ModelSource.HUGGINGFACE_HUB + model_args = {"repo_id": "Ritual-Net/iris-dataset", "filename": "iris.onnx"} + + workflow = ONNXInferenceWorkflow(model_source=model_source, model_args=model_args) + workflow.setup() + + @app.route("/") + def index() -> str: + """ + Utility endpoint to check if the service is running. + """ + return "ONNX Iris Classifier Example Program" + + @app.route("/service_output", methods=["POST"]) + async def inference() -> dict[str, Any]: + req_data = await request.get_json() + """ + InfernetInput has the format: + source: (0 on-chain, 1 off-chain) + data: dict[str, Any] + """ + infernet_input: InfernetInput = InfernetInput(**req_data) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + web2_input = cast(dict[str, Any], infernet_input.data) + values = cast(List[List[float]], web2_input["input"]) + else: + # On-chain requests are sent as a generalized hex-string which we will + # decode to the appropriate format. + web3_input: List[int] = decode( + ["uint256[]"], bytes.fromhex(cast(str, infernet_input.data)) + )[0] + values = [[float(v) / 1e6 for v in web3_input]] + + """ + The input to the onnx inference workflow needs to conform to ONNX runtime's + input_feed format. For more information refer to: + https://docs.ritual.net/ml-workflows/inference-workflows/onnx_inference_workflow + """ + result: dict[str, Any] = workflow.inference({"input": values}) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + """ + In case of an off-chain request, the result is returned as is. + """ + return result + else: + """ + In case of an on-chain request, the result is returned in the format: + { + "raw_input": str, + "processed_input": str, + "raw_output": str, + "processed_output": str, + "proof": str, + } + refer to: https://docs.ritual.net/infernet/node/containers for more info. + """ + predictions = cast(List[List[List[float]]], result) + predictions_normalized = [int(p * 1e6) for p in predictions[0][0]] + return { + "raw_input": "", + "processed_input": "", + "raw_output": encode(["uint256[]"], [predictions_normalized]).hex(), + "processed_output": "", + "proof": "", + } + + return app + + +if __name__ == "__main__": + """ + Utility to run the app locally. For development purposes only. 
+    """
+    create_app().run(port=3000)
diff --git a/projects/onnx-iris/container/src/requirements.txt b/projects/onnx-iris/container/src/requirements.txt
new file mode 100644
index 0000000..be6cb85
--- /dev/null
+++ b/projects/onnx-iris/container/src/requirements.txt
@@ -0,0 +1,7 @@
+quart==0.19.4
+infernet_ml==0.1.0
+PyArweave @ git+https://github.com/ritual-net/pyarweave.git
+web3==6.15.0
+onnx==1.15.0
+onnxruntime==1.16.3
+torch==2.1.2
diff --git a/projects/onnx-iris/contracts/.gitignore b/projects/onnx-iris/contracts/.gitignore
new file mode 100644
index 0000000..85198aa
--- /dev/null
+++ b/projects/onnx-iris/contracts/.gitignore
@@ -0,0 +1,14 @@
+# Compiler files
+cache/
+out/
+
+# Ignores development broadcast logs
+!/broadcast
+/broadcast/*/31337/
+/broadcast/**/dry-run/
+
+# Docs
+docs/
+
+# Dotenv file
+.env
diff --git a/projects/onnx-iris/contracts/Makefile b/projects/onnx-iris/contracts/Makefile
new file mode 100644
index 0000000..2af9de7
--- /dev/null
+++ b/projects/onnx-iris/contracts/Makefile
@@ -0,0 +1,14 @@
+# phony targets are targets that don't actually create a file
+.phony: deploy
+
+# anvil's third default private key
+sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a
+RPC_URL := http://localhost:8545
+
+# deploying the contract
+deploy:
+	@PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL)
+
+# calling classifyIris() on the deployed contract
+call-contract:
+	@PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL)
diff --git a/projects/onnx-iris/contracts/README.md b/projects/onnx-iris/contracts/README.md
new file mode 100644
index 0000000..e90b151
--- /dev/null
+++ b/projects/onnx-iris/contracts/README.md
@@ -0,0 +1,41 @@
+# `ONNX` Consumer Contracts
+
+This is a [foundry](https://book.getfoundry.sh/) project that implements a simple Consumer
+contract, [`IrisClassifier`](./src/IrisClassifier.sol).
+
+This README explains how to compile and deploy the contract to the Infernet Anvil Testnet.
+
+> [!IMPORTANT]
+> Ensure that you are running the following scripts against the Infernet Anvil Testnet.
+> The [tutorial](../../hello-world/README.md) at the root of this repository explains how to
+> bring up an infernet node.
+
+### Installing the libraries
+
+```bash
+forge install
+```
+
+### Compiling the contracts
+
+```bash
+forge compile
+```
+
+### Deploying the contracts
+The deploy script at `script/Deploy.s.sol` deploys the `IrisClassifier` contract to the Infernet Anvil Testnet.
+
+We have the [following make target](./Makefile#L9) to deploy the contract. Refer to the Makefile
+for more details on the deploy script.
+```bash
+make deploy
+```
+
+### Requesting a job
+We also have a script called `CallContract.s.sol` that requests a job from the `IrisClassifier` contract.
+Refer to the [script](./script/CallContract.s.sol) for more details. Similar to deployment,
+you can run that script using the following convenience make target:
+```bash
+make call-contract
+```
+Refer to the [Makefile](./Makefile#L14) for more details.
diff --git a/projects/onnx-iris/contracts/foundry.toml b/projects/onnx-iris/contracts/foundry.toml new file mode 100644 index 0000000..83816a2 --- /dev/null +++ b/projects/onnx-iris/contracts/foundry.toml @@ -0,0 +1,7 @@ +[profile.default] +src = "src" +out = "out" +libs = ["lib"] +via_ir = true + +# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options diff --git a/projects/onnx-iris/contracts/remappings.txt b/projects/onnx-iris/contracts/remappings.txt new file mode 100644 index 0000000..c788350 --- /dev/null +++ b/projects/onnx-iris/contracts/remappings.txt @@ -0,0 +1,2 @@ +forge-std/=lib/forge-std/src +infernet-sdk/=lib/infernet-sdk/src diff --git a/projects/onnx-iris/contracts/script/CallContract.s.sol b/projects/onnx-iris/contracts/script/CallContract.s.sol new file mode 100644 index 0000000..3612da2 --- /dev/null +++ b/projects/onnx-iris/contracts/script/CallContract.s.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.0; + +import {Script, console2} from "forge-std/Script.sol"; +import {IrisClassifier} from "../src/IrisClassifier.sol"; + +contract CallContract is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + IrisClassifier irisClassifier = IrisClassifier(0x663F3ad617193148711d28f5334eE4Ed07016602); + + irisClassifier.classifyIris(); + + vm.stopBroadcast(); + } +} diff --git a/projects/onnx-iris/contracts/script/Deploy.s.sol b/projects/onnx-iris/contracts/script/Deploy.s.sol new file mode 100644 index 0000000..94fb53e --- /dev/null +++ b/projects/onnx-iris/contracts/script/Deploy.s.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {Script, console2} from "forge-std/Script.sol"; +import {IrisClassifier} from "../src/IrisClassifier.sol"; + +contract Deploy is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + // Log address + address deployerAddress = vm.addr(deployerPrivateKey); + console2.log("Loaded deployer: ", deployerAddress); + + address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3; + // Create consumer + IrisClassifier classifier = new IrisClassifier(coordinator); + console2.log("Deployed IrisClassifier: ", address(classifier)); + + // Execute + vm.stopBroadcast(); + vm.broadcast(); + } +} diff --git a/projects/onnx-iris/contracts/src/IrisClassifier.sol b/projects/onnx-iris/contracts/src/IrisClassifier.sol new file mode 100644 index 0000000..1fd5eb4 --- /dev/null +++ b/projects/onnx-iris/contracts/src/IrisClassifier.sol @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {console2} from "forge-std/console2.sol"; +import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol"; + + +contract IrisClassifier is CallbackConsumer { + string private EXTREMELY_COOL_BANNER = "\n\n" + "_____ _____ _______ _ _ _\n" + "| __ \\|_ _|__ __| | | | /\\ | |\n" + "| |__) | | | | | | | | | / \\ | |\n" + "| _ / | | | | | | | |/ /\\ \\ | |\n" + "| | \\ \\ _| |_ | | | |__| / ____ \\| |____\n" + "|_| \\_\\_____| |_| \\____/_/ \\_\\______|\n\n"; + + constructor(address coordinator) CallbackConsumer(coordinator) {} + + function classifyIris() public { + /// @dev Iris data is in the following format: + /// @dev [sepal_length, sepal_width, petal_length, petal_width] + /// @dev the 
following vector corresponds to the following properties: + /// "sepal_length": 5.5cm + /// "sepal_width": 2.4cm + /// "petal_length": 3.8cm + /// "petal_width": 1.1cm + /// @dev The data is normalized & scaled. + /// refer to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13) + /// for more info on normalization. + /// @dev The data is adjusted by 6 decimals + + uint256[] memory iris_data = new uint256[](4); + iris_data[0] = 1_038_004; + iris_data[1] = 558_610; + iris_data[2] = 1_103_782; + iris_data[3] = 1_712_096; + + _requestCompute( + "onnx-iris", + abi.encode(iris_data), + 20 gwei, + 1_000_000, + 1 + ); + } + + function _receiveCompute( + uint32 subscriptionId, + uint32 interval, + uint16 redundancy, + address node, + bytes calldata input, + bytes calldata output, + bytes calldata proof + ) internal override { + console2.log(EXTREMELY_COOL_BANNER); + (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes)); + (uint256[] memory classes) = abi.decode(raw_output, (uint256[])); + uint256 setosa = classes[0]; + uint256 versicolor = classes[1]; + uint256 virginica = classes[2]; + console2.log("predictions: (adjusted by 6 decimals, 1_000_000 = 100%, 1_000 = 0.1%)"); + console2.log("Setosa: ", setosa); + console2.log("Versicolor: ", versicolor); + console2.log("Virginica: ", virginica); + } +} diff --git a/projects/onnx-iris/onnx-iris.md b/projects/onnx-iris/onnx-iris.md new file mode 100644 index 0000000..b3b01cd --- /dev/null +++ b/projects/onnx-iris/onnx-iris.md @@ -0,0 +1,271 @@ +# Running an ONNX Model on Infernet + +Welcome to this comprehensive guide where we'll explore how to run an ONNX model on Infernet, using our [infernet-container-starter](https://github.com/ritual-net/infernet-container-starter/) +examples repository. This tutorial is designed to give you and end-to-end understanding of how you can run your own +custom pre-trained models, and interact with them on-chain and off-chain. + +**Model:** This example uses a pre-trained model to classify iris flowers. The code for the model +is located at our [`simple-ml-models`](https://github.com/ritual-net/simple-ml-models/tree/main/iris_classification) repository. + +## Pre-requisites + +For this tutorial you'll need to have the following installed. + +1. [Docker](https://docs.docker.com/engine/install/) +2. [Foundry](https://book.getfoundry.sh/getting-started/installation) + +### Ensure `docker` & `foundry` exist + +To check for `docker`, run the following command in your terminal: + +```bash copy +docker --version +# Docker version 25.0.2, build 29cf629 (example output) +``` + +You'll also need to ensure that docker-compose exists in your terminal: + +```bash copy +which docker-compose +# /usr/local/bin/docker-compose (example output) +``` + +To check for `foundry`, run the following command in your terminal: + +```bash copy +forge --version +# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output) +``` + +### Clone the starter repository + +If you haven't already, clone the infernet-container-starter repository. All of the code for this tutorial is located +under the `projects/onnx-iris` directory. 
+ +```bash copy +# Clone locally +git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter +# Navigate to the repository +cd infernet-container-starter +``` + +## Making Inference Requests via Node API (a la Web2 request) + +### Build the `onnx-iris` container + +From the top-level directory of this repository, simply run the following command to build the `onnx-iris` container: + +```bash copy +make build-container project=onnx-iris +``` + +After the container is built, you can deploy an infernet-node that utilizes that +container by running the following command: + +```bash copy +make deploy-container project=onnx-iris +``` + +Now, you can make inference requests to the infernet-node. In a new tab, run: + +```bash copy +curl -X POST "http://127.0.0.1:4000/api/jobs" \ + -H "Content-Type: application/json" \ + -d '{"containers":["onnx-iris"], "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}}' +``` + +You should get an output similar to the following: + +```json +{ + "id": "074b9e98-f1f6-463c-b185-651878f3b4f6" +} +``` + +Now, you can check the status of the job by running (Make sure job id matches the one +you got from the previous request): + +```bash +curl -X GET "http://127.0.0.1:4000/api/jobs?id=074b9e98-f1f6-463c-b185-651878f3b4f6" +``` + +Should return: + +```json +[ + { + "id": "074b9e98-f1f6-463c-b185-651878f3b4f6", + "result": { + "container": "onnx-iris", + "output": [ + [ + [ + 0.0010151526657864451, + 0.014391022734344006, + 0.9845937490463257 + ] + ] + ] + }, + "status": "success" + } +] +``` + +The `output` corresponds to the model's prediction for each of the classes: + +```python +['setosa', 'versicolor', 'virginica'] +``` + +In this case, the model predicts that the input corresponds to the class `virginica`with +a probability of `0.9845937490463257`(~98.5%). + +#### Note Regarding the Input + +The inputs provided above correspond to an iris flower with the following +characteristics. Refer to the + +1. Sepal Length: `5.5cm` +2. Sepal Width: `2.4cm` +3. Petal Length: `3.8cm` +4. Petal Width: `1.1cm` + +Putting this input into a vector and scaling it, we get the following scaled input: + +```python +[1.0380048, 0.5586108, 1.1037828, 1.712096] +``` + +Refer +to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13) +for more information on how the input is scaled. + +For more context on the Iris dataset, refer to +the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/iris). + +## Making Inference Requests via Contracts (a la Web3 request) + +The [contracts](contracts) directory contains a simple forge +project that can be used to interact with the Infernet Node. + +Here, we have a very simple +contract, [IrisClassifier](contracts/src/IrisClassifier.sol), +that requests a compute job from the Infernet Node and then retrieves the result. +We are going to make the same request as above, but this time using a smart contract. +Since floats are not supported in Solidity, we convert all floats to `uint256` by +multiplying the input vector entries by `1e6`: + +```Solidity + uint256[] memory iris_data = new uint256[](4); +iris_data[0] = 1_038_004; +iris_data[1] = 558_610; +iris_data[2] = 1_103_782; +iris_data[3] = 1_712_096; +``` + +We have multiplied the input by 1e6 to have enough accuracy. This can be seen +[here](contracts/src/IrisClassifier.sol#19) in the contract's +code. 
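+
+To make the 6-decimal fixed-point round trip concrete, here is a small illustrative sketch
+using the same `eth_abi` helpers that the container code uses. It mirrors the scaling done
+in the contract and the decoding/re-encoding done in the container; the snippet itself is
+not part of the project:
+
+```python
+# Sketch of the 6-decimal fixed-point round trip used by this example.
+from eth_abi import decode, encode
+
+# 1. The contract scales the float features by 1e6 and abi-encodes them as uint256[]
+features = [1.0380048, 0.5586108, 1.1037828, 1.712096]
+scaled = [int(x * 1e6) for x in features]        # [1038004, 558610, 1103782, 1712096]
+payload = encode(["uint256[]"], [scaled]).hex()  # hex string handed to the container
+
+# 2. The container decodes the hex payload and rescales back to floats before inference
+decoded = decode(["uint256[]"], bytes.fromhex(payload))[0]
+values = [[v / 1e6 for v in decoded]]
+
+# 3. The output probabilities are scaled by 1e6 again so they fit into a uint256[]
+probabilities = [0.0010151526657864451, 0.014391022734344006, 0.9845937490463257]
+raw_output = encode(["uint256[]"], [[int(p * 1e6) for p in probabilities]]).hex()
+print(values, raw_output[:64], "...")
+```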
+ +### Monitoring the EVM Logs + +The infernet node configuration for this project includes +an [infernet anvil node](projects/hello-world/README.mdllo-world/README.md#77) with pre-deployed contracts. You can view the +logs of the anvil node to see what's going on. In a new terminal, run: + +```bash +docker logs -f anvil-node +``` + +As you deploy the contract and make requests, you should see logs indicating the +requests and responses. + +### Deploying the Contract + +Simply run the following command to deploy the contract: + +```bash +project=onnx-iris make deploy-contracts +``` + +In your anvil logs you should see the following: + +```bash +eth_getTransactionReceipt + + Transaction: 0xeed605eacdace39a48635f6d14215b386523766f80a113b4484f542d862889a4 + Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602 + Gas used: 714269 + + Block Number: 1 + Block Hash: 0x4e6333f91e86a0a0be357b63fba9eb5f5ba287805ac35aaa7698fd05445730f5 + Block Time: "Mon, 19 Feb 2024 20:31:17 +0000" + +eth_blockNumber +``` + +beautiful, we can see that a new contract has been created +at `0x663f3ad617193148711d28f5334ee4ed07016602`. That's the address of +the `IrisClassifier` contract. We are now going to call this contract. To do so, +we are using +the [CallContract.s.sol](contracts/script/CallContract.s.sol) +script. Note that the address of the +contract [is hardcoded in the script](contracts/script/CallContract.s.sol#L13), +and should match the address we see above. Since this is a test environment and we're +using a test deployer address, this address is quite deterministic and shouldn't change. +Otherwise, change the address in the script to match the address of the contract you +just deployed. + +### Calling the Contract + +To call the contract, run the following command: + +```bash +project=onnx-iris make call-contract +``` + +In the anvil logs, you should see the following: + +```bash +eth_sendRawTransaction + + +_____ _____ _______ _ _ _ +| __ \|_ _|__ __| | | | /\ | | +| |__) | | | | | | | | | / \ | | +| _ / | | | | | | | |/ /\ \ | | +| | \ \ _| |_ | | | |__| / ____ \| |____ +|_| \_\_____| |_| \____/_/ \_\______| + + +predictions: (adjusted by 6 decimals, 1_000_000 = 100%, 1_000 = 0.1%) +Setosa: 1015 +Versicolor: 14391 +Virginica: 984593 + + Transaction: 0x77c7ff26ed20ffb1a32baf467a3cead6ed81fe5ae7d2e419491ca92b4ac826f0 + Gas used: 111091 + + Block Number: 3 + Block Hash: 0x78f98f4d54ebdca2a8aa46c3b9b7e7ae36348373dbeb83c91a4600dd6aba2c55 + Block Time: "Mon, 19 Feb 2024 20:33:00 +0000" + +eth_blockNumber +eth_newFilter +eth_getFilterLogs +``` + +Beautiful! We can see that the same result has been posted to the contract. + +### Next Steps + +From here, you can bring your own pre-trained ONNX model, and with minimal changes, you can make it both work with an +infernet-node as well as a smart contract. + +### More Information + +1. Check out our [other examples](../../readme.md) if you haven't already +2. [Infernet Callback Consumer Tutorial](https://docs.ritual.net/infernet/sdk/consumers/Callback) +3. [Infernet Nodes Docoumentation](https://docs.ritual.net/infernet/node/introduction) +4. 
[Infernet-Compatible Containers](https://docs.ritual.net/infernet/node/containers)
diff --git a/projects/prompt-to-nft/.gitignore b/projects/prompt-to-nft/.gitignore
new file mode 100644
index 0000000..0ac1401
--- /dev/null
+++ b/projects/prompt-to-nft/.gitignore
@@ -0,0 +1,2 @@
+# modal service outputs
+modal/outputs
diff --git a/projects/prompt-to-nft/container/.gitignore b/projects/prompt-to-nft/container/.gitignore
new file mode 100644
index 0000000..9c6605c
--- /dev/null
+++ b/projects/prompt-to-nft/container/.gitignore
@@ -0,0 +1,3 @@
+wallet
+config.json
+**/keyfile-arweave.json
diff --git a/projects/prompt-to-nft/container/Dockerfile b/projects/prompt-to-nft/container/Dockerfile
new file mode 100644
index 0000000..eb12000
--- /dev/null
+++ b/projects/prompt-to-nft/container/Dockerfile
@@ -0,0 +1,27 @@
+FROM python:3.11-slim as builder
+
+WORKDIR /app
+
+ENV PYTHONUNBUFFERED 1
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PIP_NO_CACHE_DIR 1
+ENV RUNTIME docker
+ENV PYTHONPATH src
+
+RUN apt-get update
+RUN apt-get install -y git curl
+
+# install uv
+ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
+RUN /install.sh && rm /install.sh
+
+COPY src/requirements.txt .
+
+RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt
+
+COPY wallet wallet
+
+COPY src src
+
+ENTRYPOINT ["hypercorn", "app:create_app()"]
+CMD ["-b", "0.0.0.0:3000"]
diff --git a/projects/prompt-to-nft/container/Makefile b/projects/prompt-to-nft/container/Makefile
new file mode 100644
index 0000000..2c67657
--- /dev/null
+++ b/projects/prompt-to-nft/container/Makefile
@@ -0,0 +1,23 @@
+DOCKER_ORG := ritualnetwork
+EXAMPLE_NAME := prompt-to-nft
+TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest
+
+.phony: build run build-multiplatform
+
+build:
+ifdef CI
+	mkdir -p wallet # in CI there is no wallet directory; create an empty one so that the image
+	# still builds successfully
+endif
+	@docker build -t $(TAG) .
+
+wallet_dir ?= /app/wallet
+
+run:
+	docker run -p 3000:3000 -v ./wallet:$(wallet_dir) --env-file prompt_to_nft.env $(TAG)
+
+# You may need to set up a docker builder, to do so run:
+# docker buildx create --name mybuilder --bootstrap --use
+# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info
+build-multiplatform:
+	docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push .
diff --git a/projects/prompt-to-nft/container/README.md b/projects/prompt-to-nft/container/README.md
new file mode 100644
index 0000000..67c25fe
--- /dev/null
+++ b/projects/prompt-to-nft/container/README.md
@@ -0,0 +1,91 @@
+# Prompt-to-NFT Container
+
+## Overview
+
+This container accepts a text prompt, forwards it to a Stable Diffusion service to
+generate an image, uploads the resulting image to Arweave, and returns the Arweave
+transaction hash along with the image URL.
+
+## Building & Running the Container in Isolation
+
+Note that this container is meant to be started by the infernet-node. For development &
+testing purposes, you can run the container in isolation using the following commands.
+
+### Building the Container
+
+Simply run the following command to build the container:
+
+```bash
+make build
+```
+
+Consult the [Makefile](./Makefile) for the build command.
+
+### Adding the Arweave Wallet File
+
+Add your Arweave wallet file to a `wallet/` directory inside this folder. By default the
+container looks for `wallet/keyfile-arweave.json`; the path can be changed via the
+`ARWEAVE_WALLET_FILE_PATH` environment variable (see [config.sample.json](./config.sample.json)).
+
+### Running the Container
+
+To run the container, you can use the following command:
+
+```bash
+make run
+```
+
+## Testing the Container
+
+Run the following command to request an image generation:
+
+```bash
+curl -X POST http://127.0.0.1:3000/service_output \
+     -H "Content-Type: application/json" \
+     -d '{"source":1, "data": {"prompt": "a golden retriever skiing"}}'
+```
+
+#### Note Regarding the Input
+
+The request body follows the off-chain `InfernetInput` format: `source` is `1` for
+off-chain requests, and `data` carries the `prompt` describing the image to generate.
+
+### Output
+
+The container forwards the prompt to the configured Stable Diffusion service, uploads the
+generated image to Arweave, and returns a response similar to the following:
+
+```json
+{
+  "prompt": "a golden retriever skiing",
+  "hash": "<arweave-transaction-id>",
+  "image_url": "https://arweave.net/<arweave-transaction-id>"
+}
+```
+
+The `image_url` points to the uploaded image on Arweave.
diff --git a/projects/prompt-to-nft/container/config.sample.json b/projects/prompt-to-nft/container/config.sample.json
new file mode 100644
index 0000000..40f05de
--- /dev/null
+++ b/projects/prompt-to-nft/container/config.sample.json
@@ -0,0 +1,53 @@
+{
+  "log_path": "infernet_node.log",
+  "server": {
+    "port": 4000
+  },
+  "chain": {
+    "enabled": true,
+    "trail_head_blocks": 0,
+    "rpc_url": "http://host.docker.internal:8545",
+    "coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
+    "wallet": {
+      "max_gas_limit": 4000000,
+      "private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
+    }
+  },
+  "startup_wait": 1.0,
+  "docker": {
+    "username": "your-username",
+    "password": ""
+  },
+  "redis": {
+    "host": "redis",
+    "port": 6379
+  },
+  "forward_stats": true,
+  "containers": [
+    {
+      "id": "prompt-to-nft",
+      "image": "ritualnetwork/example-prompt-to-nft-infernet:latest",
+      "external": true,
+      "port": "3000",
+      "allowed_delegate_addresses": [],
+      "allowed_addresses": [],
+      "allowed_ips": [],
+      "command": "--bind=0.0.0.0:3000 --workers=2",
+      "env": {
+        "ARWEAVE_WALLET_FILE_PATH": "wallet/keyfile-arweave.json",
+        "IMAGE_GEN_SERVICE_URL": "http://your.services.ip:port"
+      }
+    },
+    {
+      "id": "anvil-node",
+      "image": "ritualnetwork/infernet-anvil:0.0.0",
+      "external": true,
+      "port": "8545",
+      "allowed_delegate_addresses": [],
+      "allowed_addresses": [],
+      "allowed_ips": [],
+      "command": "",
+      "env": {}
+    }
+  ]
+}
diff --git a/projects/prompt-to-nft/container/prompt_to_nft.env.sample b/projects/prompt-to-nft/container/prompt_to_nft.env.sample
new file mode 100644
index 0000000..1483d42
--- /dev/null
+++ b/projects/prompt-to-nft/container/prompt_to_nft.env.sample
@@ -0,0 +1,2 @@
+ARWEAVE_WALLET_FILE_PATH=
+IMAGE_GEN_SERVICE_URL=
diff --git a/projects/prompt-to-nft/container/src/__init__.py
b/projects/prompt-to-nft/container/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/projects/prompt-to-nft/container/src/app.py b/projects/prompt-to-nft/container/src/app.py new file mode 100644 index 0000000..4ae07c9 --- /dev/null +++ b/projects/prompt-to-nft/container/src/app.py @@ -0,0 +1,109 @@ +import logging +import os +from pathlib import Path +from typing import Any, cast + +import aiohttp +from eth_abi import decode, encode # type: ignore +from infernet_ml.utils.arweave import upload, load_wallet +from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource +from quart import Quart, request + +log = logging.getLogger(__name__) + + +async def run_inference(prompt: str, output_path: str) -> None: + async with aiohttp.ClientSession() as session: + app_url = os.getenv("IMAGE_GEN_SERVICE_URL") + async with session.post( + f"{app_url}/service_output", + json={ + "prompt": prompt, + }, + ) as response: + image_bytes = await response.read() + with open(output_path, "wb") as f: + f.write(image_bytes) + + +def ensure_env_vars() -> None: + if not os.getenv("IMAGE_GEN_SERVICE_URL"): + raise ValueError("IMAGE_GEN_SERVICE_URL environment variable not set") + load_wallet() + + +def create_app() -> Quart: + app = Quart(__name__) + ensure_env_vars() + + @app.route("/") + def index() -> str: + """ + Utility endpoint to check if the service is running. + """ + return "Stable Diffusion Example Program" + + @app.route("/service_output", methods=["POST"]) + async def inference() -> dict[str, Any]: + req_data = await request.get_json() + """ + InfernetInput has the format: + source: (0 on-chain, 1 off-chain) + data: dict[str, Any] + """ + infernet_input: InfernetInput = InfernetInput(**req_data) + temp_file = "image.png" + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + prompt: str = cast(dict[str, str], infernet_input.data)["prompt"] + else: + # On-chain requests are sent as a generalized hex-string which we will + # decode to the appropriate format. + (prompt, mintTo) = decode( + ["string", "address"], bytes.fromhex(cast(str, infernet_input.data)) + ) + log.info("mintTo: %s", mintTo) + log.info("prompt: %s", prompt) + + # run the inference and download the image to a temp file + await run_inference(prompt, temp_file) + + tx = upload(Path(temp_file), {"Content-Type": "image/png"}) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + """ + In case of an off-chain request, the result is returned as is. + """ + return { + "prompt": prompt, + "hash": tx.id, + "image_url": f"https://arweave.net/{tx.id}", + } + else: + """ + In case of an on-chain request, the result is returned in the format: + { + "raw_input": str, + "processed_input": str, + "raw_output": str, + "processed_output": str, + "proof": str, + } + refer to: https://docs.ritual.net/infernet/node/containers for more info. + """ + return { + "raw_input": infernet_input.data, + "processed_input": "", + "raw_output": encode(["string"], [tx.id]).hex(), + "processed_output": "", + "proof": "", + } + + return app + + +if __name__ == "__main__": + """ + Utility to run the app locally. For development purposes only. 
+ """ + create_app().run(host="0.0.0.0", port=3000) diff --git a/projects/prompt-to-nft/container/src/requirements.txt b/projects/prompt-to-nft/container/src/requirements.txt new file mode 100644 index 0000000..5ac781d --- /dev/null +++ b/projects/prompt-to-nft/container/src/requirements.txt @@ -0,0 +1,5 @@ +quart==0.19.4 +infernet_ml==0.1.0 +PyArweave @ git+https://github.com/ritual-net/pyarweave.git +web3==6.15.0 +tqdm==4.66.1 diff --git a/projects/prompt-to-nft/contracts/.gitignore b/projects/prompt-to-nft/contracts/.gitignore new file mode 100644 index 0000000..85198aa --- /dev/null +++ b/projects/prompt-to-nft/contracts/.gitignore @@ -0,0 +1,14 @@ +# Compiler files +cache/ +out/ + +# Ignores development broadcast logs +!/broadcast +/broadcast/*/31337/ +/broadcast/**/dry-run/ + +# Docs +docs/ + +# Dotenv file +.env diff --git a/projects/prompt-to-nft/contracts/Makefile b/projects/prompt-to-nft/contracts/Makefile new file mode 100644 index 0000000..2af9de7 --- /dev/null +++ b/projects/prompt-to-nft/contracts/Makefile @@ -0,0 +1,14 @@ +# phony targets are targets that don't actually create a file +.phony: deploy + +# anvil's third default address +sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a +RPC_URL := http://localhost:8545 + +# deploying the contract +deploy: + @PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL) + +# calling sayGM() +call-contract: + @PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL) diff --git a/projects/prompt-to-nft/contracts/foundry.toml b/projects/prompt-to-nft/contracts/foundry.toml new file mode 100644 index 0000000..83816a2 --- /dev/null +++ b/projects/prompt-to-nft/contracts/foundry.toml @@ -0,0 +1,7 @@ +[profile.default] +src = "src" +out = "out" +libs = ["lib"] +via_ir = true + +# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options diff --git a/projects/prompt-to-nft/contracts/remappings.txt b/projects/prompt-to-nft/contracts/remappings.txt new file mode 100644 index 0000000..1e986ec --- /dev/null +++ b/projects/prompt-to-nft/contracts/remappings.txt @@ -0,0 +1,3 @@ +forge-std/=lib/forge-std/src +infernet-sdk/=lib/infernet-sdk/src +solmate/=lib/solmate/src diff --git a/projects/prompt-to-nft/contracts/script/CallContract.s.sol b/projects/prompt-to-nft/contracts/script/CallContract.s.sol new file mode 100644 index 0000000..f1c5906 --- /dev/null +++ b/projects/prompt-to-nft/contracts/script/CallContract.s.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.0; + +import {Script, console2} from "forge-std/Script.sol"; +import {DiffusionNFT} from "../src/DiffusionNFT.sol"; + +contract CallContract is Script { +string defaultPrompt = "A picture of a shrimp dunking a basketball"; + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + address mintTo = vm.envOr("mint_to", msg.sender); + string memory prompt = vm.envOr("prompt", defaultPrompt); + vm.startBroadcast(deployerPrivateKey); + + DiffusionNFT nft = DiffusionNFT(0x663F3ad617193148711d28f5334eE4Ed07016602); + + nft.mint(prompt, mintTo); + + vm.stopBroadcast(); + } +} diff --git a/projects/prompt-to-nft/contracts/script/Deploy.s.sol b/projects/prompt-to-nft/contracts/script/Deploy.s.sol new file mode 100644 index 0000000..4520c16 --- /dev/null +++ b/projects/prompt-to-nft/contracts/script/Deploy.s.sol @@ -0,0 +1,26 @@ +// 
SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {Script, console2} from "forge-std/Script.sol"; +import {DiffusionNFT} from "../src/DiffusionNFT.sol"; + +contract Deploy is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + // Log address + address deployerAddress = vm.addr(deployerPrivateKey); + console2.log("Loaded deployer: ", deployerAddress); + + address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3; + // Create consumer + DiffusionNFT nft = new DiffusionNFT(coordinator); + console2.log("Deployed IrisClassifier: ", address(nft)); + + // Execute + vm.stopBroadcast(); + vm.broadcast(); + } +} diff --git a/projects/prompt-to-nft/contracts/src/DiffusionNFT.sol b/projects/prompt-to-nft/contracts/src/DiffusionNFT.sol new file mode 100644 index 0000000..4f6c151 --- /dev/null +++ b/projects/prompt-to-nft/contracts/src/DiffusionNFT.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {console2} from "forge-std/console2.sol"; +import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol"; +import {ERC721} from "solmate/tokens/ERC721.sol"; + +contract DiffusionNFT is CallbackConsumer, ERC721 { + string private EXTREMELY_COOL_BANNER = "\n\n" "_____ _____ _______ _ _ _\n" + "| __ \\|_ _|__ __| | | | /\\ | |\n" "| |__) | | | | | | | | | / \\ | |\n" + "| _ / | | | | | | | |/ /\\ \\ | |\n" "| | \\ \\ _| |_ | | | |__| / ____ \\| |____\n" + "|_| \\_\\_____| |_| \\____/_/ \\_\\______|\n\n"; + + constructor(address coordinator) CallbackConsumer(coordinator) ERC721("DiffusionNFT", "DN") {} + + function mint(string memory prompt, address to) public { + _requestCompute("prompt-to-nft", abi.encode(prompt, to), 20 gwei, 1_000_000, 1); + } + + uint256 public counter = 0; + + mapping(uint256 => string) public arweaveHashes; + + function tokenURI(uint256 tokenId) public view override returns (string memory) { + return string.concat("https://arweave.net/", arweaveHashes[tokenId]); + } + + function nftCollection() public view returns (uint256[] memory) { + uint256 balance = balanceOf(msg.sender); + uint256[] memory collection = new uint256[](balance); + uint256 j = 0; + for (uint256 i = 0; i < counter; i++) { + if (ownerOf(i) == msg.sender) { + collection[j] = i; + j++; + } + } + return collection; + } + + + function _receiveCompute( + uint32 subscriptionId, + uint32 interval, + uint16 redundancy, + address node, + bytes calldata input, + bytes calldata output, + bytes calldata proof + ) internal override { + console2.log(EXTREMELY_COOL_BANNER); + (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes)); + (string memory arweaveHash) = abi.decode(raw_output, (string)); + (bytes memory raw_input, bytes memory processed_input) = abi.decode(input, (bytes, bytes)); + (string memory prompt, address to) = abi.decode(raw_input, (string, address)); + counter += 1; + arweaveHashes[counter] = arweaveHash; + console2.log("nft minted!", string.concat("https://arweave.net/", arweaveHashes[counter])); + console2.log("nft id", counter); + console2.log("nft owner", to); + _mint(to, counter); + } +} diff --git a/projects/prompt-to-nft/img/fetching-from-arweave.png b/projects/prompt-to-nft/img/fetching-from-arweave.png new file mode 100644 index 0000000..70fa007 Binary files /dev/null and b/projects/prompt-to-nft/img/fetching-from-arweave.png differ diff --git 
a/projects/prompt-to-nft/img/just-connected.png b/projects/prompt-to-nft/img/just-connected.png new file mode 100644 index 0000000..5a32bf1 Binary files /dev/null and b/projects/prompt-to-nft/img/just-connected.png differ diff --git a/projects/prompt-to-nft/img/metamask-anvil.png b/projects/prompt-to-nft/img/metamask-anvil.png new file mode 100644 index 0000000..c2d7e4a Binary files /dev/null and b/projects/prompt-to-nft/img/metamask-anvil.png differ diff --git a/projects/prompt-to-nft/img/mint-screen.png b/projects/prompt-to-nft/img/mint-screen.png new file mode 100644 index 0000000..2d51a16 Binary files /dev/null and b/projects/prompt-to-nft/img/mint-screen.png differ diff --git a/projects/prompt-to-nft/img/minted-nft.png b/projects/prompt-to-nft/img/minted-nft.png new file mode 100644 index 0000000..33df427 Binary files /dev/null and b/projects/prompt-to-nft/img/minted-nft.png differ diff --git a/projects/prompt-to-nft/img/ui.png b/projects/prompt-to-nft/img/ui.png new file mode 100644 index 0000000..2353bc2 Binary files /dev/null and b/projects/prompt-to-nft/img/ui.png differ diff --git a/projects/prompt-to-nft/prompt-to-nft.md b/projects/prompt-to-nft/prompt-to-nft.md new file mode 100644 index 0000000..1df6f7c --- /dev/null +++ b/projects/prompt-to-nft/prompt-to-nft.md @@ -0,0 +1,416 @@ +# Prompt to NFT + +In this tutorial we are going to create a dapp where we can generate NFT's by a single prompt from the user. This +project has many components: + +1. A service that runs Stable Diffusion. +2. A NextJS frontend that connects to the local Anvil node +3. An NFT smart contract which is also a [Infernet Consumer](https://docs.ritual.net/infernet/sdk/consumers/Callback). +4. An Infernet container which collects the prompt, calls the Stable Diffusion service, retrieves the NFT and uploads it + to Arweave. +5. An anvil node to which we will deploy the NFT smart contract. + +## Install Pre-requisites + +For this tutorial you'll need to have the following installed. + +1. [Docker](https://docs.docker.com/engine/install/) +2. [Foundry](https://book.getfoundry.sh/getting-started/installation) + +## Setting up a stable diffusion service + +Included with this tutorial, is a [containerized stable-diffusion service](./stablediffusion). + +### Rent a GPU machine +To run this service, you will need to have access to a machine with a powerful GPU. In the video above, we use an +A100 instance on [Paperspace](https://www.paperspace.com/). + +### Install docker +You will have to install docker. + +For Ubuntu, you can run the following commands: + +```bash copy +# install docker +sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +``` +As docker installation may vary depending on your operating system, consult the +[official documentation](https://docs.docker.com/engine/install/ubuntu/) for more information. + +After installation, you can verify that docker is installed by running: + +```bash +# sudo docker run hello-world +Hello from Docker! +``` + +### Ensure CUDA is installed +Depending on where you rent your GPU machine, CUDA is typically pre-installed. For Ubuntu, you can follow the +instructions [here](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#prepare-ubuntu). 
+ +You can verify that CUDA is installed by running: + +```bash copy +# verify Installation +python -c ' +import torch +print("torch.cuda.is_available()", torch.cuda.is_available()) +print("torch.cuda.device_count()", torch.cuda.device_count()) +print("torch.cuda.current_device()", torch.cuda.current_device()) +print("torch.cuda.get_device_name(0)", torch.cuda.get_device_name(0)) +' +``` + +If CUDA is installed and available, your output will look similar to the following: + +```bash +torch.cuda.is_available() True +torch.cuda.device_count() 1 +torch.cuda.current_device() 0 +torch.cuda.get_device_name(0) Tesla V100-SXM2-16GB +``` + +### Ensure `nvidia-container-runtime` is installed +For your container to be able to access the GPU, you will need to install the `nvidia-container-runtime`. +On Ubuntu, you can run the following commands: + +```bash copy +# Docker GPU support +# nvidia container-runtime repos +# https://nvidia.github.io/nvidia-container-runtime/ +curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \ +sudo apt-key add - distribution=$(. /etc/os-release;echo $ID$VERSION_ID) +curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \ +sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list +sudo apt-get update + +# install nvidia-container-runtime +# https://docs.docker.com/config/containers/resource_constraints/#gpu +sudo apt-get install -y nvidia-container-runtime +``` +As always, consult the [official documentation](https://nvidia.github.io/nvidia-container-runtime/) for more +information. + +You can verify that `nvidia-container-runtime` is installed by running: + +```bash copy +which nvidia-container-runtime-hook +# this should return a path to the nvidia-container-runtime-hook +``` + +Now, with the pre-requisites installed, we can move on to setting up the stable diffusion service. + +### Clone this repository + +```bash copy +# Clone locally +git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter +# Navigate to the repository +cd infernet-container-starter +``` + +### Build the Stable Diffusion service + +This will build the `stablediffusion` service container. +```bash copy +make build-service project=prompt-to-nft service=stablediffusion +``` + +### Run the Stable Diffusion service +```bash copy +make run-service project=prompt-to-nft service=stablediffusion +``` + +This will start the `stablediffusion` service. Note that this service will have to download a large model file, +so it may take a few minutes to be fully ready. Downloaded model will get cached, so subsequent runs will be faster. + + +## Setting up the Infernet Node along with the `prompt-to-nft` container + +You can follow the following steps on your local machine to setup the Infernet Node and the `prompt-to-nft` container. + +### Ensure `docker` & `foundry` exist +To check for `docker`, run the following command in your terminal: +```bash copy +docker --version +# Docker version 25.0.2, build 29cf629 (example output) +``` + +You'll also need to ensure that docker-compose exists in your terminal: +```bash copy +which docker-compose +# /usr/local/bin/docker-compose (example output) +``` + +To check for `foundry`, run the following command in your terminal: +```bash copy +forge --version +# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output) +``` + +### Clone the starter repository +Just like our other examples, we're going to clone this repository. 
+All of the code and instructions for this tutorial can be found in the +[`projects/prompt-to-nft`](./prompt-to-nft) +directory of the repository. + +```bash copy +# Clone locally +git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter +# Navigate to the repository +cd infernet-container-starter +``` + +### Configure the `prompt-to-nft` container + +#### Configure the URL for the Stable Diffusion service +The `prompt-to-nft` container needs to know where to find the stable diffusion service. To do this, we need to +modify the configuration file for the `prompt-to-nft` container. We have a sample [config.sample.json](./container/config.sample.json) file. +Simply navigate to the [`projects/prompt-to-nft/container`](./container) directory and set up the config file: + +```bash +cd projects/prompt-to-nft/container +cp config.sample.json config.json +``` + +In the `containers` field, you will see the following: + +```json +"containers": [ + { + // etc. etc. + "env": { + "ARWEAVE_WALLET_FILE_PATH": "/app/wallet/keyfile-arweave.json", + "IMAGE_GEN_SERVICE_URL": "http://your.services.ip:port" // <- replace with your service's IP and port + } + } +}, +``` + +#### Configure the path to your Arweave wallet + +Create a directory named `wallet` in the `container` directory and place your Arweave wallet file in it. + +```bash +mkdir wallet +cp /path/to/your/arweave-wallet.json wallet/keyfile-arweave.json +``` + +By default the `prompt-to-nft` container will look for a wallet file at `/app/wallet/keyfile-arweave.json`. The `wallet` +directory you have created, will get copied into your docker file at the build step below. If your wallet filename is +different, you can change the `ARWEAVE_WALLET_FILE_PATH` environment variable in the `config.json` file. + +```json +"containers": [ + { + // etc. etc. + "env": { + "ARWEAVE_WALLET_FILE_PATH": "/app/wallet/keyfile-arweave.json", // <- replace with your wallet file name + "IMAGE_GEN_SERVICE_URL": "http://your.services.ip:port" + } + } +}, +``` + +### Build the `prompt-to-nft` container + +First, navigate back to the root of the repository. Then simply run the following command to build the `prompt-to-nft` +container: + +```bash copy +cd ../../.. +make build-container project=prompt-to-nft +``` + +### Deploy the `prompt-to-nft` container with Infernet + +You can run a simple command to deploy the `prompt-to-nft` container along with bootstrapping the rest of the +Infernet node stack in one go: + +```bash copy +make deploy-container project=prompt-to-nft +``` + +### Check the running containers + +At this point it makes sense to check the running containers to ensure everything is running as expected. 
+ +```bash +# > docker container ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +0dbc30f67e1e ritualnetwork/example-prompt-to-nft-infernet:latest "hypercorn app:creat…" 8 seconds ago Up 7 seconds +0.0.0.0:3000->3000/tcp prompt-to-nft +0c5140e0f41b ritualnetwork/infernet-anvil:0.0.0 "anvil --host 0.0.0.…" 23 hours ago Up 23 hours +0.0.0.0:8545->3000/tcp anvil-node +f5682ec2ad31 ritualnetwork/infernet-node:latest "/app/entrypoint.sh" 23 hours ago Up 9 seconds +0.0.0.0:4000->4000/tcp deploy-node-1 +c1ece27ba112 fluent/fluent-bit:latest "/fluent-bit/bin/flu…" 23 hours ago Up 10 seconds 2020/tcp, +0.0.0.0:24224->24224/tcp, :::24224->24224/tcp deploy-fluentbit-1 +3cccea24a303 redis:latest "docker-entrypoint.s…" 23 hours ago Up 10 seconds 0.0.0.0:6379->6379/tcp, +:::6379->6379/tcp deploy-redis-1 +``` + +You should see five different images running, including the Infernet node and the prompt-to-nft container. + +## Minting an NFT by directly calling the consumer contract + +In the following steps, we will deploy our NFT consumer contract and call it using a forge script to mint an NFT. + +### Setup + +Notice that in [one of the steps above](#check-the-running-containers) we have an Anvil node running on port `8545`. + +By default, the [`anvil-node`](https://hub.docker.com/r/ritualnetwork/infernet-anvil) image used deploys the +[Infernet SDK](https://docs.ritual.net/infernet/sdk/introduction) and other relevant contracts for you: +- Coordinator: `0x5FbDB2315678afecb367f032d93F642f64180aa3` +- Primary node: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8` + +### Deploy our NFT Consumer contract + +In this step, we will deploy our NFT consumer contract to the Anvil node. Our [`DiffusionNFT.sol`](./contracts/src/DiffusionNFT.sol) +contract is a simple ERC721 contract which implements our consumer interface. + + +#### Anvil logs + +During this process, it is useful to look at the logs of the Anvil node to see what's going on. To follow the logs, +in a new terminal, run: + +```bash copy +docker logs -f anvil-node +``` + +#### Deploying the contract + +Once ready, to deploy the [`DiffusionNFT`](./contracts/src/DiffusionNFT.sol) consumer contract, in another terminal, run: + +```bash copy +make deploy-contracts project=prompt-to-nft +``` + +You should expect to see similar Anvil logs: + +```bash +# > make deploy-contracts project=prompt-to-nft + +eth_getTransactionReceipt + +Transaction: 0x0577dc98192d971bafb30d53cb217c9a9c16f92ab435d20a697024a4f122c048 +Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602 +Gas used: 1582129 + +Block Number: 1 +Block Hash: 0x1113522c8422bde163f21461c7c66496e08d4bb44f56e4131c2af57f8457f5a5 +Block Time: "Wed, 6 Mar 2024 05:03:45 +0000" + +eth_getTransactionByHash +``` + +From our logs, we can see that the `DiffusionNFT` contract has been deployed to address +`0x663f3ad617193148711d28f5334ee4ed07016602`. + +### Call the contract + +Now, let's call the contract to mint an NFT! In the same terminal, run: + +```bash copy +make call-contract project=prompt-to-nft prompt="A golden retriever skiing." 
+``` + +You should first expect to see an initiation transaction sent to the `DiffusionNFT` contract: + +```bash + +eth_getTransactionReceipt + +Transaction: 0x571022944a1aca5647e10a58b2242a83d88f2e54dca0c7b4afe3c4b61fa3faf6 +Gas used: 214390 + +Block Number: 2 +Block Hash: 0x167a45bb2d30ab3732553aafb1755a3e126b2e1ae7ef52ca96bd75acb0eeb5eb +Block Time: "Wed, 6 Mar 2024 05:06:09 +0000" + +``` +Shortly after that you should see another transaction submitted from the Infernet Node which is the +result of your on-chain subscription and its associated job request: + +```bash +eth_sendRawTransaction +_____ _____ _______ _ _ _ +| __ \|_ _|__ __| | | | /\ | | +| |__) | | | | | | | | | / \ | | +| _ / | | | | | | | |/ /\ \ | | +| | \ \ _| |_ | | | |__| / ____ \| |____ +|_| \_\_____| |_| \____/_/ \_\______| + + +nft minted! https://arweave.net/ +nft id 1 +nft owner 0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38 + +Transaction: 0xcaf67e3f627c57652fa563a9b6f0f7fd27911409b3a7317165a6f5dfb5aff9fd +Gas used: 250851 + +Block Number: 3 +Block Hash: 0xfad6f6743bd2d2751723be4c5be6251130b0f06a46ca61c8d77077169214f6a6 +Block Time: "Wed, 6 Mar 2024 05:06:18 +0000" + +eth_blockNumber +``` + +We can now confirm that the address of the Infernet Node (see the logged `node` parameter in the Anvil logs above) +matches the address of the node we setup by default for our Infernet Node. + +We can also see that the owner of the NFT is `0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38` and the NFT has been minted +and uploaded to Arweave. + +Congratulations! 🎉 You have successfully minted an NFT! + +## Minting an NFT from the UI + +This project also includes a simple NextJS frontend that connects to the local Anvil node. This frontend allows you to +connect your wallet and mint an NFT by providing a prompt. + +### Pre-requisites +Ensure that you have the following installed: +1. [NodeJS](https://nodejs.org/en) +2. A node package manager. This can be either `npm`, `yarn`, `pnpm` or `bun`. Of course, we recommend `bun`. + +### Run the UI + +From the top-level directory of the repository, simply run the following command: + +```bash copy +make run-service project=prompt-to-nft service=ui +``` + +This will start the UI service. You can now navigate to `http://localhost:3001` in your browser to see the UI. +![ui image](./img/ui.png)j + +### Connect your wallet +By clicking "Connect Wallet", your wallet will also ask you to switch to our anvil testnet. By accepting, you will be +connected. +![metamask prompt](./img/metamask-anvil.png) + +Here, you should also see the NFT you minted earlier through the direct foundry script. + +![ui just after connecting](./img/just-connected.png) + +### Get Some ETH + +To be able to mint the NFT, you will need some ETH. You can get some testnet ETH the "Request 1 ETH" button at +the top of the page. If your balance does not update, you can refresh the page. + +### Enter a prompt & mint a new NFT +You can now enter a prompt and hit the "Generate NFT" button. A look at your anvil-node & infernet-node logs will +show you the transactions being sent and the NFT being minted. The newly-minted NFT will also appear in the UI. + +![mint screen](./img/mint-screen.png) + +Once your NFT's been generated, the UI will attempt to fetch it from arweave and display it. This usually takes less +than a minute. + +![fetching from arweave](./img/fetching-from-arweave.png) + +And there you have it! You've minted an NFT from a prompt using the UI! 
+![minted nft](./img/minted-nft.png) diff --git a/projects/prompt-to-nft/stablediffusion/Dockerfile b/projects/prompt-to-nft/stablediffusion/Dockerfile new file mode 100644 index 0000000..c83a338 --- /dev/null +++ b/projects/prompt-to-nft/stablediffusion/Dockerfile @@ -0,0 +1,25 @@ +FROM python:3.11-slim as builder + +WORKDIR /app + +ENV PYTHONUNBUFFERED 1 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONPATH src + +WORKDIR /app + +RUN apt-get update +RUN apt-get install -y git curl ffmpeg libsm6 libxext6 + +# install uv +ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh +RUN /install.sh && rm /install.sh + +COPY src/requirements.txt . + +RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt + +COPY src src + +ENTRYPOINT ["hypercorn", "app:create_app()"] +CMD ["-b", "0.0.0.0:3000"] diff --git a/projects/prompt-to-nft/stablediffusion/Makefile b/projects/prompt-to-nft/stablediffusion/Makefile new file mode 100644 index 0000000..e86d22c --- /dev/null +++ b/projects/prompt-to-nft/stablediffusion/Makefile @@ -0,0 +1,19 @@ +DOCKER_ORG := ritualnetwork +EXAMPLE_NAME := stablediffusion +TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest + +.phony: build run build-multiplatform + +build: + @docker build -t $(TAG) . + +port_mapping ?= 0.0.0.0:3002:3000 + +run: + docker run -p $(port_mapping) --gpus all -v ~/.cache:/root/.cache $(TAG) + +# You may need to set up a docker builder, to do so run: +# docker buildx create --name mybuilder --bootstrap --use +# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info +build-multiplatform: + docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push . diff --git a/projects/prompt-to-nft/stablediffusion/src/app.py b/projects/prompt-to-nft/stablediffusion/src/app.py new file mode 100644 index 0000000..d8354cc --- /dev/null +++ b/projects/prompt-to-nft/stablediffusion/src/app.py @@ -0,0 +1,25 @@ +from quart import Quart, request, Response + +from stable_diffusion_workflow import StableDiffusionWorkflow + + +def create_app() -> Quart: + app = Quart(__name__) + workflow = StableDiffusionWorkflow() + workflow.setup() + + @app.get("/") + async def hello(): + return "Hello, World! 
I'm running stable diffusion" + + @app.post("/service_output") + async def service_output(): + req_data = await request.get_json() + image_bytes = workflow.inference(req_data) + return Response(image_bytes, mimetype="image/png") + + return app + + +if __name__ == "__main__": + create_app().run(host="0.0.0.0", port=3002) diff --git a/projects/prompt-to-nft/stablediffusion/src/requirements.txt b/projects/prompt-to-nft/stablediffusion/src/requirements.txt new file mode 100644 index 0000000..e2c6fae --- /dev/null +++ b/projects/prompt-to-nft/stablediffusion/src/requirements.txt @@ -0,0 +1,10 @@ +diffusers~=0.19 +invisible_watermark~=0.1 +transformers==4.36 +accelerate~=0.21 +safetensors~=0.3 +Quart==0.19.4 +jmespath==1.0.1 +huggingface-hub==0.20.3 +infernet_ml==0.1.0 +PyArweave @ git+https://github.com/ritual-net/pyarweave.git diff --git a/projects/prompt-to-nft/stablediffusion/src/stable_diffusion_workflow.py b/projects/prompt-to-nft/stablediffusion/src/stable_diffusion_workflow.py new file mode 100644 index 0000000..29e8736 --- /dev/null +++ b/projects/prompt-to-nft/stablediffusion/src/stable_diffusion_workflow.py @@ -0,0 +1,86 @@ +import io +from typing import Any + +import torch +from diffusers import DiffusionPipeline +from huggingface_hub import snapshot_download +from infernet_ml.workflows.inference.base_inference_workflow import ( + BaseInferenceWorkflow, +) + + +class StableDiffusionWorkflow(BaseInferenceWorkflow): + def __init__( + self, + *args: Any, + **kwargs: Any, + ): + super().__init__(*args, **kwargs) + + def do_setup(self) -> Any: + ignore = [ + "*.bin", + "*.onnx_data", + "*/diffusion_pytorch_model.safetensors", + ] + snapshot_download( + "stabilityai/stable-diffusion-xl-base-1.0", ignore_patterns=ignore + ) + snapshot_download( + "stabilityai/stable-diffusion-xl-refiner-1.0", + ignore_patterns=ignore, + ) + + load_options = dict( + torch_dtype=torch.float16, + use_safetensors=True, + variant="fp16", + device_map="auto", + ) + + # Load base model + self.base = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", **load_options + ) + + # Load refiner model + self.refiner = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-refiner-1.0", + text_encoder_2=self.base.text_encoder_2, + vae=self.base.vae, + **load_options, + ) + + def do_preprocessing(self, input_data: dict[str, Any]) -> dict[str, Any]: + return input_data + + def do_run_model(self, input: dict[str, Any]) -> bytes: + negative_prompt = input.get("negative_prompt", "disfigured, ugly, deformed") + prompt = input["prompt"] + n_steps = input.get("n_steps", 24) + high_noise_frac = input.get("high_noise_frac", 0.8) + + image = self.base( + prompt=prompt, + negative_prompt=negative_prompt, + num_inference_steps=n_steps, + denoising_end=high_noise_frac, + output_type="latent", + ).images + + image = self.refiner( + prompt=prompt, + negative_prompt=negative_prompt, + num_inference_steps=n_steps, + denoising_start=high_noise_frac, + image=image, + ).images[0] + + byte_stream = io.BytesIO() + image.save(byte_stream, format="PNG") + image_bytes = byte_stream.getvalue() + + return image_bytes + + def do_postprocessing(self, input: Any, output: Any) -> Any: + return output diff --git a/projects/prompt-to-nft/ui/.eslintrc.json b/projects/prompt-to-nft/ui/.eslintrc.json new file mode 100644 index 0000000..bffb357 --- /dev/null +++ b/projects/prompt-to-nft/ui/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "next/core-web-vitals" +} diff --git a/projects/prompt-to-nft/ui/.gitignore 
b/projects/prompt-to-nft/ui/.gitignore new file mode 100644 index 0000000..fd3dbb5 --- /dev/null +++ b/projects/prompt-to-nft/ui/.gitignore @@ -0,0 +1,36 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.js +.yarn/install-state.gz + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# local env files +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts diff --git a/projects/prompt-to-nft/ui/Makefile b/projects/prompt-to-nft/ui/Makefile new file mode 100644 index 0000000..a9a8223 --- /dev/null +++ b/projects/prompt-to-nft/ui/Makefile @@ -0,0 +1,11 @@ +.phony: run + +run: + @PACKAGE_MANAGER=$$(command -v bun || command -v pnpm || command -v npm); \ + if [ -z $$PACKAGE_MANAGER ]; then \ + echo "No package manager found. Please install bun, pnpm, or npm."; \ + exit 1; \ + fi; \ + echo "Using $$PACKAGE_MANAGER..."; \ + $$PACKAGE_MANAGER install; \ + $$PACKAGE_MANAGER run dev; diff --git a/projects/prompt-to-nft/ui/README.md b/projects/prompt-to-nft/ui/README.md new file mode 100644 index 0000000..a75ac52 --- /dev/null +++ b/projects/prompt-to-nft/ui/README.md @@ -0,0 +1,40 @@ +This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). + +## Getting Started + +First, run the development server: + +```bash +npm run dev +# or +yarn dev +# or +pnpm dev +# or +bun dev +``` + +Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. + +You can start editing the page by modifying `pages/index.tsx`. The page auto-updates as you edit the file. + +[API routes](https://nextjs.org/docs/api-routes/introduction) can be accessed on [http://localhost:3000/api/hello](http://localhost:3000/api/hello). This endpoint can be edited in `pages/api/hello.ts`. + +The `pages/api` directory is mapped to `/api/*`. Files in this directory are treated as [API routes](https://nextjs.org/docs/api-routes/introduction) instead of React pages. + +This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. + +## Learn More + +To learn more about Next.js, take a look at the following resources: + +- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. +- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. + +You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! + +## Deploy on Vercel + +The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. + +Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. 
diff --git a/projects/prompt-to-nft/ui/bun.lockb b/projects/prompt-to-nft/ui/bun.lockb new file mode 100755 index 0000000..3d37e18 Binary files /dev/null and b/projects/prompt-to-nft/ui/bun.lockb differ diff --git a/projects/prompt-to-nft/ui/next.config.mjs b/projects/prompt-to-nft/ui/next.config.mjs new file mode 100644 index 0000000..d5456a1 --- /dev/null +++ b/projects/prompt-to-nft/ui/next.config.mjs @@ -0,0 +1,6 @@ +/** @type {import('next').NextConfig} */ +const nextConfig = { + reactStrictMode: true, +}; + +export default nextConfig; diff --git a/projects/prompt-to-nft/ui/package.json b/projects/prompt-to-nft/ui/package.json new file mode 100644 index 0000000..258e3c3 --- /dev/null +++ b/projects/prompt-to-nft/ui/package.json @@ -0,0 +1,32 @@ +{ + "name": "ui", + "version": "0.1.0", + "private": true, + "scripts": { + "dev": "next dev --port 3001", + "build": "next build", + "start": "next start", + "lint": "next lint" + }, + "dependencies": { + "@rainbow-me/rainbowkit": "^2.0.0", + "@tanstack/react-query": "^5.22.2", + "next": "14.1.0", + "prettier": "^3.2.5", + "react": "^18", + "react-dom": "^18", + "viem": "2.x", + "wagmi": "^2.5.7" + }, + "devDependencies": { + "typescript": "^5", + "@types/node": "^20", + "@types/react": "^18", + "@types/react-dom": "^18", + "autoprefixer": "^10.0.1", + "postcss": "^8", + "tailwindcss": "^3.3.0", + "eslint": "^8", + "eslint-config-next": "14.1.0" + } +} diff --git a/projects/prompt-to-nft/ui/postcss.config.js b/projects/prompt-to-nft/ui/postcss.config.js new file mode 100644 index 0000000..12a703d --- /dev/null +++ b/projects/prompt-to-nft/ui/postcss.config.js @@ -0,0 +1,6 @@ +module.exports = { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +}; diff --git a/projects/prompt-to-nft/ui/public/favicon.ico b/projects/prompt-to-nft/ui/public/favicon.ico new file mode 100644 index 0000000..718d6fe Binary files /dev/null and b/projects/prompt-to-nft/ui/public/favicon.ico differ diff --git a/projects/prompt-to-nft/ui/public/next.svg b/projects/prompt-to-nft/ui/public/next.svg new file mode 100644 index 0000000..5bb00d4 --- /dev/null +++ b/projects/prompt-to-nft/ui/public/next.svg @@ -0,0 +1 @@ + diff --git a/projects/prompt-to-nft/ui/public/vercel.svg b/projects/prompt-to-nft/ui/public/vercel.svg new file mode 100644 index 0000000..1aeda7d --- /dev/null +++ b/projects/prompt-to-nft/ui/public/vercel.svg @@ -0,0 +1 @@ + diff --git a/projects/prompt-to-nft/ui/src/components/Button.tsx b/projects/prompt-to-nft/ui/src/components/Button.tsx new file mode 100644 index 0000000..91690d5 --- /dev/null +++ b/projects/prompt-to-nft/ui/src/components/Button.tsx @@ -0,0 +1,10 @@ +import { ButtonHTMLAttributes, PropsWithChildren } from "react"; + +export const Button = ( + p: PropsWithChildren>, +) => ( + + ); +}; diff --git a/projects/prompt-to-nft/ui/src/components/LoadImg.tsx b/projects/prompt-to-nft/ui/src/components/LoadImg.tsx new file mode 100644 index 0000000..477f25e --- /dev/null +++ b/projects/prompt-to-nft/ui/src/components/LoadImg.tsx @@ -0,0 +1,69 @@ +import { useEffect, useState } from "react"; + +export const LoadImg = ({ url, tokenId }: { url: string; tokenId: number }) => { + const [loaded, setLoaded] = useState(false); + const [attempts, setAttempts] = useState(0); + + useEffect(() => { + if (loaded) { + return; + } + let img = new Image(); + const loadImg = () => { + console.log(`trying: ${attempts}`); + img = new Image(); + img.src = url; + img.onload = () => { + setLoaded(true); + }; + img.onerror = () => { + if (attempts < 100) { + 
// Set a max number of attempts + setTimeout(() => { + setAttempts((prev) => prev + 1); + loadImg(); // Retry loading the image + }, 1000); // Retry after 1 seconds + } + }; + }; + + if (!loaded) { + loadImg(); + } + + // Cleanup function to avoid memory leaks + return () => { + img.onload = null; + img.onerror = null; + }; + }, [url, loaded, attempts]); + + return ( +
+ {loaded ? ( + {`NFT + ) : ( +
+ {" "} +
+ )} +
+ ); +}; diff --git a/projects/prompt-to-nft/ui/src/components/MintButton.tsx b/projects/prompt-to-nft/ui/src/components/MintButton.tsx new file mode 100644 index 0000000..feda56f --- /dev/null +++ b/projects/prompt-to-nft/ui/src/components/MintButton.tsx @@ -0,0 +1,28 @@ +import { useAccount, useWriteContract } from "wagmi"; +import { nftAbi } from "@/util/nftAbi"; +import { NFT_ADDRESS } from "@/util/constants"; +import {Button} from "@/components/Button"; + +export const MintButton = ({ prompt }: { prompt: string }) => { + const { address } = useAccount(); + const { writeContract } = useWriteContract(); + + return ( + + ); +}; diff --git a/projects/prompt-to-nft/ui/src/components/NFTBalance.tsx b/projects/prompt-to-nft/ui/src/components/NFTBalance.tsx new file mode 100644 index 0000000..8a29978 --- /dev/null +++ b/projects/prompt-to-nft/ui/src/components/NFTBalance.tsx @@ -0,0 +1,24 @@ +import { useAccount, useReadContract } from "wagmi"; +import { nftAbi } from "@/util/nftAbi"; +import { NFT_ADDRESS } from "@/util/constants"; + +const NFTBalance = () => { + const { address } = useAccount(); + + const readContract = useReadContract({ + address: NFT_ADDRESS, + account: address, + abi: nftAbi, + query: { + enabled: Boolean(address), + refetchInterval: 1000, + }, + functionName: "counter", + }); + + if (!readContract.data) { + return <>loading...; + } + + return <>your nft balance: {readContract.data.toString()}; +}; diff --git a/projects/prompt-to-nft/ui/src/components/NftCollection.tsx b/projects/prompt-to-nft/ui/src/components/NftCollection.tsx new file mode 100644 index 0000000..562f53b --- /dev/null +++ b/projects/prompt-to-nft/ui/src/components/NftCollection.tsx @@ -0,0 +1,56 @@ +import { useAccount, useReadContract } from "wagmi"; +import { NFT_ADDRESS } from "@/util/constants"; +import { nftAbi } from "@/util/nftAbi"; +import { NftImage } from "@/components/NftImage"; + +export const NftCollection = () => { + const { address } = useAccount(); + + const readContract = useReadContract({ + address: NFT_ADDRESS, + account: address, + abi: nftAbi, + query: { + enabled: Boolean(address), + refetchInterval: 1000, + }, + functionName: "counter", + }); + + if (readContract.data === 0n) { + return <>No NFTs; + } + console.log("read contract data", readContract.data); + + if (!readContract.data) { + return <>Please connect your wallet.; + } + + const counter = parseInt(readContract.data.toString()); + const nftIds = new Array(counter).fill(0n).map((_, index) => index + 1); + + console.log(`counter: ${counter}`); + + return ( +
+

The Collection

+ {nftIds.length === 0 ? ( +
+ No NFTs minted. +
+ ) : ( +
+ {nftIds.map((id) => { + return ( + + ); + })} +
+ )} +
+ ); +}; diff --git a/projects/prompt-to-nft/ui/src/components/NftImage.tsx b/projects/prompt-to-nft/ui/src/components/NftImage.tsx new file mode 100644 index 0000000..57be03b --- /dev/null +++ b/projects/prompt-to-nft/ui/src/components/NftImage.tsx @@ -0,0 +1,46 @@ +import { Address } from "viem"; +import { useAccount, useReadContract } from "wagmi"; +import { nftAbi } from "@/util/nftAbi"; +import { LoadImg } from "@/components/LoadImg"; + +export const NftImage = ({ + tokenId, + contractAddress, +}: { + tokenId: number; + contractAddress: Address; +}) => { + const { address } = useAccount(); + console.log( + "tokenid", + tokenId, + "contractAddress", + contractAddress, + "address", + address, + ); + + const { data } = useReadContract({ + address: contractAddress, + abi: nftAbi, + account: address, + functionName: "tokenURI", + query: { + enabled: Boolean(address), + refetchInterval: 1000, + }, + args: [BigInt(tokenId)], + }); + + console.log("nft image data", data); + + if (!data) { + return <>loading...; + } + + return ( +
+ +
+ ); +}; diff --git a/projects/prompt-to-nft/ui/src/pages/_app.tsx b/projects/prompt-to-nft/ui/src/pages/_app.tsx new file mode 100644 index 0000000..1ff8ab2 --- /dev/null +++ b/projects/prompt-to-nft/ui/src/pages/_app.tsx @@ -0,0 +1,20 @@ +import "@/styles/globals.css"; +import type { AppProps } from "next/app"; +import { WagmiProvider } from "wagmi"; +import { config } from "@/util/config"; +import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; +import { RainbowKitProvider } from "@rainbow-me/rainbowkit"; + +const queryClient = new QueryClient(); + +export default function App({ Component, pageProps }: AppProps) { + return ( + + + + + + + + ); +} diff --git a/projects/prompt-to-nft/ui/src/pages/_document.tsx b/projects/prompt-to-nft/ui/src/pages/_document.tsx new file mode 100644 index 0000000..b2fff8b --- /dev/null +++ b/projects/prompt-to-nft/ui/src/pages/_document.tsx @@ -0,0 +1,13 @@ +import { Html, Head, Main, NextScript } from "next/document"; + +export default function Document() { + return ( + + + +
+ + + + ); +} diff --git a/projects/prompt-to-nft/ui/src/pages/api/hello.ts b/projects/prompt-to-nft/ui/src/pages/api/hello.ts new file mode 100644 index 0000000..ea77e8f --- /dev/null +++ b/projects/prompt-to-nft/ui/src/pages/api/hello.ts @@ -0,0 +1,13 @@ +// Next.js API route support: https://nextjs.org/docs/api-routes/introduction +import type { NextApiRequest, NextApiResponse } from "next"; + +type Data = { + name: string; +}; + +export default function handler( + req: NextApiRequest, + res: NextApiResponse, +) { + res.status(200).json({ name: "John Doe" }); +} diff --git a/projects/prompt-to-nft/ui/src/pages/golden-skiing.png b/projects/prompt-to-nft/ui/src/pages/golden-skiing.png new file mode 100644 index 0000000..ebebcef Binary files /dev/null and b/projects/prompt-to-nft/ui/src/pages/golden-skiing.png differ diff --git a/projects/prompt-to-nft/ui/src/pages/index.tsx b/projects/prompt-to-nft/ui/src/pages/index.tsx new file mode 100644 index 0000000..b75506b --- /dev/null +++ b/projects/prompt-to-nft/ui/src/pages/index.tsx @@ -0,0 +1,54 @@ +import { useAccount } from "wagmi"; +import { ConnectButton } from "@rainbow-me/rainbowkit"; +import { FaucetButton } from "@/components/FaucetButton"; +import { useState } from "react"; +import { ClientRendered } from "@/components/ClientRendered"; +import { MintButton } from "@/components/MintButton"; +import { NftCollection } from "@/components/NftCollection"; +import { addNetwork } from "@/util/chain"; +import {Button} from "@/components/Button"; + +export default function Home() { + const account = useAccount(); + const [prompt, setPrompt] = useState( + "A picture of a golden retriever fighting sparta in the 300 movie", + ); + + return ( + + +
+
+ + setPrompt(e.target.value)} + /> +
+ + +
+
+ ); +} diff --git a/projects/prompt-to-nft/ui/src/styles/globals.css b/projects/prompt-to-nft/ui/src/styles/globals.css new file mode 100644 index 0000000..88af27a --- /dev/null +++ b/projects/prompt-to-nft/ui/src/styles/globals.css @@ -0,0 +1,25 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +:root { + --foreground-rgb: 0, 0, 0; + --background-start-rgb: 214, 219, 220; + --background-end-rgb: 255, 255, 255; +} + +@media (prefers-color-scheme: dark) { + :root { + --foreground-rgb: 255, 255, 255; + --background-start-rgb: 0, 0, 0; + --background-end-rgb: 0, 0, 0; + background: #0d0825; + color: white; + } +} + +@layer utilities { + .text-balance { + text-wrap: balance; + } +} diff --git a/projects/prompt-to-nft/ui/src/util/chain.ts b/projects/prompt-to-nft/ui/src/util/chain.ts new file mode 100644 index 0000000..9aeb87f --- /dev/null +++ b/projects/prompt-to-nft/ui/src/util/chain.ts @@ -0,0 +1,45 @@ +import { type Chain } from "viem"; + +export const anvilNode = { + id: 31337, + name: "Anvil Node", + nativeCurrency: { name: "Ether", symbol: "ETH", decimals: 18 }, + rpcUrls: { + default: { http: ["http://localhost:8545"] }, + }, + blockExplorers: { + default: { name: "Etherscan", url: "https://etherscan.io" }, + }, + contracts: { + ensRegistry: { + address: "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e", + }, + ensUniversalResolver: { + address: "0xE4Acdd618deED4e6d2f03b9bf62dc6118FC9A4da", + blockCreated: 16773775, + }, + multicall3: { + address: "0xca11bde05977b3631167028862be2a173976ca11", + blockCreated: 14353601, + }, + }, +} as const satisfies Chain; + +export const addNetwork = () => { + window.ethereum.request({ + method: "wallet_addEthereumChain", + params: [ + { + chainId: `0x${(31337).toString(16)}`, + rpcUrls: ["http://184.105.4.216:8545"], + chainName: "Anvil Node", + nativeCurrency: { + name: "Ethereum", + symbol: "ETH", + decimals: 18, + }, + blockExplorerUrls: ["https://etherscan.io/"], + }, + ], + }); +}; diff --git a/projects/prompt-to-nft/ui/src/util/config.ts b/projects/prompt-to-nft/ui/src/util/config.ts new file mode 100644 index 0000000..3bb80ea --- /dev/null +++ b/projects/prompt-to-nft/ui/src/util/config.ts @@ -0,0 +1,26 @@ +import "@rainbow-me/rainbowkit/styles.css"; +import { connectorsForWallets, getDefaultConfig } from "@rainbow-me/rainbowkit"; +import { anvilNode } from "@/util/chain"; +import { metaMaskWallet } from "@rainbow-me/rainbowkit/wallets"; +import { createConfig, http } from "wagmi"; + +const connectors = connectorsForWallets( + [ + { + groupName: "Recommended", + wallets: [metaMaskWallet], + }, + ], + { + appName: "My RainbowKit App", + projectId: "YOUR_PROJECT_ID", + }, +); + +export const config = createConfig({ + connectors, + chains: [anvilNode], + transports: { + [anvilNode.id]: http(), + }, +}); diff --git a/projects/prompt-to-nft/ui/src/util/constants.ts b/projects/prompt-to-nft/ui/src/util/constants.ts new file mode 100644 index 0000000..8fccdc1 --- /dev/null +++ b/projects/prompt-to-nft/ui/src/util/constants.ts @@ -0,0 +1,4 @@ +import {Address} from "viem"; + +export const NFT_ADDRESS: Address = + "0x663F3ad617193148711d28f5334eE4Ed07016602"; diff --git a/projects/prompt-to-nft/ui/src/util/nftAbi.ts b/projects/prompt-to-nft/ui/src/util/nftAbi.ts new file mode 100644 index 0000000..855dbd6 --- /dev/null +++ b/projects/prompt-to-nft/ui/src/util/nftAbi.ts @@ -0,0 +1,454 @@ +export const nftAbi = [ + { + type: "constructor", + inputs: [ + { + name: "coordinator", + type: "address", + internalType: "address", + }, + ], + 
stateMutability: "nonpayable", + }, + { + type: "function", + name: "approve", + inputs: [ + { + name: "spender", + type: "address", + internalType: "address", + }, + { + name: "id", + type: "uint256", + internalType: "uint256", + }, + ], + outputs: [], + stateMutability: "nonpayable", + }, + { + type: "function", + name: "arweaveHashes", + inputs: [ + { + name: "", + type: "uint256", + internalType: "uint256", + }, + ], + outputs: [ + { + name: "", + type: "string", + internalType: "string", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "balanceOf", + inputs: [ + { + name: "owner", + type: "address", + internalType: "address", + }, + ], + outputs: [ + { + name: "", + type: "uint256", + internalType: "uint256", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "counter", + inputs: [], + outputs: [ + { + name: "", + type: "uint256", + internalType: "uint256", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "getApproved", + inputs: [ + { + name: "", + type: "uint256", + internalType: "uint256", + }, + ], + outputs: [ + { + name: "", + type: "address", + internalType: "address", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "isApprovedForAll", + inputs: [ + { + name: "", + type: "address", + internalType: "address", + }, + { + name: "", + type: "address", + internalType: "address", + }, + ], + outputs: [ + { + name: "", + type: "bool", + internalType: "bool", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "mint", + inputs: [ + { + name: "prompt", + type: "string", + internalType: "string", + }, + { + name: "to", + type: "address", + internalType: "address", + }, + ], + outputs: [], + stateMutability: "nonpayable", + }, + { + type: "function", + name: "name", + inputs: [], + outputs: [ + { + name: "", + type: "string", + internalType: "string", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "nftCollection", + inputs: [], + outputs: [ + { + name: "", + type: "uint256[]", + internalType: "uint256[]", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "ownerOf", + inputs: [ + { + name: "id", + type: "uint256", + internalType: "uint256", + }, + ], + outputs: [ + { + name: "owner", + type: "address", + internalType: "address", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "rawReceiveCompute", + inputs: [ + { + name: "subscriptionId", + type: "uint32", + internalType: "uint32", + }, + { + name: "interval", + type: "uint32", + internalType: "uint32", + }, + { + name: "redundancy", + type: "uint16", + internalType: "uint16", + }, + { + name: "node", + type: "address", + internalType: "address", + }, + { + name: "input", + type: "bytes", + internalType: "bytes", + }, + { + name: "output", + type: "bytes", + internalType: "bytes", + }, + { + name: "proof", + type: "bytes", + internalType: "bytes", + }, + ], + outputs: [], + stateMutability: "nonpayable", + }, + { + type: "function", + name: "safeTransferFrom", + inputs: [ + { + name: "from", + type: "address", + internalType: "address", + }, + { + name: "to", + type: "address", + internalType: "address", + }, + { + name: "id", + type: "uint256", + internalType: "uint256", + }, + ], + outputs: [], + stateMutability: "nonpayable", + }, + { + type: "function", + name: "safeTransferFrom", + inputs: [ + { + name: "from", + type: "address", + internalType: "address", + }, + { + name: "to", + type: "address", + internalType: "address", + }, + { + name: 
"id", + type: "uint256", + internalType: "uint256", + }, + { + name: "data", + type: "bytes", + internalType: "bytes", + }, + ], + outputs: [], + stateMutability: "nonpayable", + }, + { + type: "function", + name: "setApprovalForAll", + inputs: [ + { + name: "operator", + type: "address", + internalType: "address", + }, + { + name: "approved", + type: "bool", + internalType: "bool", + }, + ], + outputs: [], + stateMutability: "nonpayable", + }, + { + type: "function", + name: "supportsInterface", + inputs: [ + { + name: "interfaceId", + type: "bytes4", + internalType: "bytes4", + }, + ], + outputs: [ + { + name: "", + type: "bool", + internalType: "bool", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "symbol", + inputs: [], + outputs: [ + { + name: "", + type: "string", + internalType: "string", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "tokenURI", + inputs: [ + { + name: "tokenId", + type: "uint256", + internalType: "uint256", + }, + ], + outputs: [ + { + name: "", + type: "string", + internalType: "string", + }, + ], + stateMutability: "view", + }, + { + type: "function", + name: "transferFrom", + inputs: [ + { + name: "from", + type: "address", + internalType: "address", + }, + { + name: "to", + type: "address", + internalType: "address", + }, + { + name: "id", + type: "uint256", + internalType: "uint256", + }, + ], + outputs: [], + stateMutability: "nonpayable", + }, + { + type: "event", + name: "Approval", + inputs: [ + { + name: "owner", + type: "address", + indexed: true, + internalType: "address", + }, + { + name: "spender", + type: "address", + indexed: true, + internalType: "address", + }, + { + name: "id", + type: "uint256", + indexed: true, + internalType: "uint256", + }, + ], + anonymous: false, + }, + { + type: "event", + name: "ApprovalForAll", + inputs: [ + { + name: "owner", + type: "address", + indexed: true, + internalType: "address", + }, + { + name: "operator", + type: "address", + indexed: true, + internalType: "address", + }, + { + name: "approved", + type: "bool", + indexed: false, + internalType: "bool", + }, + ], + anonymous: false, + }, + { + type: "event", + name: "Transfer", + inputs: [ + { + name: "from", + type: "address", + indexed: true, + internalType: "address", + }, + { + name: "to", + type: "address", + indexed: true, + internalType: "address", + }, + { + name: "id", + type: "uint256", + indexed: true, + internalType: "uint256", + }, + ], + anonymous: false, + }, + { + type: "error", + name: "NotCoordinator", + inputs: [], + }, +]; diff --git a/projects/prompt-to-nft/ui/tailwind.config.ts b/projects/prompt-to-nft/ui/tailwind.config.ts new file mode 100644 index 0000000..e9a0944 --- /dev/null +++ b/projects/prompt-to-nft/ui/tailwind.config.ts @@ -0,0 +1,20 @@ +import type { Config } from "tailwindcss"; + +const config: Config = { + content: [ + "./src/pages/**/*.{js,ts,jsx,tsx,mdx}", + "./src/components/**/*.{js,ts,jsx,tsx,mdx}", + "./src/app/**/*.{js,ts,jsx,tsx,mdx}", + ], + theme: { + extend: { + backgroundImage: { + "gradient-radial": "radial-gradient(var(--tw-gradient-stops))", + "gradient-conic": + "conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))", + }, + }, + }, + plugins: [], +}; +export default config; diff --git a/projects/prompt-to-nft/ui/tsconfig.json b/projects/prompt-to-nft/ui/tsconfig.json new file mode 100644 index 0000000..874a6ca --- /dev/null +++ b/projects/prompt-to-nft/ui/tsconfig.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "target": "es2020", + "lib": 
["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "noEmit": true, + "esModuleInterop": true, + "module": "esnext", + "moduleResolution": "bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "preserve", + "incremental": true, + "paths": { + "@/*": ["./src/*"] + } + }, + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], + "exclude": ["node_modules"] +} diff --git a/projects/tgi-llm/container/.gitignore b/projects/tgi-llm/container/.gitignore new file mode 100644 index 0000000..d344ba6 --- /dev/null +++ b/projects/tgi-llm/container/.gitignore @@ -0,0 +1 @@ +config.json diff --git a/projects/tgi-llm/container/Dockerfile b/projects/tgi-llm/container/Dockerfile new file mode 100644 index 0000000..57add3f --- /dev/null +++ b/projects/tgi-llm/container/Dockerfile @@ -0,0 +1,25 @@ +FROM python:3.11-slim as builder + +WORKDIR /app + +ENV PYTHONUNBUFFERED 1 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PIP_NO_CACHE_DIR 1 +ENV RUNTIME docker +ENV PYTHONPATH src + +RUN apt-get update +RUN apt-get install -y git curl + +# install uv +ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh +RUN /install.sh && rm /install.sh + +COPY src/requirements.txt . + +RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt + +COPY src src + +ENTRYPOINT ["hypercorn", "app:create_app()"] +CMD ["-b", "0.0.0.0:3000"] diff --git a/projects/tgi-llm/container/Makefile b/projects/tgi-llm/container/Makefile new file mode 100644 index 0000000..2df9b24 --- /dev/null +++ b/projects/tgi-llm/container/Makefile @@ -0,0 +1,17 @@ +DOCKER_ORG := ritualnetwork +EXAMPLE_NAME := tgi-llm +TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest + +.phony: build run build-multiplatform + +build: + @docker build -t $(TAG) . + +run: + docker run -p 3000:3000 --env-file tgi-llm.env $(TAG) + +# You may need to set up a docker builder, to do so run: +# docker buildx create --name mybuilder --bootstrap --use +# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info +build-multiplatform: + docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push . diff --git a/projects/tgi-llm/container/README.md b/projects/tgi-llm/container/README.md new file mode 100644 index 0000000..0aa838f --- /dev/null +++ b/projects/tgi-llm/container/README.md @@ -0,0 +1,88 @@ +# TGI LLM + +In this example, we're running an infernet node along with a TGI service. + +## Deploying TGI Service + +If you have your own TGI service running, feel free to skip this part. Otherwise, +you can deploy the TGI service using the following command. + +Make sure you have a machine with proper GPU support. Clone this repository & +run the following command: + +```bash +make run-service project=tgi-llm service=tgi +``` + +## Deploying Infernet Node Locally + +Running an infernet node involves a simple configuration step & running step. + +### Configuration + +Copy our [sample config file](./config.sample.json) into a new file +called `config.json`. + +```bash +cp config.sample.json config.json +``` + +Then provide the `"env"` field of the `"containers"` section of the file to point to the +TGI Service you just deployed. + +```json +{ + // etc. 
+ "containers": [ + { + "id": "tgi-llm", + "image": "ritualnetwork/llm_inference_service:latest", + "external": true, + "port": "3000", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "--bind=0.0.0.0:3000 --workers=2", + "env": { + "TGI_SERVICE_URL": "http://{your-service-ip}:{your-service-port}" // <- Change this to the TGI service you deployed + } + } + ] +} +``` + +### Running the Infernet Node Locally + +With that out of the way, you can now run the infernet node using the following command +at the top-level directory of this repo: + +``` +make deploy-container project=tgi-llm +``` + +## Testing the Infernet Node + +You can test the infernet node by posting a job in the node's REST api. + +```bash +curl -X POST "http://127.0.0.1:4000/api/jobs" \ + -H "Content-Type: application/json" \ + -d '{"containers":["tgi-llm"], "data": {"prompt": "can shrimp actually fry rice?"}}' +``` + +You can expect a response similar to the following: + +```json +{ + "id": "f026c7c2-7027-4c2d-b662-2b48c9433a12" +} +``` + +You can then check the status of the job using the following command: + +```bash +curl -X GET http://127.0.0.1:4000/api/jobs\?id\=f026c7c2-7027-4c2d-b662-2b48c9433a12 +[{"id":"f026c7c2-7027-4c2d-b662-2b48c9433a12","result":{"container":"tgi-llm","output":{"output":"\n\nI\u2019m not sure if this is a real question or not, but I\u2019m"}},"status":"success"}] +``` + +Congratulations! You've successfully ran an infernet node with a TGI service. diff --git a/projects/tgi-llm/container/config.sample.json b/projects/tgi-llm/container/config.sample.json new file mode 100644 index 0000000..cbbb1fb --- /dev/null +++ b/projects/tgi-llm/container/config.sample.json @@ -0,0 +1,52 @@ +{ + "log_path": "infernet_node.log", + "server": { + "port": 4000 + }, + "chain": { + "enabled": true, + "trail_head_blocks": 0, + "rpc_url": "http://host.docker.internal:8545", + "coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "wallet": { + "max_gas_limit": 4000000, + "private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" + } + }, + "startup_wait": 1.0, + "docker": { + "username": "your-username", + "password": "" + }, + "redis": { + "host": "redis", + "port": 6379 + }, + "forward_stats": true, + "containers": [ + { + "id": "tgi-llm", + "image": "ritualnetwork/example-tgi-llm-infernet:latest", + "external": true, + "port": "3000", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "--bind=0.0.0.0:3000 --workers=2", + "env": { + "TGI_SERVICE_URL": "http://{your_service_ip}:{your_service_port}" + } + }, + { + "id": "anvil-node", + "image": "ritualnetwork/infernet-anvil:0.0.0", + "external": true, + "port": "8545", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "", + "env": {} + } + ] +} diff --git a/projects/tgi-llm/container/src/app.py b/projects/tgi-llm/container/src/app.py new file mode 100644 index 0000000..abcd185 --- /dev/null +++ b/projects/tgi-llm/container/src/app.py @@ -0,0 +1,85 @@ +import logging +import os +from typing import Any, cast + +from eth_abi import decode, encode # type: ignore +from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource +from infernet_ml.workflows.inference.tgi_client_inference_workflow import ( + TGIClientInferenceWorkflow, +) +from quart import Quart, request + +log = logging.getLogger(__name__) + + +def create_app() -> Quart: + app = Quart(__name__) + + workflow = 
TGIClientInferenceWorkflow( + server_url=cast(str, os.environ.get("TGI_SERVICE_URL")) + ) + + workflow.setup() + + @app.route("/") + def index() -> str: + """ + Utility endpoint to check if the service is running. + """ + return "LLM Inference Service is running." + + @app.route("/service_output", methods=["POST"]) + async def inference() -> dict[str, Any]: + req_data = await request.get_json() + """ + InfernetInput has the format: + source: (0 on-chain, 1 off-chain) + data: dict[str, Any] + """ + infernet_input: InfernetInput = InfernetInput(**req_data) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + prompt = cast(dict[str, Any], infernet_input.data).get("prompt") + else: + # On-chain requests are sent as a generalized hex-string which we will + # decode to the appropriate format. + (prompt,) = decode( + ["string"], bytes.fromhex(cast(str, infernet_input.data)) + ) + + result: dict[str, Any] = workflow.inference({"text": prompt}) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + """ + In case of an off-chain request, the result is returned as a dict. The + infernet node expects a dict format. + """ + return {"data": result} + else: + """ + In case of an on-chain request, the result is returned in the format: + { + "raw_input": str, + "processed_input": str, + "raw_output": str, + "processed_output": str, + "proof": str, + } + refer to: https://docs.ritual.net/infernet/node/containers for more info. + """ + return { + "raw_input": "", + "processed_input": "", + "raw_output": encode(["string"], [result]).hex(), + "processed_output": "", + "proof": "", + } + + return app + + +if __name__ == "__main__": + """ + Utility to run the app locally. For development purposes only. + """ + create_app().run(port=3000) diff --git a/projects/tgi-llm/container/src/requirements.txt b/projects/tgi-llm/container/src/requirements.txt new file mode 100644 index 0000000..0ef237a --- /dev/null +++ b/projects/tgi-llm/container/src/requirements.txt @@ -0,0 +1,6 @@ +quart==0.19.4 +infernet_ml==0.1.0 +PyArweave @ git+https://github.com/ritual-net/pyarweave.git +web3==6.15.0 +retry2==0.9.5 +text-generation==0.6.1 diff --git a/projects/tgi-llm/container/tgi-llm.env.sample b/projects/tgi-llm/container/tgi-llm.env.sample new file mode 100644 index 0000000..c7f24a6 --- /dev/null +++ b/projects/tgi-llm/container/tgi-llm.env.sample @@ -0,0 +1 @@ +TGI_SERVICE_URL=http://{your-service-ip}:{your-service-port} diff --git a/projects/tgi-llm/contracts/.github/workflows/test.yml b/projects/tgi-llm/contracts/.github/workflows/test.yml new file mode 100644 index 0000000..9282e82 --- /dev/null +++ b/projects/tgi-llm/contracts/.github/workflows/test.yml @@ -0,0 +1,34 @@ +name: test + +on: workflow_dispatch + +env: + FOUNDRY_PROFILE: ci + +jobs: + check: + strategy: + fail-fast: true + + name: Foundry project + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly + + - name: Run Forge build + run: | + forge --version + forge build --sizes + id: build + + - name: Run Forge tests + run: | + forge test -vvv + id: test diff --git a/projects/tgi-llm/contracts/.gitignore b/projects/tgi-llm/contracts/.gitignore new file mode 100644 index 0000000..85198aa --- /dev/null +++ b/projects/tgi-llm/contracts/.gitignore @@ -0,0 +1,14 @@ +# Compiler files +cache/ +out/ + +# Ignores development broadcast logs +!/broadcast +/broadcast/*/31337/ +/broadcast/**/dry-run/ + +# Docs +docs/ + +# 
Dotenv file +.env diff --git a/projects/tgi-llm/contracts/Makefile b/projects/tgi-llm/contracts/Makefile new file mode 100644 index 0000000..2af9de7 --- /dev/null +++ b/projects/tgi-llm/contracts/Makefile @@ -0,0 +1,14 @@ +# phony targets are targets that don't actually create a file +.phony: deploy + +# anvil's third default address +sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a +RPC_URL := http://localhost:8545 + +# deploying the contract +deploy: + @PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL) + +# calling sayGM() +call-contract: + @PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL) diff --git a/projects/tgi-llm/contracts/foundry.toml b/projects/tgi-llm/contracts/foundry.toml new file mode 100644 index 0000000..83816a2 --- /dev/null +++ b/projects/tgi-llm/contracts/foundry.toml @@ -0,0 +1,7 @@ +[profile.default] +src = "src" +out = "out" +libs = ["lib"] +via_ir = true + +# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options diff --git a/projects/tgi-llm/contracts/remappings.txt b/projects/tgi-llm/contracts/remappings.txt new file mode 100644 index 0000000..c788350 --- /dev/null +++ b/projects/tgi-llm/contracts/remappings.txt @@ -0,0 +1,2 @@ +forge-std/=lib/forge-std/src +infernet-sdk/=lib/infernet-sdk/src diff --git a/projects/tgi-llm/contracts/script/CallContract.s.sol b/projects/tgi-llm/contracts/script/CallContract.s.sol new file mode 100644 index 0000000..750b58b --- /dev/null +++ b/projects/tgi-llm/contracts/script/CallContract.s.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.0; + +import {Script, console2} from "forge-std/Script.sol"; +import {Prompter} from "../src/Prompter.sol"; + +contract CallContract is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + Prompter prompter = Prompter(0x663F3ad617193148711d28f5334eE4Ed07016602); + + prompter.promptLLM(vm.envString("prompt")); + + vm.stopBroadcast(); + } +} diff --git a/projects/tgi-llm/contracts/script/Deploy.s.sol b/projects/tgi-llm/contracts/script/Deploy.s.sol new file mode 100644 index 0000000..302d2c0 --- /dev/null +++ b/projects/tgi-llm/contracts/script/Deploy.s.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {Script, console2} from "forge-std/Script.sol"; +import {Prompter} from "../src/Prompter.sol"; + +contract Deploy is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + // Log address + address deployerAddress = vm.addr(deployerPrivateKey); + console2.log("Loaded deployer: ", deployerAddress); + + address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3; + // Create consumer + Prompter prompter = new Prompter(coordinator); + console2.log("Deployed Prompter: ", address(prompter)); + + // Execute + vm.stopBroadcast(); + vm.broadcast(); + } +} diff --git a/projects/tgi-llm/contracts/src/Prompter.sol b/projects/tgi-llm/contracts/src/Prompter.sol new file mode 100644 index 0000000..596afe1 --- /dev/null +++ b/projects/tgi-llm/contracts/src/Prompter.sol @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {console2} from "forge-std/console2.sol"; +import {CallbackConsumer} 
from "infernet-sdk/consumer/Callback.sol"; + +contract Prompter is CallbackConsumer { + string private EXTREMELY_COOL_BANNER = "\n\n" + "_____ _____ _______ _ _ _ \n" + "| __ \\|_ _|__ __| | | | /\\ | | \n" + "| |__) | | | | | | | | | / \\ | | \n" + "| _ / | | | | | | | |/ /\\ \\ | | \n" + "| | \\ \\ _| |_ | | | |__| / ____ \\| |____ \n" + "|_| \\_\\_____| |_| \\____/_/ \\_\\______| \n\n"; + + constructor(address coordinator) CallbackConsumer(coordinator) {} + + function promptLLM(string calldata prompt) public { + _requestCompute( + "tgi-llm", + abi.encode(prompt), + 20 gwei, + 1_000_000, + 1 + ); + } + + function _receiveCompute( + uint32 subscriptionId, + uint32 interval, + uint16 redundancy, + address node, + bytes calldata input, + bytes calldata output, + bytes calldata proof + ) internal override { + console2.log(EXTREMELY_COOL_BANNER); + (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes)); + (string memory outputStr) = abi.decode(raw_output, (string)); + + console2.log("subscription Id", subscriptionId); + console2.log("interval", interval); + console2.log("redundancy", redundancy); + console2.log("node", node); + console2.log("output:", outputStr); + } +} diff --git a/projects/tgi-llm/tgi-llm.md b/projects/tgi-llm/tgi-llm.md new file mode 100644 index 0000000..bc9cd25 --- /dev/null +++ b/projects/tgi-llm/tgi-llm.md @@ -0,0 +1,444 @@ +# TGI Inference with Mistral-7b + +In this tutorial we are going to use [Huggingface's TGI (Text Generation Interface)](https://huggingface.co/docs/text-generation-inference/en/index) to run an arbitrary LLM model +and enable users to requests jobs form it, both on-chain and off-chain. + +## Install Pre-requisites + +For this tutorial you'll need to have the following installed. + +1. [Docker](https://docs.docker.com/engine/install/) +2. [Foundry](https://book.getfoundry.sh/getting-started/installation) + +## Setting up a TGI LLM Service + +Included with this tutorial, is a [containerized llm service](./tgi). We're going to deploy this service on a powerful +machine with access to GPU. + +### Rent a GPU machine +To run this service, you will need to have access to a machine with a powerful GPU. In the video above, we use an +A100 instance on [Paperspace](https://www.paperspace.com/). + +### Install docker +You will have to install docker. + +For Ubuntu, you can run the following commands: + +```bash copy +# install docker +sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +``` +As docker installation may vary depending on your operating system, consult the +[official documentation](https://docs.docker.com/engine/install/ubuntu/) for more information. + +After installation, you can verify that docker is installed by running: + +```bash +# sudo docker run hello-world +Hello from Docker! +``` + +### Ensure CUDA is installed +Depending on where you rent your GPU machine, CUDA is typically pre-installed. For Ubuntu, you can follow the +instructions [here](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#prepare-ubuntu). 
+
+You can verify that CUDA is installed by running:
+
+```bash copy
+# verify Installation
+python -c '
+import torch
+print("torch.cuda.is_available()", torch.cuda.is_available())
+print("torch.cuda.device_count()", torch.cuda.device_count())
+print("torch.cuda.current_device()", torch.cuda.current_device())
+print("torch.cuda.get_device_name(0)", torch.cuda.get_device_name(0))
+'
+```
+
+If CUDA is installed and available, your output will look similar to the following:
+
+```bash
+torch.cuda.is_available() True
+torch.cuda.device_count() 1
+torch.cuda.current_device() 0
+torch.cuda.get_device_name(0) Tesla V100-SXM2-16GB
+```
+
+### Ensure `nvidia-container-runtime` is installed
+For your container to be able to access the GPU, you will need to install the `nvidia-container-runtime`.
+On Ubuntu, you can run the following commands:
+
+```bash copy
+# Docker GPU support
+# nvidia container-runtime repos
+# https://nvidia.github.io/nvidia-container-runtime/
+curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \
+sudo apt-key add -
+distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
+curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \
+sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list
+sudo apt-get update
+
+# install nvidia-container-runtime
+# https://docs.docker.com/config/containers/resource_constraints/#gpu
+sudo apt-get install -y nvidia-container-runtime
+```
+As always, consult the [official documentation](https://nvidia.github.io/nvidia-container-runtime/) for more
+information.
+
+You can verify that `nvidia-container-runtime` is installed by running:
+
+```bash copy
+which nvidia-container-runtime-hook
+# this should return a path to the nvidia-container-runtime-hook
+```
+
+Now, with the pre-requisites installed, we can move on to setting up the TGI service.
+
+### Clone this repository
+
+```bash copy
+# Clone locally
+git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
+# Navigate to the repository
+cd infernet-container-starter
+```
+
+### Run the TGI service
+```bash copy
+make run-service project=tgi-llm service=tgi
+```
+
+This will start the `tgi` service. Note that this service will have to download a large model file,
+so it may take a few minutes to be fully ready. The downloaded model will be cached, so subsequent runs will be faster.
+
+## Testing the `tgi-llm` service via the gradio UI
+Included with this project is a simple gradio chat UI that allows you to interact with the `tgi-llm` service. This is
+not needed for running the Infernet Node, but it is a nice way to debug and test the TGI service.
+
+### Ensure `docker` & `foundry` exist
+To check for `docker`, run the following command in your terminal:
+```bash copy
+docker --version
+# Docker version 25.0.2, build 29cf629 (example output)
+```
+
+You'll also need to ensure that docker-compose exists in your terminal:
+```bash copy
+which docker-compose
+# /usr/local/bin/docker-compose (example output)
+```
+
+To check for `foundry`, run the following command in your terminal:
+```bash copy
+forge --version
+# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output)
+```
+
+### Clone the starter repository
+Just like our other examples, we're going to clone this repository. All of the code and instructions for this tutorial
+can be found in the [`projects/tgi-llm`](../tgi-llm) directory of the repository.
+
+```bash copy
+# Clone locally
+git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
+# Navigate to the repository
+cd infernet-container-starter
+```
+
+### Configure the UI Service
+You'll need to configure the UI service to point to the `tgi` service. To do this, you'll have to
+pass that info as environment variables. There exists a [`gradio_ui.env.sample`](./ui/gradio_ui.env.sample)
+file in the [`projects/tgi-llm/ui`](./ui)
+directory. Simply copy this file to `gradio_ui.env` and set the `TGI_SERVICE_URL` to the address of the `tgi` service.
+
+```bash copy
+cd projects/tgi-llm/ui
+cp gradio_ui.env.sample gradio_ui.env
+```
+
+Then modify the content of `gradio_ui.env` to look like this:
+
+```env
+TGI_SERVICE_URL={your_service_ip}:{your_service_port} # <- replace with your service ip & port
+HF_API_TOKEN={huggingface_api_token} # <- replace with your huggingface api token
+PROMPT_FILE_PATH=./prompt.txt # <- path to the prompt file
+```
+
+The env vars are as follows:
+- `TGI_SERVICE_URL` is the address of the `tgi` service
+- `HF_API_TOKEN` is the Huggingface API token. You can get one by signing up at [Huggingface](https://huggingface.co/)
+- `PROMPT_FILE_PATH` is the path to the system prompt file. By default it is set to `./prompt.txt`. A simple
+`prompt.txt` file is included in the `ui` directory.
+
+### Build the UI service
+From the top-level directory of the repository, simply run the following command to build the UI service:
+
+```bash copy
+# cd back to the top-level directory
+cd ../../..
+# build the UI service
+make build-service project=tgi-llm service=ui
+```
+
+### Run the UI service
+In the same directory, you can then run the following command to start the UI service:
+```bash copy
+make run-service project=tgi-llm service=ui
+```
+
+By default, the service will run on `http://localhost:3001`. You can navigate to this address in your browser to see
+the UI.
+
+### Chat with the TGI service!
+Congratulations! You can now chat with the TGI service using the gradio UI. You can enter a prompt and see the
+response from the TGI service.
+
+Now that we've tested the TGI service, we can move on to setting up the Infernet Node and the `tgi-llm` container.
+
+## Setting up the Infernet Node along with the `tgi-llm` container
+
+You can follow these steps on your local machine to set up the Infernet Node and the `tgi-llm` container.
+
+The first couple of steps are identical to those of [the previous section](#ensure-docker--foundry-exist). So if you've already completed those
+steps, you can skip to [building the tgi-llm container](#build-the-tgi-llm-container).
+
+### Ensure `docker` & `foundry` exist
+
+To check for `docker`, run the following command in your terminal:
+```bash copy
+docker --version
+# Docker version 25.0.2, build 29cf629 (example output)
+```
+
+You'll also need to ensure that docker-compose exists in your terminal:
+```bash copy
+which docker-compose
+# /usr/local/bin/docker-compose (example output)
+```
+
+To check for `foundry`, run the following command in your terminal:
+```bash copy
+forge --version
+# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output)
+```
+
+### Clone the starter repository
+Just like our other examples, we're going to clone this repository.
+All of the code and instructions for this tutorial can be found in the
+[`projects/tgi-llm`](../tgi-llm)
+directory of the repository.
+ +```bash copy +# Clone locally +git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter +# Navigate to the repository +cd infernet-container-starter +``` + +### Configure the `tgi-llm` container + +#### Configure the URL for the TGI Service +The `tgi-llm` container needs to know where to find the TGI service that we started in the steps above. To do this, +we need to modify the configuration file for the `tgi-llm` container. We have a sample [config.json](./config.sample.json) file. +Simply navigate to the `projects/tgi-llm` directory and set up the config file: + +```bash +cd projects/tgi-llm/container +cp config.sample.json config.json +``` + +In the `containers` field, you will see the following: + +```json +"containers": [ + { + // etc. etc. + "env": { + "TGI_SERVICE_URL": "http://{your_service_ip}:{your_service_port}" // <- replace with your service ip & port + } + } +}, +``` + +### Build the `tgi-llm` container + +Simply run the following command to build the `tgi-llm` container: + +```bash copy +make build-container project=tgi-llm +``` + +### Deploy the `tgi-llm` container with Infernet + +You can run a simple command to deploy the `tgi-llm` container along with bootstrapping the rest of the +Infernet node stack in one go: + +```bash copy +make deploy-container project=tgi-llm +``` + +### Check the running containers + +At this point it makes sense to check the running containers to ensure everything is running as expected. + +```bash +# > docker container ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +0dbc30f67e1e ritualnetwork/example-tgi-llm-infernet:latest "hypercorn app:creat…" 8 seconds ago Up 7 seconds +0.0.0.0:3000->3000/tcp tgi-llm +0c5140e0f41b ritualnetwork/infernet-anvil:0.0.0 "anvil --host 0.0.0.…" 23 hours ago Up 23 hours +0.0.0.0:8545->3000/tcp anvil-node +f5682ec2ad31 ritualnetwork/infernet-node:latest "/app/entrypoint.sh" 23 hours ago Up 9 seconds +0.0.0.0:4000->4000/tcp deploy-node-1 +c1ece27ba112 fluent/fluent-bit:latest "/fluent-bit/bin/flu…" 23 hours ago Up 10 seconds 2020/tcp, +0.0.0.0:24224->24224/tcp, :::24224->24224/tcp deploy-fluentbit-1 +3cccea24a303 redis:latest "docker-entrypoint.s…" 23 hours ago Up 10 seconds 0.0.0.0:6379->6379/tcp, +:::6379->6379/tcp deploy-redis-1 +``` + +You should see five different images running, including the Infernet node and the `tgi-llm` container. + +### Send a job request to the `tgi-llm` container +From here, we can make a Web-2 job request to the container by posting a request to the [`api/jobs`](https://docs.ritual.net/infernet/node/api#2a-post-apijobs) endpoint. + +```bash copy +curl -X POST http://127.0.0.1:4000/api/jobs \ +-H "Content-Type: application/json" \ +-d '{"containers": ["tgi-llm"], "data": {"prompt": "Can shrimp actually fry rice fr?"}}' +# {"id":"7a375a56-0da0-40d8-91e0-6440b3282ed8"} +``` +You will get a job id in response. You can use this id to check the status of the job. + +### Check the status of the job +You can make a `GET` request to the [`api/jobs`](https://docs.ritual.net/infernet/node/api#3-get-apijobs) endpoint to check the status of the job. + +```bash copy +curl -X GET "http://127.0.0.1:4000/api/jobs?id=7a375a56-0da0-40d8-91e0-6440b3282ed8" +# [{"id":"7a375a56-0da0-40d8-91e0-6440b3282ed8","result":{"container":"tgi-llm","output":{"data":"\n\n## Can you fry rice in a wok?\n\nThe wok is the"}},"status":"success"}] +``` + +Congratulations! You have successfully setup the Infernet Node and the `tgi-llm` container. 
Now let's move on to
+calling our service from a smart contract (a la web3 request).
+
+
+## Calling our service from a smart contract
+
+In the following steps, we will deploy our [consumer contract](https://github.com/ritual-net/infernet-container-starter/blob/main/projects/tgi-llm/contracts/src/Prompter.sol) and make a subscription request by calling the
+contract.
+
+### Setup
+Ensure that you have followed the steps in the previous section up until [here](#check-the-running-containers) to set up
+the Infernet Node and the `tgi-llm` container.
+
+Notice that in [the step above](#check-the-running-containers) we have an Anvil node running on port `8545`.
+
+By default, the [`anvil-node`](https://hub.docker.com/r/ritualnetwork/infernet-anvil) image used deploys the
+[Infernet SDK](https://docs.ritual.net/infernet/sdk/introduction) and other relevant contracts for you:
+- Coordinator: `0x5FbDB2315678afecb367f032d93F642f64180aa3`
+- Primary node: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8`
+
+### Deploy our `Prompter` smart contract
+
+In this step, we will deploy our [`Prompter.sol`](./contracts/src/Prompter.sol)
+to the Anvil node. This contract simply allows us to submit a prompt to the LLM; it receives the result of the
+prompt and prints it to the Anvil console.
+
+#### Anvil logs
+
+During this process, it is useful to look at the logs of the Anvil node to see what's going on. To follow the logs,
+in a new terminal, run:
+
+```bash copy
+docker logs -f anvil-node
+```
+
+#### Deploying the contract
+
+Once ready, to deploy the `Prompter` consumer contract, in another terminal, run:
+
+```bash copy
+make deploy-contracts project=tgi-llm
+```
+
+You should expect to see similar Anvil logs:
+
+```bash
+# > make deploy-contracts project=tgi-llm
+eth_getTransactionReceipt
+
+Transaction: 0x17a9d17cc515d39eef26b6a9427e04ed6f7ce6572d9756c07305c2df78d93ffe
+Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602
+Gas used: 731312
+
+Block Number: 1
+Block Hash: 0xd17b344af15fc32cd3359e6f2c2724a8d0a0283fc3b44febba78fc99f2f00189
+Block Time: "Wed, 6 Mar 2024 18:21:01 +0000"
+
+eth_getTransactionByHash
+```
+
+From our logs, we can see that the `Prompter` contract has been deployed to address
+`0x663f3ad617193148711d28f5334ee4ed07016602`.
+
+### Call the contract
+
+Now, let's call the contract with a prompt! In the same terminal, run:
+
+```bash copy
+make call-contract project=tgi-llm prompt="What is 2 * 3?"
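+# Note: `prompt` is a make variable set on the command line; make exports it to the
+# recipe's environment, where CallContract.s.sol reads it via vm.envString("prompt").
+# As a hedged alternative (not part of the Makefile), you could also call the contract
+# directly with Foundry's `cast send`, using any funded anvil account:
+#   cast send 0x663F3ad617193148711d28f5334eE4Ed07016602 "promptLLM(string)" "What is 2 * 3?" \
+#     --rpc-url http://localhost:8545 --private-key $PRIVATE_KEY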
+``` + +You should first expect to see an initiation transaction sent to the `Prompter` contract: + +```bash + +eth_getTransactionReceipt + +Transaction: 0x988b1b251f3b6ad887929a58429291891d026f11392fb9743e9a90f78c7a0801 +Gas used: 190922 + +Block Number: 2 +Block Hash: 0x51f3abf62e763f1bd1b0d245a4eab4ced4b18f58bd13645dbbf3a878f1964044 +Block Time: "Wed, 6 Mar 2024 18:21:34 +0000" + +eth_getTransactionByHash +eth_getTransactionReceipt + +``` +Shortly after that you should see another transaction submitted from the Infernet Node which is the +result of your on-chain subscription and its associated job request: + +```bash +eth_sendRawTransaction + + +_____ _____ _______ _ _ _ +| __ \|_ _|__ __| | | | /\ | | +| |__) | | | | | | | | | / \ | | +| _ / | | | | | | | |/ /\ \ | | +| | \ \ _| |_ | | | |__| / ____ \| |____ +|_| \_\_____| |_| \____/_/ \_\______| + + +subscription Id 1 +interval 1 +redundancy 1 +node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8 +output: + +2 * 3 = 6 + +Transaction: 0xdaaf559c2baba212ab218fb268906613ce3be93ba79b37f902ff28c8fe9a1e1a +Gas used: 116153 + +Block Number: 3 +Block Hash: 0x2f26b2b487a4195ff81865b2966eab1508d10642bf525a258200eea432522e24 +Block Time: "Wed, 6 Mar 2024 18:21:35 +0000" + +eth_blockNumber +``` + +We can now confirm that the address of the Infernet Node (see the logged `node` parameter in the Anvil logs above) +matches the address of the node we setup by default for our Infernet Node. + +Congratulations! 🎉 You have successfully enabled a contract to have access to a TGI LLM service. diff --git a/projects/tgi-llm/tgi/Makefile b/projects/tgi-llm/tgi/Makefile new file mode 100644 index 0000000..713af3c --- /dev/null +++ b/projects/tgi-llm/tgi/Makefile @@ -0,0 +1,8 @@ +.phony: run + +volume ?= $(PWD)/data +model ?= mistralai/Mistral-7B-v0.1 + +run: + docker run --gpus all --shm-size 1g -p 8080:80 -v $(volume):/data \ + ghcr.io/huggingface/text-generation-inference:1.4 --model-id $(model) diff --git a/projects/tgi-llm/tgi/README.md b/projects/tgi-llm/tgi/README.md new file mode 100644 index 0000000..a475279 --- /dev/null +++ b/projects/tgi-llm/tgi/README.md @@ -0,0 +1,15 @@ +# TGI Service + +The [Makefile](./Makefile) for this service simply invokes +huggingface's `huggingface/text-generation-inference:1.4` +docker image. Ensure that you are running this on a machine with a GPU. + +For example, to run the TGI container with model `mistralai/Mistral-7B-v0.1`, you can +use the following command: + +```bash +make run model=mistralai/Mistral-7B-v0.1 volume=/path/to/your/data +``` + +* `model`: is defaulted to `mistralai/Mistral-7B-v0.1` +* `volume`: is defaulted to `./data` diff --git a/projects/tgi-llm/ui/Dockerfile b/projects/tgi-llm/ui/Dockerfile new file mode 100644 index 0000000..353c6b4 --- /dev/null +++ b/projects/tgi-llm/ui/Dockerfile @@ -0,0 +1,22 @@ +FROM python:3.11-slim as builder + +WORKDIR /app + +ENV PYTHONUNBUFFERED 1 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONPATH src + +WORKDIR /app + +RUN apt-get update + +COPY src/requirements.txt . + +RUN pip install --upgrade pip && pip install -r requirements.txt + +COPY src src + +COPY prompt.txt . 
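# prompt.txt holds the default system prompt; the UI reads it via the
# PROMPT_FILE_PATH variable (see gradio_ui.env.sample).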
+ +ENTRYPOINT ["python", "src/app.py"] +CMD ["-b", "0.0.0.0:3000"] diff --git a/projects/tgi-llm/ui/Makefile b/projects/tgi-llm/ui/Makefile new file mode 100644 index 0000000..9122989 --- /dev/null +++ b/projects/tgi-llm/ui/Makefile @@ -0,0 +1,17 @@ +DOCKER_ORG := ritualnetwork +EXAMPLE_NAME := tgi-llm-ui +TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest + +.phony: build run publish + +build: + @docker build -t $(TAG) . + +run: build + docker run --env-file ./gradio_ui.env -p 3001:7860 $(TAG) + +# You may need to set up a docker builder, to do so run: +# docker buildx create --name mybuilder --bootstrap --use +# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info +build-multiplatform: + docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push . diff --git a/projects/tgi-llm/ui/README.md b/projects/tgi-llm/ui/README.md new file mode 100644 index 0000000..a233321 --- /dev/null +++ b/projects/tgi-llm/ui/README.md @@ -0,0 +1,35 @@ +# Gradio UI + +This is a utility UI project to chat with your TGI LLM. + +## Configuration + +Copy the [`gradio_ui.env.sample`](./gradio_ui.env.sample) file into a new file +called `gradio_ui.env` and fill in the necessary environment variables. + +```bash +cp gradio_ui.env.sample gradio_ui.env +``` + +Environment variables are as follows: + +```bash +TGI_SERVICE_URL= # URL to your running TGI service +HF_API_TOKEN= +PROMPT_FILE_PATH= # path to a prompt file +``` + +## Running + +Simply run: + +```bash +make run +``` + +The UI will run on port `3001` on your localhost. You can change that configuration +[here](./Makefile#L11). + +Congratulations! You have successfully set up the Gradio UI for your TGI LLM. + +Now you can go to `http://localhost:3001` and chat with your LLM instance. diff --git a/projects/tgi-llm/ui/gradio_ui.env.sample b/projects/tgi-llm/ui/gradio_ui.env.sample new file mode 100644 index 0000000..58a00d5 --- /dev/null +++ b/projects/tgi-llm/ui/gradio_ui.env.sample @@ -0,0 +1,3 @@ +TGI_SERVICE_URL= +HF_API_TOKEN= +PROMPT_FILE_PATH=./prompt.txt diff --git a/projects/tgi-llm/ui/prompt.txt b/projects/tgi-llm/ui/prompt.txt new file mode 100644 index 0000000..816092c --- /dev/null +++ b/projects/tgi-llm/ui/prompt.txt @@ -0,0 +1 @@ +You're a friendly chatbot. diff --git a/projects/tgi-llm/ui/src/app.py b/projects/tgi-llm/ui/src/app.py new file mode 100644 index 0000000..46faf3f --- /dev/null +++ b/projects/tgi-llm/ui/src/app.py @@ -0,0 +1,109 @@ +import os +from builtins import str +from pathlib import Path +from typing import Union, cast, Any, Callable + +import gradio as gr # type: ignore +from dotenv import load_dotenv +from huggingface_hub import InferenceClient # type: ignore + +load_dotenv() + +TGI_SERVICE_URL = os.getenv("TGI_SERVICE_URL") +HF_API_TOKEN = os.getenv("HF_API_TOKEN") + +client = InferenceClient(model=TGI_SERVICE_URL) + + +def start_interface( + lambdafn: Callable[[str, list[str]], Any], + examples: list[str], + title: str, + description: str, + share: bool = True, + height: int = 300, + placeholder: str = "Chat with me!", + scale: int = 7, + container: bool = False, +) -> None: + """ + Starts the Gradio interface for the Jazz model. + + Args: + lambdafn (callable): text_generation lambda fn with message, history + examples (list[str]): A list of example inputs for the interface. + title (str): The gradio title. + description (str): The gradio description. + share (bool): Whether to generate a global gradio link for 72 hours. 
+ height (int): Height of chat window in pixels. + placeholder (str): Placeholder when chat window is empty. + scale (int): The scale of the chat window. + container (bool): Show the chat window in a container. + """ + gr.ChatInterface( + lambdafn, + chatbot=gr.Chatbot(height=height), + textbox=gr.Textbox(placeholder=placeholder, container=container, scale=scale), + description=description, + title=title, + examples=examples, + retry_btn="Retry", + undo_btn="Undo", + clear_btn="Clear", + ).queue().launch(share=share, server_name="0.0.0.0") + + +def read_text_file(file_path: Union[Path, str]) -> str: + """Reads content from file as a string.""" + with open(file_path, "r") as file: + return file.read() + + +def main() -> None: + cwd = os.getcwd() + + PROMPT_FILE_PATH: str = cast(str, os.getenv("PROMPT_FILE_PATH")) + + if not PROMPT_FILE_PATH: + raise ValueError("PROMPT_FILE_PATH is not set in the environment.") + + input_text = read_text_file(os.path.join(cwd, PROMPT_FILE_PATH)) + + def prompt_formatter(user_prompt: str, input_text: str) -> str: + return user_prompt + + # You should write your own lambdafn to set the parameters + # Gradio doesn't currently support functions with more than + # [message,history] as parameters into the interface + # if you don't want the user to see them. + def stream_inference(message: str, history: list[str]) -> Any: + response = client.text_generation( + prompt_formatter(message, input_text), + max_new_tokens=40, + temperature=0.3, + details=True, + ).generated_text + # this is just for the gradio front end, you can ignore for + # backend in the ML model for strikethroughs. + if response.startswith(""): + response = response[3:] + yield response + + title = "Your Ritual Model🎷" + description = "This is the demo for your model." + + # if you want a global url others can visit. + share = True + examples = ["Can shrimp actually fry rice?"] + + start_interface( + lambdafn=stream_inference, + title=title, + description=description, + share=share, + examples=examples, + ) + + +if __name__ == "__main__": + main() diff --git a/projects/tgi-llm/ui/src/requirements.txt b/projects/tgi-llm/ui/src/requirements.txt new file mode 100644 index 0000000..9bff6ea --- /dev/null +++ b/projects/tgi-llm/ui/src/requirements.txt @@ -0,0 +1,4 @@ +python-dotenv==1.0.0 +gradio==3.47.1 +huggingface-hub==0.17.3 +text-generation==0.6.1 diff --git a/projects/torch-iris/container/Dockerfile b/projects/torch-iris/container/Dockerfile new file mode 100644 index 0000000..57add3f --- /dev/null +++ b/projects/torch-iris/container/Dockerfile @@ -0,0 +1,25 @@ +FROM python:3.11-slim as builder + +WORKDIR /app + +ENV PYTHONUNBUFFERED 1 +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PIP_NO_CACHE_DIR 1 +ENV RUNTIME docker +ENV PYTHONPATH src + +RUN apt-get update +RUN apt-get install -y git curl + +# install uv +ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh +RUN /install.sh && rm /install.sh + +COPY src/requirements.txt . + +RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt + +COPY src src + +ENTRYPOINT ["hypercorn", "app:create_app()"] +CMD ["-b", "0.0.0.0:3000"] diff --git a/projects/torch-iris/container/Makefile b/projects/torch-iris/container/Makefile new file mode 100644 index 0000000..ea37b79 --- /dev/null +++ b/projects/torch-iris/container/Makefile @@ -0,0 +1,17 @@ +DOCKER_ORG := ritualnetwork +EXAMPLE_NAME := torch-iris +TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest + +.phony: build run + +build: + @docker build -t $(TAG) . 
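# The run target below starts the image just built; port 3000 matches the
# hypercorn bind address in this container's Dockerfile.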
+ +run: + docker run -p 3000:3000 $(TAG) + +# You may need to set up a docker builder, to do so run: +# docker buildx create --name mybuilder --bootstrap --use +# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info +build-multiplatform: + docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push . diff --git a/projects/torch-iris/container/README.md b/projects/torch-iris/container/README.md new file mode 100644 index 0000000..41e1246 --- /dev/null +++ b/projects/torch-iris/container/README.md @@ -0,0 +1,110 @@ +# Iris Classification via Torch + +This example uses a pre-trained model to classify iris flowers. The code for the model +is located at +our [simple-ml-models](https://github.com/ritual-net/simple-ml-models/tree/main/iris_classification) +repository. + +## Overview + +We're making use of +the [TorchInferenceWorkflow](https://github.com/ritual-net/infernet-ml-internal/blob/main/src/ml/workflows/inference/torch_inference_workflow.py) +class to run the model. This is one of many workflows that we currently support in our +[infernet-ml](https://github.com/ritual-net/infernet-ml-internal). Consult the library's +documentation for more info on workflows that +are supported. + +## Building & Running the Container in Isolation + +Note that this container is meant to be started by the infernet-node. For development & +testing purposes, you can run the container in isolation using the following commands. + +### Building the Container + +Simply run the following command to build the container. + +```bash +make build +``` + +Consult the [Makefile](./Makefile) for the build command. + +### Running the Container + +To run the container, you can use the following command: + +```bash +make run +``` + +## Testing the Container + +Run the following command to perform an inference: + +```bash +curl -X POST "http://127.0.0.1:3000/service_output" \ + -H "Content-Type: application/json" \ + -d '{"source":1, "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}}' +``` + +#### Note Regarding the Input + +The inputs provided above correspond to an iris flower with the following +characteristics. Refer to the + +1. Sepal Length: `5.5cm` +2. Sepal Width: `2.4cm` +3. Petal Length: `3.8cm` +4. Petal Width: `1.1cm` + +Putting this input into a vector and scaling it, we get the following scaled input: + +```python +[1.0380048, 0.5586108, 1.1037828, 1.712096] +``` + +Refer +to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13) +for more information on how the input is scaled. + +For more context on the Iris dataset, refer to +the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/iris). + +### Output + +By running the above command, you should get a response similar to the following: + +```json +{ + "input_data": [ + [ + 1.0380048, + 0.5586108, + 1.1037828, + 1.712096 + ] + ], + "input_shapes": [ + [ + 4 + ] + ], + "output_data": [ + [ + 0.0016699483385309577, + 0.021144982427358627, + 0.977185070514679 + ] + ] +} +``` + +The `output_data` corresponds to the model's prediction for each of the classes: + +```python +['setosa', 'versicolor', 'virginica'] +``` + +In this case, the model predicts that the input corresponds to the class `virginica` +with +a probability of `0.977185070514679` (97.7%). 
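If you want to consume this endpoint programmatically rather than with `curl`, a small sketch along the same lines (it assumes the container is reachable on `127.0.0.1:3000` as above and that `requests` is installed) could be:

```python
import requests

CLASSES = ["setosa", "versicolor", "virginica"]

resp = requests.post(
    "http://127.0.0.1:3000/service_output",
    json={"source": 1, "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}},
).json()

# output_data holds one probability vector per input row
probs = resp["output_data"][0]
best = max(range(len(probs)), key=probs.__getitem__)
print(f"{CLASSES[best]}: {probs[best]:.1%}")  # expected: virginica: 97.7%
```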
diff --git a/projects/torch-iris/container/config.json b/projects/torch-iris/container/config.json new file mode 100644 index 0000000..369625a --- /dev/null +++ b/projects/torch-iris/container/config.json @@ -0,0 +1,50 @@ +{ + "log_path": "infernet_node.log", + "server": { + "port": 4000 + }, + "chain": { + "enabled": true, + "trail_head_blocks": 0, + "rpc_url": "http://host.docker.internal:8545", + "coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "wallet": { + "max_gas_limit": 4000000, + "private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" + } + }, + "startup_wait": 1.0, + "docker": { + "username": "your-username", + "password": "" + }, + "redis": { + "host": "redis", + "port": 6379 + }, + "forward_stats": true, + "containers": [ + { + "id": "torch-iris", + "image": "ritualnetwork/example-torch-iris-infernet:latest", + "external": true, + "port": "3000", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "--bind=0.0.0.0:3000 --workers=2", + "env": {} + }, + { + "id": "anvil-node", + "image": "ritualnetwork/infernet-anvil:0.0.0", + "external": true, + "port": "8545", + "allowed_delegate_addresses": [], + "allowed_addresses": [], + "allowed_ips": [], + "command": "", + "env": {} + } + ] +} diff --git a/projects/torch-iris/container/scripts/sample_endpoints.py b/projects/torch-iris/container/scripts/sample_endpoints.py new file mode 100644 index 0000000..5d4a7ee --- /dev/null +++ b/projects/torch-iris/container/scripts/sample_endpoints.py @@ -0,0 +1,52 @@ +import asyncio + +import aiohttp +from eth_abi import encode, decode # type: ignore + + +async def ping(session: aiohttp.ClientSession) -> None: + async with session.get("http://127.0.0.1:3000/") as response: + print(await response.text()) + + +async def post_directly_web2(session: aiohttp.ClientSession) -> None: + async with session.post( + "http://127.0.0.1:3000/service_output", + json={ + "source": 1, + "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}, + }, + ) as response: + print(await response.json()) + + +async def post_directly_web3(session: aiohttp.ClientSession) -> None: + async with session.post( + "http://127.0.0.1:3000/service_output", + json={ + "source": 0, + "data": encode( + ["uint256[]"], [[1_038_004, 558_610, 1_103_782, 1_712_096]] + ).hex(), + }, + ) as response: + print(await response.text()) + result = await response.json() + output = result["raw_output"] + result = decode(["uint256[]"], bytes.fromhex(output))[0] + print(f"result: {result}") + + +# async main +async def main(session: aiohttp.ClientSession) -> None: + await post_directly_web3(session) + + +if __name__ == "__main__": + # run main async + + async def provide_session() -> None: + async with aiohttp.ClientSession() as session: + await main(session) + + asyncio.run(provide_session()) diff --git a/projects/torch-iris/container/src/app.py b/projects/torch-iris/container/src/app.py new file mode 100644 index 0000000..def8526 --- /dev/null +++ b/projects/torch-iris/container/src/app.py @@ -0,0 +1,110 @@ +import logging +from typing import Any, cast, List + +from eth_abi import decode, encode # type: ignore +from infernet_ml.utils.model_loader import ModelSource +from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource +from infernet_ml.workflows.inference.torch_inference_workflow import ( + TorchInferenceWorkflow, +) +from quart import Quart, request + +# Note: the IrisClassificationModel needs to be imported in this file for 
it to exist +# in the classpath. This is because pytorch requires the model to be in the classpath. +# Simply downloading the weights and model from the hub is not enough. +from iris_classification_model import IrisClassificationModel + +log = logging.getLogger(__name__) + + +def create_app() -> Quart: + app = Quart(__name__) + # we are downloading the model from the hub. + # model repo is located at: https://huggingface.co/Ritual-Net/iris-dataset + model_source = ModelSource.HUGGINGFACE_HUB + model_args = {"repo_id": "Ritual-Net/iris-dataset", "filename": "iris.torch"} + + workflow = TorchInferenceWorkflow(model_source=model_source, model_args=model_args) + workflow.setup() + + @app.route("/") + def index() -> str: + """ + Utility endpoint to check if the service is running. + """ + return ( + f"Torch Iris Classifier Example Program: {IrisClassificationModel.__name__}" + ) + + @app.route("/service_output", methods=["POST"]) + async def inference() -> dict[str, Any]: + req_data = await request.get_json() + """ + InfernetInput has the format: + source: (0 on-chain, 1 off-chain) + data: dict[str, Any] + """ + infernet_input: InfernetInput = InfernetInput(**req_data) + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + web2_input = cast(dict[str, Any], infernet_input.data) + values = cast(List[List[float]], web2_input["input"]) + else: + # On-chain requests are sent as a generalized hex-string which we will + # decode to the appropriate format. + web3_input: List[int] = decode( + ["uint256[]"], bytes.fromhex(cast(str, infernet_input.data)) + )[0] + values = [[float(v) / 1e6 for v in web3_input]] + + """ + The input to the torch inference workflow needs to conform to this format: + + { + "dtype": str, + "values": list[Any] + } + + For more information refer to: + https://docs.ritual.net/ml-workflows/inference-workflows/torch_inference_workflow + + """ + inference_result = workflow.inference({"dtype": "float", "values": values}) + + result = [o.detach().numpy().reshape([-1]).tolist() for o in inference_result] + + if infernet_input.source == InfernetInputSource.OFFCHAIN: + """ + In case of an off-chain request, the result is returned as is. + """ + return {"result": result} + else: + """ + In case of an on-chain request, the result is returned in the format: + { + "raw_input": str, + "processed_input": str, + "raw_output": str, + "processed_output": str, + "proof": str, + } + refer to: https://docs.ritual.net/infernet/node/containers for more info. + """ + predictions = cast(List[List[float]], result) + predictions_normalized = [int(p * 1e6) for p in predictions[0]] + return { + "raw_input": "", + "processed_input": "", + "raw_output": encode(["uint256[]"], [predictions_normalized]).hex(), + "processed_output": "", + "proof": "", + } + + return app + + +if __name__ == "__main__": + """ + Utility to run the app locally. For development purposes only. + """ + create_app().run(port=3000) diff --git a/projects/torch-iris/container/src/iris_classification_model.py b/projects/torch-iris/container/src/iris_classification_model.py new file mode 100644 index 0000000..a1a9f48 --- /dev/null +++ b/projects/torch-iris/container/src/iris_classification_model.py @@ -0,0 +1,23 @@ +import torch.nn as nn +import torch +import torch.nn.functional as F + +""" +The IrisClassificationModel torch module. This is the computation graph that was used to +train the model. 
Refer to: +https://github.com/ritual-net/simple-ml-models/tree/main/iris_classification +""" + + +class IrisClassificationModel(nn.Module): + def __init__(self, input_dim: int) -> None: + super(IrisClassificationModel, self).__init__() + self.layer1 = nn.Linear(input_dim, 50) + self.layer2 = nn.Linear(50, 50) + self.layer3 = nn.Linear(50, 3) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.relu(self.layer1(x)) + x = F.relu(self.layer2(x)) + x = F.softmax(self.layer3(x), dim=1) + return x diff --git a/projects/torch-iris/container/src/requirements.txt b/projects/torch-iris/container/src/requirements.txt new file mode 100644 index 0000000..c8347e3 --- /dev/null +++ b/projects/torch-iris/container/src/requirements.txt @@ -0,0 +1,7 @@ +quart==0.19.4 +infernet_ml==0.1.0 +PyArweave @ git+https://github.com/ritual-net/pyarweave.git +huggingface-hub==0.17.3 +sk2torch==1.2.0 +torch==2.1.2 +web3==6.15.0 diff --git a/projects/torch-iris/contracts/.gitignore b/projects/torch-iris/contracts/.gitignore new file mode 100644 index 0000000..85198aa --- /dev/null +++ b/projects/torch-iris/contracts/.gitignore @@ -0,0 +1,14 @@ +# Compiler files +cache/ +out/ + +# Ignores development broadcast logs +!/broadcast +/broadcast/*/31337/ +/broadcast/**/dry-run/ + +# Docs +docs/ + +# Dotenv file +.env diff --git a/projects/torch-iris/contracts/Makefile b/projects/torch-iris/contracts/Makefile new file mode 100644 index 0000000..1a0bac3 --- /dev/null +++ b/projects/torch-iris/contracts/Makefile @@ -0,0 +1,14 @@ +# phony targets are targets that don't actually create a file +.phony: deploy call-contract + +# anvil's third default address +sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a +RPC_URL := http://localhost:8545 + +# deploying the contract +deploy: + @PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL) + +# calling sayGM() +call-contract: + @PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL) diff --git a/projects/torch-iris/contracts/README.md b/projects/torch-iris/contracts/README.md new file mode 100644 index 0000000..4fc0051 --- /dev/null +++ b/projects/torch-iris/contracts/README.md @@ -0,0 +1,41 @@ +# `Torch` Consumer Contracts + +This is a [foundry](https://book.getfoundry.sh/) project that implements a simple Consumer +contract, [`IrisClassifier`](./src/IrisClassifier.sol). + +This readme explains how to compile and deploy the contract to the Infernet Anvil Testnet network. + +> [!IMPORTANT] +> Ensure that you are running the following scripts with the Infernet Anvil Testnet network. +> The [tutorial](../../hello-world/README.mdADME.md) at the root of this repository explains how to +> bring up an infernet node. + +### Installing the libraries + +```bash +forge install +``` + +### Compiling the contracts + +```bash +forge compile +``` + +### Deploying the contracts +The deploy script at `script/Deploy.s.sol` deploys the `IrisClassifier` contract to the Infernet Anvil Testnet network. + +We have the [following make target](./Makefile#L9) to deploy the contract. Refer to the Makefile +for more understanding around the deploy scripts. +```bash +make deploy +``` + +### Requesting a job +We also have a script called `CallContract.s.sol` that requests a job to the `IrisClassifier` contract. +Refer to the [script](./script/CallContract.s.sol) for more details. Similar to deployment, +you can run that script using the following convenience make target. 
+```bash +make call-contract +``` +Refer to the [Makefile](./Makefile#L14) for more details. diff --git a/projects/torch-iris/contracts/foundry.toml b/projects/torch-iris/contracts/foundry.toml new file mode 100644 index 0000000..83816a2 --- /dev/null +++ b/projects/torch-iris/contracts/foundry.toml @@ -0,0 +1,7 @@ +[profile.default] +src = "src" +out = "out" +libs = ["lib"] +via_ir = true + +# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options diff --git a/projects/torch-iris/contracts/remappings.txt b/projects/torch-iris/contracts/remappings.txt new file mode 100644 index 0000000..c788350 --- /dev/null +++ b/projects/torch-iris/contracts/remappings.txt @@ -0,0 +1,2 @@ +forge-std/=lib/forge-std/src +infernet-sdk/=lib/infernet-sdk/src diff --git a/projects/torch-iris/contracts/script/CallContract.s.sol b/projects/torch-iris/contracts/script/CallContract.s.sol new file mode 100644 index 0000000..3612da2 --- /dev/null +++ b/projects/torch-iris/contracts/script/CallContract.s.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.0; + +import {Script, console2} from "forge-std/Script.sol"; +import {IrisClassifier} from "../src/IrisClassifier.sol"; + +contract CallContract is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + IrisClassifier irisClassifier = IrisClassifier(0x663F3ad617193148711d28f5334eE4Ed07016602); + + irisClassifier.classifyIris(); + + vm.stopBroadcast(); + } +} diff --git a/projects/torch-iris/contracts/script/Deploy.s.sol b/projects/torch-iris/contracts/script/Deploy.s.sol new file mode 100644 index 0000000..94fb53e --- /dev/null +++ b/projects/torch-iris/contracts/script/Deploy.s.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {Script, console2} from "forge-std/Script.sol"; +import {IrisClassifier} from "../src/IrisClassifier.sol"; + +contract Deploy is Script { + function run() public { + // Setup wallet + uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY"); + vm.startBroadcast(deployerPrivateKey); + + // Log address + address deployerAddress = vm.addr(deployerPrivateKey); + console2.log("Loaded deployer: ", deployerAddress); + + address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3; + // Create consumer + IrisClassifier classifier = new IrisClassifier(coordinator); + console2.log("Deployed IrisClassifier: ", address(classifier)); + + // Execute + vm.stopBroadcast(); + vm.broadcast(); + } +} diff --git a/projects/torch-iris/contracts/src/IrisClassifier.sol b/projects/torch-iris/contracts/src/IrisClassifier.sol new file mode 100644 index 0000000..77ddade --- /dev/null +++ b/projects/torch-iris/contracts/src/IrisClassifier.sol @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +pragma solidity ^0.8.13; + +import {console2} from "forge-std/console2.sol"; +import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol"; + + +contract IrisClassifier is CallbackConsumer { + string private EXTREMELY_COOL_BANNER = "\n\n" + "_____ _____ _______ _ _ _\n" + "| __ \\|_ _|__ __| | | | /\\ | |\n" + "| |__) | | | | | | | | | / \\ | |\n" + "| _ / | | | | | | | |/ /\\ \\ | |\n" + "| | \\ \\ _| |_ | | | |__| / ____ \\| |____\n" + "|_| \\_\\_____| |_| \\____/_/ \\_\\______|\n\n"; + + constructor(address coordinator) CallbackConsumer(coordinator) {} + + function classifyIris() public { + /// @dev Iris data 
is in the following format: + /// @dev [sepal_length, sepal_width, petal_length, petal_width] + /// @dev the following vector corresponds to the following properties: + /// "sepal_length": 5.5cm + /// "sepal_width": 2.4cm + /// "petal_length": 3.8cm + /// "petal_width": 1.1cm + /// @dev The data is normalized & scaled. + /// refer to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13) + /// for more info on normalization. + /// @dev The data is adjusted by 6 decimals + + uint256[] memory iris_data = new uint256[](4); + iris_data[0] = 1_038_004; + iris_data[1] = 558_610; + iris_data[2] = 1_103_782; + iris_data[3] = 1_712_096; + + _requestCompute( + "torch-iris", + abi.encode(iris_data), + 20 gwei, + 1_000_000, + 1 + ); + } + + function _receiveCompute( + uint32 subscriptionId, + uint32 interval, + uint16 redundancy, + address node, + bytes calldata input, + bytes calldata output, + bytes calldata proof + ) internal override { + console2.log(EXTREMELY_COOL_BANNER); + (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes)); + (uint256[] memory classes) = abi.decode(raw_output, (uint256[])); + uint256 setosa = classes[0]; + uint256 versicolor = classes[1]; + uint256 virginica = classes[2]; + console2.log("predictions: (adjusted by 6 decimals, 1_000_000 = 100%, 1_000 = 0.1%)"); + console2.log("Setosa: ", setosa); + console2.log("Versicolor: ", versicolor); + console2.log("Virginica: ", virginica); + } +} diff --git a/projects/torch-iris/torch-iris.md b/projects/torch-iris/torch-iris.md new file mode 100644 index 0000000..1d5fbcd --- /dev/null +++ b/projects/torch-iris/torch-iris.md @@ -0,0 +1,292 @@ +# Running a Torch Model on Infernet + +Welcome to this comprehensive guide where we'll explore how to run a `pytorch` model on Infernet. If you've followed +our ONNX example, you'll find this guide to be quite similar. + +**Model:** This example uses a pre-trained model to classify iris flowers. The code for the model +is located at the [simple-ml-models](https://github.com/ritual-net/simple-ml-models/tree/main/iris_classification) +repository. + +## Pre-requisites + +For this tutorial you'll need to have the following installed. + +1. [Docker](https://docs.docker.com/engine/install/) +2. [Foundry](https://book.getfoundry.sh/getting-started/installation) + +### Ensure `docker` & `foundry` exist + +To check for `docker`, run the following command in your terminal: + +```bash copy +docker --version +# Docker version 25.0.2, build 29cf629 (example output) +``` + +You'll also need to ensure that docker-compose exists in your terminal: + +```bash copy +which docker-compose +# /usr/local/bin/docker-compose (example output) +``` + +To check for `foundry`, run the following command in your terminal: + +```bash copy +forge --version +# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output) +``` + +### Clone the starter repository + +If you haven't already, clone the infernet-container-starter repository. All of the code for this tutorial is located +under the `projects/torch-iris` directory. 
+ +```bash copy +# Clone locally +git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter +# Navigate to the repository +cd infernet-container-starter +``` + +### Build the `torch-iris` container + +From the top-level directory of this repository, simply run the following command to build the `torch-iris` container: + +```bash copy +make build-container project=torch-iris +``` + +After the container is built, you can deploy an infernet-node that utilizes that +container by running the following command: + +```bash +make deploy-container project=torch-iris +``` + +## Making Inference Requests via Node API (a la Web2 request) + +Now, you can make inference requests to the infernet-node. In a new tab, run: + +```bash +curl -X POST "http://127.0.0.1:4000/api/jobs" \ + -H "Content-Type: application/json" \ + -d '{"containers":["torch-iris"], "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}}' +``` + +You should get an output similar to the following: + +```json +{ + "id": "6d5e47f0-5907-4ab2-9523-862dccb80d67" +} +``` + +Now, you can check the status of the job by running (make sure job id matches the one +you got from the previous request): + +```bash +curl "http://127.0.0.1:4000/api/jobs?id=6d5e47f0-5907-4ab2-9523-862dccb80d67" +``` + +Should return: + +```json +[ + { + "id": "6d5e47f0-5907-4ab2-9523-862dccb80d67", + "result": { + "container": "torch-iris", + "output": { + "input_data": [ + [ + 1.038004755973816, + 0.5586107969284058, + 1.1037827730178833, + 1.7120959758758545 + ] + ], + "input_shapes": [ + [ + 4 + ] + ], + "output_data": [ + [ + 0.0016699483385309577, + 0.021144982427358627, + 0.977185070514679 + ] + ] + } + }, + "status": "success" + } +] +``` + +#### Note Regarding the Input + +The inputs provided above correspond to an iris flower with the following +characteristics. Refer to the + +1. Sepal Length: `5.5cm` +2. Sepal Width: `2.4cm` +3. Petal Length: `3.8cm` +4. Petal Width: `1.1cm` + +Putting this input into a vector and scaling it, we get the following scaled input: + +```python +[1.0380048, 0.5586108, 1.1037828, 1.712096] +``` + +Refer +to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13) +for more information on how the input is scaled. + +For more context on the Iris dataset, refer to +the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/iris). + +## Making Inference Requests via Contracts (a la Web3 request) + +The [contracts](contracts) directory contains a simple forge +project that can be used to interact with the Infernet Node. + +Here, we have a very simple +contract, [IrisClassifier](contracts/src/IrisClassifier.sol), +that requests a compute job from the Infernet Node and then retrieves the result. +We are going to make the same request as above, but this time using a smart contract. +Since floats are not supported in Solidity, we convert all floats to `uint256` by +multiplying the input vector entries by `1e6`: + +```solidity + uint256[] memory iris_data = new uint256[](4); +iris_data[0] = 1_038_004; +iris_data[1] = 558_610; +iris_data[2] = 1_103_782; +iris_data[3] = 1_712_096; +``` + +We have multiplied the input by 1e6 to have enough decimals accuracy. This can be seen +[here](contracts/src/IrisClassifier.sol#19) in the contract's +code. 
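To see the same fixed-point round trip outside of Solidity, here is a small Python sketch mirroring the encoding already used in `container/scripts/sample_endpoints.py` (the values are the ones from this tutorial):

```python
from eth_abi import decode, encode  # type: ignore

# The on-chain request carries the features as 1e6-scaled integers,
# exactly like the Solidity snippet above.
scaled = [1_038_004, 558_610, 1_103_782, 1_712_096]
payload = encode(["uint256[]"], [scaled]).hex()

# The container decodes the payload and rescales to floats before inference.
features = [v / 1e6 for v in decode(["uint256[]"], bytes.fromhex(payload))[0]]
print(features)  # [1.038004, 0.55861, 1.103782, 1.712096]

# The class probabilities come back the same way: uint256 values where
# 1_000_000 means 100% (e.g. ~977_185 for virginica).
```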
+ +### Infernet's Anvil Testnet + +To request an on-chain job, you'll need to deploy contracts using the infernet sdk. +We already have a public [anvil node](https://hub.docker.com/r/ritualnetwork/infernet-anvil) docker image which has the +corresponding infernet sdk contracts deployed, along with a node that has +registered itself to listen to on-chain subscription events. + +* Coordinator Address: `0x5FbDB2315678afecb367f032d93F642f64180aa3` +* Node Address: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8` (This is the second account in the anvil's accounts.) + +### Monitoring the EVM Logs + +The infernet node configuration for this project includes our anvil node. You can monitor the logs of the anvil node to +see what's going on. In a new terminal, run: + +```bash +docker logs -f anvil-node +``` + +As you deploy the contract and make requests, you should see logs indicating the +requests and responses. + +### Deploying the Contract + +Simply run the following command to deploy the contract: + +```bash +project=torch-iris make deploy-contracts +``` + +In your anvil logs you should see the following: + +```bash +eth_feeHistory +eth_sendRawTransaction +eth_getTransactionReceipt + + Transaction: 0x8e7e96d0a062285ee6fea864c43c29af65b962d260955e6284ab79dae145b32c + Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602 + Gas used: 725947 + + Block Number: 1 + Block Hash: 0x88c1a1af024cca6f921284bd61663b1d500aa6d22d06571f0a085c2d8e1ffe92 + Block Time: "Mon, 19 Feb 2024 16:44:00 +0000" + +eth_blockNumber +eth_newFilter +eth_getFilterLogs +eth_blockNumber +``` + +beautiful, we can see that a new contract has been created +at `0x663f3ad617193148711d28f5334ee4ed07016602`. That's the address of +the `IrisClassifier` contract. We are now going to call this contract. To do so, +we are using +the [CallContract.s.sol](contracts/script/CallContract.s.sol) +script. Note that the address of the +contract [is hardcoded in the script](contracts/script/CallContract.s.sol#L13), +and should match the address we see above. Since this is a test environment and we're +using a test deployer address, this address is quite deterministic and shouldn't change. +Otherwise, change the address in the script to match the address of the contract you +just deployed. + +### Calling the Contract + +To call the contract, run the following command: + +```bash +project=torch-iris make call-contract +``` + +In the anvil logs, you should see the following: + +```bash +eth_sendRawTransaction + + +_____ _____ _______ _ _ _ +| __ \|_ _|__ __| | | | /\ | | +| |__) | | | | | | | | | / \ | | +| _ / | | | | | | | |/ /\ \ | | +| | \ \ _| |_ | | | |__| / ____ \| |____ +|_| \_\_____| |_| \____/_/ \_\______| + + +about to decode babyyy +predictions: (adjusted by 6 decimals, 1_000_000 = 100%, 1_000 = 0.1%) +Setosa: 1669 +Versicolor: 21144 +Virginica: 977185 + + Transaction: 0x252158ab9dd2178b6a11e417090988782861d208d8e9bb01c4e0635316fd95c9 + Gas used: 111762 + + Block Number: 3 + Block Hash: 0xfba07bd65da8dde644ba07ff67f0d79ed36f388760f27dcf02d96f7912d34c4c + Block Time: "Mon, 19 Feb 2024 16:54:07 +0000" + +eth_blockNumbereth_blockNumber +eth_blockNumber +``` + +Beautiful! We can see that the same result has been posted to the contract. + +For more information about the container, consult +the [container's readme.](container/README.md) + +### Next Steps + +From here, you can bring your own trained pytorch model, and with minimal changes, you can make it both work with an +infernet-node as well as a smart contract. 
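In practice, "minimal changes" usually amounts to serializing your own `nn.Module` and pointing the workflow's `model_args` at it. The snippet below is only a rough sketch, under the assumption that your model is saved as a full module (the way `iris.torch` is consumed in this example); the tiny architecture, file name, and repo id are placeholders:

```python
import torch
import torch.nn as nn


class MyTinyModel(nn.Module):
    """Placeholder architecture: swap in your own trained module."""

    def __init__(self) -> None:
        super().__init__()
        self.linear = nn.Linear(4, 3)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear(x)


# Save the full module (not just the state_dict), matching how this example
# loads iris.torch. The class must remain importable inside the container.
torch.save(MyTinyModel(), "my_model.torch")

# Then, in src/app.py, point the workflow at your own artifact, e.g.:
# model_args = {"repo_id": "your-org/your-model-repo", "filename": "my_model.torch"}
```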
+
+### More Information
+
+1. Check out our [ONNX example](../onnx-iris/onnx-iris.md) if you haven't already.
+2. [Infernet Callback Consumer Tutorial](https://docs.ritual.net/infernet/sdk/consumers/Callback)
+3. [Infernet Nodes Documentation](https://docs.ritual.net/infernet/node/introduction)
+4. [Infernet-Compatible Containers](https://docs.ritual.net/infernet/node/containers)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..4034edb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,22 @@
+gunicorn==21.2.0
+mypy==1.8.0
+mypy-extensions==1.0.0
+packaging==23.2
+requests==2.31.0
+ruff==0.1.13
+types-click==7.1.8
+types-Flask==1.1.6
+types-Jinja2==2.11.9
+types-MarkupSafe==1.1.10
+types-requests==2.31.0.20240125
+types-Werkzeug==1.0.9
+typing_extensions==4.9.0
+Flask==3.0.0
+quart==0.19.4
+infernet_ml==0.1.0
+PyArweave @ git+https://github.com/ritual-net/pyarweave.git
+torch==2.2.1
+web3==6.15.0
+onnx==1.15.0
+onnxruntime==1.17.1
+pre-commit==2.15.0