feat: publishing infernet-container-starter v0.2.0

This commit is contained in:
ritual-all 2024-03-29 10:50:13 -04:00
parent 41aaa152e6
commit 4545223364
No known key found for this signature in database
GPG Key ID: 44F6A6F5B09FFEB8
155 changed files with 6086 additions and 257 deletions

30
.github/workflows/docker_ci.yaml vendored Normal file
View File

@ -0,0 +1,30 @@
# Docker build workflow
#
# Builds each example project's container image on pull requests,
# ensuring every project's `make build-container` target stays green.
name: Build Docker Images

on:
  pull_request:
    branches:
      - main
      - dev

jobs:
  docker:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # One build job per example project
        project: [ "hello-world", "gpt4", "onnx-iris", "prompt-to-nft", "tgi-llm", "torch-iris" ]
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Run build example container
        env:
          project: ${{ matrix.project }}
          CI: true
        run: make build-container

30
.github/workflows/python_ci.yaml vendored Normal file
View File

@ -0,0 +1,30 @@
# pre-commit workflow
#
# Ensures the codebase passes the pre-commit stack.
name: pre-commit ci

on: [push]

jobs:
  python_ci:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Install UV
        run: python -m pip install uv

      - name: Create virtual environment
        run: uv venv

      # Persist the venv's PATH so later steps use its executables
      - name: Activate virtual environment
        run: |
          . .venv/bin/activate
          echo PATH=$PATH >> $GITHUB_ENV

      - name: Install dependencies
        run: uv pip install -r requirements.txt

      - name: Run pre-commit hooks
        run: pre-commit run --all-files --show-diff-on-failure

25
.gitignore vendored
View File

@ -1,3 +1,6 @@
# OS
**/.DS_Store
# Byte-compiled / optimized / DLL files
deploy/config.json
@ -5,6 +8,7 @@ __pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
@ -14,10 +18,23 @@ build/
*.egg-info/
# IDE specific files
.vscode/
.idea/
**/.vscode
**/.idea
# Virtual environment
venv/
venv
.venv/
*.env
**/.idea
# env files
**/*.env
# OS Files
**/.DS_Store
# Multi-deploykey CI
**/root-config
# sync scripts
remote_sync

33
.gitmodules vendored
View File

@ -4,3 +4,36 @@
[submodule "projects/hello-world/contracts/lib/infernet-sdk"]
path = projects/hello-world/contracts/lib/infernet-sdk
url = https://github.com/ritual-net/infernet-sdk
[submodule "projects/torch-iris/contracts/lib/infernet-sdk"]
path = projects/torch-iris/contracts/lib/infernet-sdk
url = https://github.com/ritual-net/infernet-sdk
[submodule "projects/torch-iris/contracts/lib/forge-std"]
path = projects/torch-iris/contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "projects/onnx-iris/contracts/lib/infernet-sdk"]
path = projects/onnx-iris/contracts/lib/infernet-sdk
url = https://github.com/ritual-net/infernet-sdk
[submodule "projects/onnx-iris/contracts/lib/forge-std"]
path = projects/onnx-iris/contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "projects/prompt-to-nft/contracts/lib/forge-std"]
path = projects/prompt-to-nft/contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "projects/prompt-to-nft/contracts/lib/infernet-sdk"]
path = projects/prompt-to-nft/contracts/lib/infernet-sdk
url = https://github.com/ritual-net/infernet-sdk
[submodule "projects/prompt-to-nft/contracts/lib/solmate"]
path = projects/prompt-to-nft/contracts/lib/solmate
url = https://github.com/transmissions11/solmate
[submodule "projects/gpt4/contracts/lib/infernet-sdk"]
path = projects/gpt4/contracts/lib/infernet-sdk
url = https://github.com/ritual-net/infernet-sdk
[submodule "projects/gpt4/contracts/lib/forge-std"]
path = projects/gpt4/contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "projects/tgi-llm/contracts/lib/forge-std"]
path = projects/tgi-llm/contracts/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "projects/tgi-llm/contracts/lib/infernet-sdk"]
path = projects/tgi-llm/contracts/lib/infernet-sdk
url = https://github.com/ritual-net/infernet-sdk

87
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,87 @@
# pre-commit stack: lint (ruff), format (black), strict typing (mypy per
# project), plus the default hygiene hooks.
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.0.289
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]

  - repo: https://github.com/psf/black
    rev: 23.9.1
    hooks:
      - id: black

  # A single local repo may declare multiple hooks; the previous config
  # repeated `- repo: local` once per project for no benefit.
  - repo: local
    hooks:
      - id: mypy-hello-world
        name: mypy hello-world
        entry: mypy --strict
        files: ^projects/hello-world/container/
        language: system
        types: [python]
      - id: mypy-torch-iris
        name: mypy torch-iris
        entry: mypy --strict
        files: ^projects/torch-iris/container/
        language: system
        types: [python]
      - id: mypy-onnx-iris
        name: mypy onnx-iris
        entry: mypy --strict
        files: ^projects/onnx-iris/container/
        language: system
        types: [python]
      - id: mypy-tgi-llm-container
        name: mypy tgi-llm container
        entry: mypy --strict
        files: ^projects/tgi-llm/container
        language: system
        types: [python]
      - id: mypy-tgi-llm-ui
        name: mypy tgi-llm ui
        entry: mypy --strict
        files: ^projects/tgi-llm/ui
        language: system
        types: [python]
      - id: mypy-gpt4
        name: mypy gpt4
        entry: mypy --strict
        files: ^projects/gpt4/container
        language: system
        types: [python]
      - id: mypy-prompt-to-nft
        name: mypy prompt-to-nft
        entry: mypy --strict
        files: ^projects/prompt-to-nft/container
        language: system
        types: [python]

  # Default pre-commit hooks
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      # Ensure files end with a newline
      - id: end-of-file-fixer
      # Prevent adding large files
      - id: check-added-large-files
        args: ["--maxkb=5000"]
      # Trim trailing whitespace
      - id: trailing-whitespace

View File

@ -1,9 +1,27 @@
build-container:
$(MAKE) -C ./projects/$(project)/container build
remove-containers:
docker compose -f deploy/docker-compose.yaml down || true
docker stop $(project) anvil-node && docker rm $(project) anvil-node || true
build-multiplatform:
$(MAKE) -C ./projects/$(project)/container build-multiplatform
deploy-container:
$(MAKE) remove-containers
cp ./projects/$(project)/container/config.json deploy/config.json
cd deploy && docker-compose up
docker compose -f deploy/docker-compose.yaml up -d
docker compose -f deploy/docker-compose.yaml logs -f
deploy-contracts:
$(MAKE) -C ./projects/$(project)/contracts deploy
call-contract:
$(MAKE) -C ./projects/$(project)/contracts call-contract
build-service:
$(MAKE) -C ./projects/$(project)/$(service) build
run-service:
$(MAKE) -C ./projects/$(project)/$(service) run

236
README.md
View File

@ -1,221 +1,19 @@
# infernet-container-starter
Starter examples for deploying to infernet.
# Getting Started
To interact with infernet, one could either create a job by accessing an infernet
node directly through its API (we'll refer to this as an off-chain job), or by
creating a subscription on-chain (we'll refer to this as an on-chain job).
## Requesting an off-chain job: Hello World!
The easiest way to get started is to run our hello-world container.
This is a simple [flask-app](projects/hello-world/container/src/app.py) that
is compatible with `infernet`, and simply
[echoes what you send to it](./projects/hello-world/container/src/app.py#L16).
We already have it [hosted on docker hub](https://hub.docker.com/r/ritualnetwork/hello-world-infernet) .
If you're curious how it's made, you can
follow the instructions [here](projects/hello-world/container/README.md) to build your own infernet-compatible
container.
### Install Docker
To run this, you'll need to have docker installed. You can find instructions
for installing docker [here](https://docs.docker.com/install/).
### Running Locally
First, ensure that the docker daemon is running.
Then, from the top-level project directory, Run the following make command:
```
project=hello-world make deploy-container
```
This will deploy an infernet node along with the `hello-world` image.
### Creating an off-chain job through the API
You can create an off-chain job by posting to the `node` directly.
```bash
curl -X POST http://127.0.0.1:4000/api/jobs \
-H "Content-Type: application/json" \
-d '{"containers":["hello-world"], "data": {"some": "input"}}'
# returns
{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007"}
```
This will return the id of that job.
### Getting the status/result/errors of a job
You can check the status of a job like so:
```bash
curl -X GET http://127.0.0.1:4000/api/jobs?id=d5281dd5-c4f4-4523-a9c2-266398e06007
# returns
[{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007", "result":{"container":"hello-world","output": {"output":"hello, world!, your input was: {'source': 1, 'data': {'some': 'input'}}"}} ,"status":"success"}]
```
### Configuration
This project already comes with a pre-filled config file. The config
file for the hello-world project is located [here](projects/hello-world/container/config.json):
```bash
projects/hello-world/config.json
```
## Requesting an on-chain job
In this section we'll go over how to request an on-chain job in a local testnet.
### Infernet's Anvil Testnet
To request an on-chain job, you'll need to deploy contracts using the infernet sdk.
We already have a public [anvil node](https://hub.docker.com/r/ritualnetwork/infernet-anvil) docker image which has the
corresponding infernet sdk contracts deployed, along with a node that has
registered itself to listen to on-chain subscription events.
* Coordinator Address: `0x5FbDB2315678afecb367f032d93F642f64180aa3`
* Node Address: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8` (This is the second account in the anvil's accounts.)
### Deploying Infernet Node & Infernet's Anvil Testnet
This step is similar to the section above:
```bash
project=hello-world make deploy-container
```
In another terminal, run `docker container ls`, you should see something like this
```bash
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c2ca0ffe7817 ritualnetwork/infernet-anvil:0.0.0 "anvil --host 0.0.0.…" 9 seconds ago Up 8 seconds 0.0.0.0:8545->3000/tcp anvil-node
0b686a6a0e5f ritualnetwork/hello-world-infernet:0.0.2 "gunicorn app:create…" 9 seconds ago Up 8 seconds 0.0.0.0:3000->3000/tcp hello-world
28b2e5608655 ritualnetwork/infernet-node:0.1.1 "/app/entrypoint.sh" 10 seconds ago Up 10 seconds 0.0.0.0:4000->4000/tcp deploy-node-1
03ba51ff48b8 fluent/fluent-bit:latest "/fluent-bit/bin/flu…" 10 seconds ago Up 10 seconds 2020/tcp, 0.0.0.0:24224->24224/tcp deploy-fluentbit-1
a0d96f29a238 redis:latest "docker-entrypoint.s…" 10 seconds ago Up 10 seconds 0.0.0.0:6379->6379/tcp deploy-redis-1
```
You can see that the anvil node is running on port `8545`, and the infernet
node is running on port `4000`. Same as before.
### Deploying Consumer Contracts
We have a [sample forge project](./projects/hello-world/contracts) which contains
a simple consumer contract, [`SaysGM`](./projects/hello-world/contracts/src/SaysGM.sol).
All this contract does is to request a job from the infernet node, and upon receiving
the result, it will use the `forge` console to print the result.
**Anvil Logs**: First, it's useful to look at the logs of the anvil node to see what's going on. In
a new terminal, run `docker logs -f anvil-node`.
**Deploying the contracts**: In another terminal, run the following command:
```bash
project=hello-world make deploy-contracts
```
You should be able to see the following logs in the anvil logs:
```bash
eth_sendRawTransaction
eth_getTransactionReceipt
Transaction: 0x23ca6b1d1823ad5af175c207c2505112f60038fc000e1e22509816fa29a3afd6
Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602
Gas used: 476669
Block Number: 1
Block Hash: 0x6b026b70fbe97b4a733d4812ccd6e8e25899a1f6c622430c3fb07a2e5c5c96b7
Block Time: "Wed, 17 Jan 2024 22:17:31 +0000"
eth_getTransactionByHash
eth_getTransactionReceipt
eth_blockNumber
```
We can see that a new contract has been created at `0x663f3ad617193148711d28f5334ee4ed07016602`.
That's the address of the `SaysGM` contract.
### Calling the contract
Now, let's call the contract. In the same terminal, run the following command:
```bash
project=hello-world make call-contract
```
You should first see that a transaction was sent to the `SaysGm` contract:
```bash
eth_getTransactionReceipt
Transaction: 0xe56b5b6ac713a978a1631a44d6a0c9eb6941dce929e1b66b4a2f7a61b0349d65
Gas used: 123323
Block Number: 2
Block Hash: 0x3d6678424adcdecfa0a8edd51e014290e5f54ee4707d4779e710a2a4d9867c08
Block Time: "Wed, 17 Jan 2024 22:18:39 +0000"
eth_getTransactionByHash
```
Then, right after that you should see another transaction submitted by the `node`,
which is the result of the job request:
```bash
eth_chainId
eth_sendRawTransaction
_____ _____ _______ _ _ _
| __ \|_ _|__ __| | | | /\ | |
| |__) | | | | | | | | | / \ | |
| _ / | | | | | | | |/ /\ \ | |
| | \ \ _| |_ | | | |__| / ____ \| |____
|_| \_\_____| |_| \____/_/ \_\______|
subscription Id 1
interval 1
redundancy 1
node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8
input:
0x
output:
0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000607b276f7574707574273a202268656c6c6f2c20776f726c64212c20796f757220696e707574207761733a207b27736f75726365273a20302c202764617461273a20273437366636663634323036643666373236653639366536373231277d227d
proof:
0x
Transaction: 0x949351d02e2c7f50ced2be06d14ca4311bd470ec80b135a2ce78a43f43e60d3d
Gas used: 94275
Block Number: 3
Block Hash: 0x57ed0cf39e3fb3a91a0d8baa5f9cb5d2bdc1875f2ad5d6baf4a9466f522df354
Block Time: "Wed, 17 Jan 2024 22:18:40 +0000"
eth_blockNumber
eth_newFilter
```
We can see that the address of the `node` matches the address of the node in
our ritual anvil node.
### Next Steps
To learn more about on-chain requests, check out the following resources:
1. [Tutorial](./projects/hello-world/contracts/Tutorial.md) on this project's consumer smart contracts.
2. [Infernet Callback Consumer Tutorial](https://docs.ritual.net/infernet/sdk/consumers/Callback)
3. [Infernet Nodes Documentation](https://docs.ritual.net/infernet/nodes)
Welcome to this repository! 🎉 This repo contains a series of examples that demonstrate
the true power of infernet, and the wide range of applications that can be built using
it:
## Examples
1. [Hello World](projects/hello-world/hello-world.md): Infernet's version of a `hello-world` program. Here, we deploy
a container that simply echoes back the input to us.
2. [Running a Torch Model on Infernet](projects/torch-iris/torch-iris.md): This example shows you how to deploy a pre-trained [pytorch](https://pytorch.org/)
model to infernet. Using this example will make it easier for you to deploy your own models to infernet.
3. [Running an ONNX Model on Infernet](projects/onnx-iris/onnx-iris.md): Same as the previous example, but this time we deploy
an ONNX model to infernet.
4. [Prompt to NFT](projects/prompt-to-nft/prompt-to-nft.md): In this example, we use [stablediffusion](https://github.com/Stability-AI/stablediffusion) to
mint NFTs on-chain using a prompt.
5. [TGI Inference with Mistral-7b](projects/tgi-llm/tgi-llm.md): This example shows you how to deploy an arbitrary
LLM model using [Huggingface's TGI](https://huggingface.co/docs/text-generation-inference/en/index), and use it with an infernet node.
6. [Running OpenAI's GPT-4 on Infernet](projects/gpt4/gpt4.md): This example shows you how to deploy OpenAI's GPT-4 model
to infernet.

View File

@ -6,9 +6,7 @@ services:
ports:
- "0.0.0.0:4000:4000"
volumes:
- type: bind
source: ./config.json
target: /app/config.json
- ./config.json:/app/config.json
- node-logs:/logs
- /var/run/docker.sock:/var/run/docker.sock
networks:
@ -20,11 +18,12 @@ services:
extra_hosts:
- "host.docker.internal:host-gateway"
stop_grace_period: 1m
tty: true
redis:
image: redis:latest
ports:
- "6379:6379"
expose:
- "6379"
networks:
- network
volumes:
@ -35,9 +34,8 @@ services:
fluentbit:
image: fluent/fluent-bit:latest
ports:
- "24224:24224"
expose:
- "24224"
environment:
- FLUENTBIT_CONFIG_PATH=/fluent-bit/etc/fluent-bit.conf
volumes:

View File

@ -35,4 +35,4 @@
User append_only_user
Password ogy29Z4mRCLfpup*9fn6
Database postgres
Table live_stats
Table live_stats

2
projects/gpt4/container/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
sample-gpt3.env
config.json

View File

@ -0,0 +1,25 @@
FROM python:3.11-slim as builder

WORKDIR /app

# ENV key=value is the supported form; the space-separated legacy form
# (`ENV KEY value`) is deprecated.
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_NO_CACHE_DIR=1 \
    RUNTIME=docker \
    PYTHONPATH=src

# Single RUN layer: keeps the apt package index and the install in the same
# layer (avoids stale-cache failures) and removes the lists to shrink the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends git curl \
    && rm -rf /var/lib/apt/lists/*

# install uv
ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
RUN /install.sh && rm /install.sh

COPY src/requirements.txt .

RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt

COPY src src

ENTRYPOINT ["hypercorn", "app:create_app()"]
CMD ["-b", "0.0.0.0:3000"]

View File

@ -0,0 +1,18 @@
DOCKER_ORG := ritualnetwork
EXAMPLE_NAME := gpt4
TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest

# NOTE: the special target must be spelled .PHONY (upper-case);
# make silently ignores ".phony", leaving these targets non-phony.
.PHONY: build run build-multiplatform try-prompt

build:
	mkdir -p root-config
	@docker build -t $(TAG) .

run: build
	@docker run --env-file $(EXAMPLE_NAME).env -p 3000:3000 $(TAG)

# You may need to set up a docker builder, to do so run:
# docker buildx create --name mybuilder --bootstrap --use
# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info
build-multiplatform:
	docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push .

View File

@ -0,0 +1,20 @@
# GPT 4
In this example, we run a minimalist container that makes use of our closed-source model
workflow: `CSSInferenceWorkflow`. Refer to [src/app.py](src/app.py) for the
implementation of the quart application.
## Requirements
To use the model you'll need to have an OpenAI api key. Get one at
[OpenAI](https://openai.com/)'s website.
## Run the Container
```bash
make run
```
## Test the Container
```bash
curl -X POST localhost:3000/service_output -H "Content-Type: application/json" \
-d '{"source": 1, "data": {"text": "can shrimps actually fry rice?"}}'
```

View File

@ -0,0 +1,52 @@
{
"log_path": "infernet_node.log",
"server": {
"port": 4000
},
"chain": {
"enabled": true,
"trail_head_blocks": 0,
"rpc_url": "http://host.docker.internal:8545",
"coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
"wallet": {
"max_gas_limit": 4000000,
"private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
}
},
"startup_wait": 1.0,
"docker": {
"username": "your-username",
"password": ""
},
"redis": {
"host": "redis",
"port": 6379
},
"forward_stats": true,
"containers": [
{
"id": "gpt4",
"image": "ritualnetwork/example-gpt4-infernet:latest",
"external": true,
"port": "3000",
"allowed_delegate_addresses": [],
"allowed_addresses": [],
"allowed_ips": [],
"command": "--bind=0.0.0.0:3000 --workers=2",
"env": {
"OPENAI_API_KEY": "barabeem baraboom"
}
},
{
"id": "anvil-node",
"image": "ritualnetwork/infernet-anvil:0.0.0",
"external": true,
"port": "8545",
"allowed_delegate_addresses": [],
"allowed_addresses": [],
"allowed_ips": [],
"command": "",
"env": {}
}
]
}

View File

@ -0,0 +1 @@
OPENAI_API_KEY=

View File

@ -0,0 +1,90 @@
import logging
from typing import Any, cast
from eth_abi import decode, encode # type: ignore
from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource
from infernet_ml.workflows.inference.css_inference_workflow import CSSInferenceWorkflow
from quart import Quart, request
log = logging.getLogger(__name__)
def create_app() -> Quart:
    """Create the Quart app serving the GPT-4 example container.

    Exposes:
        GET  /               -- liveness check.
        POST /service_output -- run a GPT-4 completion for an Infernet job.
    """
    app = Quart(__name__)

    # Workflow is constructed once at startup and shared by all requests.
    workflow = CSSInferenceWorkflow(provider="OPENAI", endpoint="completions")

    workflow.setup()

    @app.route("/")
    def index() -> str:
        """
        Utility endpoint to check if the service is running.
        """
        return "GPT4 Example Program"

    @app.route("/service_output", methods=["POST"])
    async def inference() -> dict[str, Any]:
        req_data = await request.get_json()
        """
        InfernetInput has the format:
            source: (0 on-chain, 1 off-chain)
            data: dict[str, Any]
        """
        infernet_input: InfernetInput = InfernetInput(**req_data)

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            # Off-chain jobs deliver the payload as a dict; read the prompt key.
            # NOTE(review): .get() yields None for a missing "prompt" -- confirm
            # the workflow tolerates a None prompt.
            prompt = cast(dict[str, Any], infernet_input.data).get("prompt")
        else:
            # On-chain requests are sent as a generalized hex-string which we will
            # decode to the appropriate format.
            (prompt,) = decode(
                ["string"], bytes.fromhex(cast(str, infernet_input.data))
            )

        # NOTE(review): workflow.inference appears synchronous; it blocks the
        # event loop for the duration of the OpenAI call -- confirm acceptable.
        result: dict[str, Any] = workflow.inference(
            {
                "model": "gpt-4-0613",
                "params": {
                    "endpoint": "completions",
                    "messages": [
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": prompt},
                    ],
                },
            }
        )

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            """
            In case of an off-chain request, the result is returned as is.
            """
            return {"message": result}
        else:
            """
            In case of an on-chain request, the result is returned in the format:
            {
                "raw_input": str,
                "processed_input": str,
                "raw_output": str,
                "processed_output": str,
                "proof": str,
            }
            refer to: https://docs.ritual.net/infernet/node/containers for more info.
            """
            # NOTE(review): result is annotated dict[str, Any], but eth_abi's
            # encode(["string"], [...]) expects a str value -- verify the
            # on-chain path does not raise here.
            return {
                "raw_input": "",
                "processed_input": "",
                "raw_output": encode(["string"], [result]).hex(),
                "processed_output": "",
                "proof": "",
            }

    return app


if __name__ == "__main__":
    """
    Utility to run the app locally. For development purposes only.
    """
    create_app().run(port=3000)

View File

@ -0,0 +1,5 @@
quart==0.19.4
infernet_ml==0.1.0
PyArweave @ git+https://github.com/ritual-net/pyarweave.git
web3==6.15.0
retry2==0.9.5

View File

@ -0,0 +1,34 @@
# Foundry CI: builds and tests the contracts on manual dispatch.
name: test

on: workflow_dispatch

env:
  FOUNDRY_PROFILE: ci

jobs:
  check:
    strategy:
      fail-fast: true
    name: Foundry project
    runs-on: ubuntu-latest
    steps:
      # Submodules carry forge-std and the infernet-sdk libraries
      - uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Install Foundry
        uses: foundry-rs/foundry-toolchain@v1
        with:
          version: nightly

      - name: Run Forge build
        run: |
          forge --version
          forge build --sizes
        id: build

      - name: Run Forge tests
        run: |
          forge test -vvv
        id: test

14
projects/gpt4/contracts/.gitignore vendored Normal file
View File

@ -0,0 +1,14 @@
# Compiler files
cache/
out/
# Ignores development broadcast logs
!/broadcast
/broadcast/*/31337/
/broadcast/**/dry-run/
# Docs
docs/
# Dotenv file
.env

View File

@ -0,0 +1,14 @@
# phony targets are targets that don't actually create a file
# NOTE: must be spelled .PHONY (upper-case); make ignores ".phony".
.PHONY: deploy call-contract

# anvil's third default address
sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a
RPC_URL := http://localhost:8545

# deploying the contract
deploy:
	@PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL)

# calling promptGPT()
call-contract:
	@PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL)

View File

@ -0,0 +1,27 @@
# GPT4 Example Contracts
This is a minimalist foundry project that implements a [callback consumer](https://docs.ritual.net/infernet/sdk/consumers/Callback)
that makes a prompt to the [container](../container/README.md), which then makes a call to OpenAI's GPT4. For an
end-to-end flow of how this works, follow the [guide here](../gpt4.md).
## Deploying
The [`Deploy.s.sol`](./script/Deploy.s.sol) deploys the contracts.
The [Makefile](./Makefile) in this project contains
a utility deploy target.
```bash
make deploy
```
## Prompting
The [`CallContract.s.sol`](./script/CallContract.s.sol) calls
the [`promptGPT`](./src/PromptsGPT.sol#L10) function.
The [Makefile](./Makefile) in this project contains a utility call target. You'll need
to pass in the prompt as an
env var.
```bash
make call-contract prompt="What is 2 * 3?"
```

View File

@ -0,0 +1,7 @@
[profile.default]
src = "src"
out = "out"
libs = ["lib"]
via_ir = true
# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options

View File

@ -0,0 +1,2 @@
forge-std/=lib/forge-std/src
infernet-sdk/=lib/infernet-sdk/src

View File

@ -0,0 +1,19 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.0;
import {Script, console2} from "forge-std/Script.sol";
import {PromptsGPT} from "../src/PromptsGPT.sol";
/// @notice Forge script that prompts a previously-deployed PromptsGPT consumer.
/// @dev Reads the sender key from the PRIVATE_KEY env var and the prompt text
///      from the `prompt` env var.
contract CallContract is Script {
    function run() public {
        // Setup wallet
        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
        vm.startBroadcast(deployerPrivateKey);

        // NOTE(review): hardcoded address assumes PromptsGPT is the first
        // contract deployed on a fresh anvil node -- confirm it matches the
        // address printed by the Deploy script.
        PromptsGPT promptsGpt = PromptsGPT(0x663F3ad617193148711d28f5334eE4Ed07016602);

        promptsGpt.promptGPT(vm.envString("prompt"));

        vm.stopBroadcast();
    }
}

View File

@ -0,0 +1,27 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;
import {Script, console2} from "forge-std/Script.sol";
import {PromptsGPT} from "../src/PromptsGPT.sol";
/// @notice Forge script that deploys the PromptsGPT consumer contract.
/// @dev Reads the deployer key from the PRIVATE_KEY env var; targets the
///      coordinator address pre-deployed on the local anvil test image.
contract Deploy is Script {
    function run() public {
        // Setup wallet
        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
        vm.startBroadcast(deployerPrivateKey);

        // Log address
        address deployerAddress = vm.addr(deployerPrivateKey);
        console2.log("Loaded deployer: ", deployerAddress);

        // Infernet coordinator pre-deployed on the anvil test image
        address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3;

        // Create consumer
        PromptsGPT promptsGPT = new PromptsGPT(coordinator);
        console2.log("Deployed PromptsGPT: ", address(promptsGPT));

        // Execute
        vm.stopBroadcast();
        // Fix: removed the stray `vm.broadcast()` that followed
        // stopBroadcast(); it opened a new single-transaction broadcast
        // with no transaction left to attach to.
    }
}

View File

@ -0,0 +1,46 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;
import {console2} from "forge-std/console2.sol";
import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol";
/// @notice Minimal Infernet callback consumer that forwards a prompt to the
///         "gpt4" container and logs the returned completion.
contract PromptsGPT is CallbackConsumer {
    // ASCII banner printed when a compute result is delivered.
    string private EXTREMELY_COOL_BANNER = "\n\n"
    "_____ _____ _______ _ _ _ \n"
    "| __ \\|_ _|__ __| | | | /\\ | | \n"
    "| |__) | | | | | | | | | / \\ | | \n"
    "| _ / | | | | | | | |/ /\\ \\ | | \n"
    "| | \\ \\ _| |_ | | | |__| / ____ \\| |____ \n"
    "|_| \\_\\_____| |_| \\____/_/ \\_\\______| \n\n";

    constructor(address coordinator) CallbackConsumer(coordinator) {}

    /// @notice Requests a GPT-4 completion from the "gpt4" container.
    /// @param prompt The user prompt, ABI-encoded and passed as compute input.
    function promptGPT(string calldata prompt) public {
        _requestCompute(
            "gpt4",
            abi.encode(prompt),
            20 gwei, // NOTE(review): presumably a gas-price bound -- confirm against CallbackConsumer
            1_000_000, // NOTE(review): presumably the callback gas limit -- confirm
            1 // redundancy: one node response requested
        );
    }

    /// @notice Callback invoked by the coordinator with the compute result.
    /// @dev Only logs the decoded output; `input` and `proof` are unused here.
    function _receiveCompute(
        uint32 subscriptionId,
        uint32 interval,
        uint16 redundancy,
        address node,
        bytes calldata input,
        bytes calldata output,
        bytes calldata proof
    ) internal override {
        console2.log(EXTREMELY_COOL_BANNER);
        // `output` packs two byte-strings; only the first (raw output) is used.
        (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes));
        (string memory outputStr) = abi.decode(raw_output, (string));
        console2.log("subscription Id", subscriptionId);
        console2.log("interval", interval);
        console2.log("redundancy", redundancy);
        console2.log("node", node);
        console2.log("output:", outputStr);
    }
}

206
projects/gpt4/gpt4.md Normal file
View File

@ -0,0 +1,206 @@
# Running OpenAI's GPT-4 on Infernet
In this tutorial we are going to integrate [OpenAI's GPT-4](https://openai.com/gpt-4) into infernet. We will:
1. Obtain an API key from OpenAI
2. Configure the `gpt4` service, build & deploy it with Infernet
3. Make a web-2 request by directly prompting the [gpt4 service](./container)
4. Make a web-3 request by integrating a sample [`PromptsGPT.sol`](./contracts/src/PromptsGPT.sol) smart contract. This
contract will make a request to Infernet with their prompt, and receive the result of the request.
## Install Pre-requisites
For this tutorial you'll need to have the following installed.
1. [Docker](https://docs.docker.com/engine/install/)
2. [Foundry](https://book.getfoundry.sh/getting-started/installation)
### Get an API key from OpenAI
First, you'll need to get an API key from OpenAI. You can do this by making
an [OpenAI](https://openai.com/) account.
After signing in, head over to [their platform](https://platform.openai.com/api-keys) to
make an API key.
> [!NOTE]
> You will need a paid account to use the GPT-4 API.
### Ensure `docker` & `foundry` exist
To check for `docker`, run the following command in your terminal:
```bash copy
docker --version
# Docker version 25.0.2, build 29cf629 (example output)
```
You'll also need to ensure that docker-compose exists in your terminal:
```bash copy
which docker-compose
# /usr/local/bin/docker-compose (example output)
```
To check for `foundry`, run the following command in your terminal:
```bash copy
forge --version
# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output)
```
### Clone the starter repository
Just like our other examples, we're going to clone this repository.
All of the code and instructions for this tutorial can be found in the
[`projects/gpt4`](https://github.com/ritual-net/infernet-container-starter/tree/main/projects/gpt4)
directory of the repository.
```bash copy
# Clone locally
git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
# Navigate to the repository
cd infernet-container-starter
```
### Configure the `gpt4` container
#### Configure API key in `config.json`
This is where we'll use the API key we obtained from OpenAI.
```bash
cd projects/gpt4/container
cp config.sample.json config.json
```
In the `containers` field, you will see the following. Replace `your-openai-key` with your OpenAI API key.
```json
"containers": [
{
// etc. etc.
"env": {
"OPENAI_API_KEY": "your-openai-key" // replace with your OpenAI API key
}
}
],
```
### Build the `gpt4` container
First, navigate back to the root of the repository. Then simply run the following command to build the `gpt4`
container:
```bash copy
cd ../../..
make build-container project=gpt4
```
### Deploy infernet node locally
Much like our [hello world](../hello-world/hello-world.md) project, deploying the infernet node is as
simple as running:
```bash copy
make deploy-container project=gpt4
```
## Making a Web2 Request
From here, you can directly make a request to the infernet node:
```bash
curl -X POST http://127.0.0.1:4000/api/jobs \
-H "Content-Type: application/json" \
-d '{"containers":["gpt4"], "data": {"prompt": "Hello, can shrimp actually fry rice?"}}'
# {"id":"cab6eea8-8b1e-4144-9a70-f905c5ef375b"}
```
If you have `jq` installed, you can pipe the output of the last command to a file:
```bash copy
curl -X POST http://127.0.0.1:4000/api/jobs \
-H "Content-Type: application/json" \
-d '{"containers":["gpt4"], "data": {"prompt": "Hello, can shrimp actually fry rice?"}}' | jq -r ".id" > last-job.uuid
```
You can then check the status of the job by running:
```bash copy
curl -X GET http://127.0.0.1:4000/api/jobs\?id\=cab6eea8-8b1e-4144-9a70-f905c5ef375b
# response [{"id":"07026571-edc8-42ab-b38c-6b3cf19971b6","result":{"container":"gpt4","output":{"message":"No, shrimps cannot fry rice by themselves. However, in culinary terms, shrimp fried rice is a popular dish in which cooked shrimp are added to fried rice along with other ingredients. Cooks or chefs prepare it by frying the rice and shrimps together usually in a wok or frying pan."}},"status":"success"}]
```
And if you have `jq` installed and piped the last output to a file, you can instead run:
```bash
curl -X GET "http://127.0.0.1:4000/api/jobs?id=$(cat last-job.uuid)" | jq .
# returns something like:
[
{
"id": "1b50e85b-2295-44eb-9c85-40ae5331bd14",
"result": {
"container": "gpt4",
"output": {
"output": "Yes, shrimp can be used to make fried rice. In many Asian cuisines, shrimp is a popular ingredient in fried rice dishes. The shrimp adds flavor and protein to the dish, and can be cooked along with the rice and other ingredients such as vegetables, eggs, and seasonings."
}
},
"status": "success"
}
]
```
## Making a Web3 Request
Now let's bring this service onchain! First we'll have to deploy the contracts.
The [contracts](contracts)
directory contains a simple foundry project with a simple contract called `PromptsGpt`.
This contract exposes a single
function `function promptGPT(string calldata prompt)`. Using this function you'll be
able to make an infernet request.
**Anvil Logs**: First, it's useful to look at the logs of the anvil node to see what's
going on. In a new terminal, run
`docker logs -f anvil-node`.
**Deploying the contracts**: In another terminal, run the following command:
```bash
make deploy-contracts project=gpt4
```
### Calling the contract
Now, let's call the contract. So far everything's been identical to
the [hello world](../hello-world/hello-world.md) project. The only
difference here is that calling the contract requires an input. We'll pass that input in
using an env var named
`prompt`:
```bash copy
make call-contract project=gpt4 prompt="Can shrimps actually fry rice"
```
On your anvil logs, you should see something like this:
```bash
eth_sendRawTransaction
_____ _____ _______ _ _ _
| __ \|_ _|__ __| | | | /\ | |
| |__) | | | | | | | | | / \ | |
| _ / | | | | | | | |/ /\ \ | |
| | \ \ _| |_ | | | |__| / ____ \| |____
|_| \_\_____| |_| \____/_/ \_\______|
subscription Id 1
interval 1
redundancy 1
node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8
output: {'output': 'Yes, shrimps can be used to make fried rice. Fried rice is a versatile dish that can be made with various ingredients, including shrimp. Shrimp fried rice is a popular dish in many cuisines, especially in Asian cuisine.'}
Transaction: 0x9bcab42cf7348953eaf107ca0ca539cb27f3843c1bb08cf359484c71fcf44d2b
Gas used: 93726
Block Number: 3
Block Hash: 0x1cc39d03bb1d69ea7f32db85d2ee684071e28b6d6de9eab6f57e011e11a7ed08
Block Time: "Fri, 26 Jan 2024 02:30:37 +0000"
```
Beautiful, isn't it? 🥰

View File

@ -4,15 +4,20 @@ WORKDIR /app
ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1
ENV PIP_NO_CACHE_DIR 1
ENV RUNTIME docker
ENV PYTHONPATH src
WORKDIR /app
RUN apt-get update
RUN apt-get install -y git curl
# install uv
ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
RUN /install.sh && rm /install.sh
COPY src/requirements.txt .
RUN pip install --upgrade pip && pip install -r requirements.txt
RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt
COPY src src

View File

@ -3,8 +3,8 @@
In this tutorial, we'll create a simple hello-world container that can be used
with infernet.
> [!NOTE]
> This directory `containers/hello-world` already includes the final result
> [!NOTE]
> This directory `containers/hello-world` already includes the final result
> of this tutorial. Run the following tutorial in a new directory.
Let's get started! 🎉
@ -88,7 +88,7 @@ This is a simple Dockerfile that:
3. Copies the source code
4. Runs the app on port `3000`
> [!IMPORTANT]
> [!IMPORTANT]
> App must be exposed on port `3000`. Infernet's orchestrator
> will always assume that the container apps are exposed on that port within the container.
> Users can then remap this port to any port that they want on the host machine
@ -127,7 +127,7 @@ docker run --rm -p 3000:3000 --name hello hello-world
In another terminal, run:
```
curl localhost:3000
curl "localhost:3000"
```
It should return something like:
@ -159,5 +159,5 @@ The output should be something like:
Your users will never call this endpoint directly. Instead, they will:
1. Either [create an off-chain job request](../../../README.md#L36) through the node API
1. Either [create an off-chain job request](../hello-world#L36) through the node API
2. Or they will make a subscription on their contracts

View File

@ -1,9 +1,10 @@
from time import sleep
from typing import Any
import requests
def hit_server_directly():
def hit_server_directly() -> None:
print("hello")
r = requests.get("http://localhost:3000/")
print(r.status_code)
@ -11,7 +12,7 @@ def hit_server_directly():
print("server response", r.text)
def poll_until_complete(id: str):
def poll_until_complete(id: str) -> Any:
status = "running"
r = None
while status == "running":
@ -24,11 +25,12 @@ def poll_until_complete(id: str):
status = r.get("status")
print("status", status)
if status != "running":
return r
break
sleep(1)
return r
def create_job_through_node():
def create_job_through_node() -> None:
r = requests.post(
"http://localhost:4000/api/jobs",
json={

View File

@ -1,2 +1,2 @@
Flask>=3.0.0,<4.0.0
gunicorn>=21.2.0,<22.0.0
gunicorn>=21.2.0,<22.0.0

View File

@ -11,4 +11,4 @@ deploy:
# calling sayGM()
call-contract:
@PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL)
@PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL)

View File

@ -1,15 +1,15 @@
# `Hello-World` Consumer Contracts
This is a [foundry](https://book.getfoundry.sh/) project that implements a simple Consumer
contract, [`SaysGm`](./src/SaysGM.sol).
contract, [`SaysGm`](./src/SaysGM.sol).
This readme explains how to compile and deploy the contract to the Infernet Anvil Testnet network.
This readme explains how to compile and deploy the contract to the Infernet Anvil Testnet network.
For a detailed tutorial on how to write a consumer contract, refer to the [tutorial doc](./Tutorial.md).
> [!IMPORTANT]
> Ensure that you are running the following scripts with the Infernet Anvil Testnet network.
> The [tutorial](./../../../README.md) at the root of this repository explains how to
> [!IMPORTANT]
> Ensure that you are running the following scripts with the Infernet Anvil Testnet network.
> The [tutorial](../hello-world) at the root of this repository explains how to
> bring up an infernet node.
### Installing the libraries
@ -27,7 +27,7 @@ forge compile
### Deploying the contracts
The deploy script at `script/Deploy.s.sol` deploys the `SaysGM` contract to the Infernet Anvil Testnet network.
We have the [following make target](./Makefile#L9) to deploy the contract. Refer to the Makefile
We have the [following make target](./Makefile#L9) to deploy the contract. Refer to the Makefile
for more understanding around the deploy scripts.
```bash
make deploy
@ -35,10 +35,9 @@ make deploy
### Requesting a job
We also have a script called `CallContract.s.sol` that requests a job to the `SaysGM` contract.
Refer to the [script](./script/CallContract.s.sol) for more details. Similar to deployment,
Refer to the [script](./script/CallContract.s.sol) for more details. Similar to deployment,
you can run that script using the following convenience make target.
```bash
make call-contract
```
Refer to the [Makefile](./Makefile#L14) for more details.

View File

@ -218,7 +218,7 @@ PRIVATE_KEY=0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a \
```
### Using a `Makefile`
To make running these commands easier, we can add them to a `Makefile`. This allows
To make running these commands easier, we can add them to a `Makefile`. This allows
us to run `make deploy` and `make call` instead of typing out the full command every time.
Refer to [this project's Makefile](./Makefile) for an example.
@ -226,4 +226,4 @@ Refer to [this project's Makefile](./Makefile) for an example.
### 🎉 Done!
Congratulations! You've successfully created a contract that requests compute from
our `hello-world` container.
our `hello-world` container.

View File

@ -0,0 +1,231 @@
# Hello, World!
Welcome to the first tutorial of Infernet! In this tutorial we will guide you through the process of setting up and
running an Infernet Node, and then demonstrate how to create and monitor off-chain compute jobs and on-chain subscriptions.
To interact with infernet, one could either create a job by accessing an infernet node directly through its API (we'll
refer to this as an off-chain job), or by creating a subscription on-chain (we'll refer to this as an on-chain job).
## Requesting an off-chain job: Hello World!
This project is a simple [flask-app](container/src/app.py) that is compatible with `infernet`, and simply
[echoes what you send to it](container/src/app.py#L16).
### Install Docker & Verify Installation
To run this, you'll need to have docker installed. You can find instructions for installing docker [here](https://docs.docker.com/install/).
After installing & running docker, you can verify that the docker daemon is running by running the following command:
```bash copy
docker --version
# Docker version 25.0.2, build 29cf629
```
### Clone the starter repository
```bash copy
# Clone locally
git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
# Navigate to the repository
cd infernet-container-starter
```
### Build the `hello-world` container
Once inside the repository directory, you can run a simple command to build the `hello-world` container:
```bash copy
make build-container project=hello-world
```
### Running Locally
Then, from the top-level project directory, run the following make command:
```
make deploy-container project=hello-world
```
This will deploy an infernet node along with the `hello-world` image.
### Creating an off-chain job through the API
You can create an off-chain job by posting to the `node` directly.
```bash
curl -X POST "http://127.0.0.1:4000/api/jobs" \
-H "Content-Type: application/json" \
-d '{"containers":["hello-world"], "data": {"some": "input"}}'
# returns
{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007"}
```
This will return the id of that job.
### Getting the status/result/errors of a job
You can check the status of a job like so:
```bash
curl -X GET "http://127.0.0.1:4000/api/jobs?id=d5281dd5-c4f4-4523-a9c2-266398e06007"
# returns
[{"id":"d5281dd5-c4f4-4523-a9c2-266398e06007", "result":{"container":"hello-world","output": {"output":"hello, world!, your input was: {'source': 1, 'data': {'some': 'input'}}"}} ,"status":"success"}]
```
### Configuration
This project already comes with a pre-filled config file. The config file for the hello-world project is located
[here](container/config.json):
```bash
projects/hello-world/config.json
```
## Requesting an on-chain job
In this section we'll go over how to request an on-chain job in a local anvil node.
### Infernet's Anvil Testnet
To request an on-chain job, you'll need to deploy contracts using the infernet sdk.
We already have a public [anvil node](https://hub.docker.com/r/ritualnetwork/infernet-anvil) docker image which has the
corresponding infernet sdk contracts deployed, along with a node that has
registered itself to listen to on-chain subscription events.
* Coordinator Address: `0x5FbDB2315678afecb367f032d93F642f64180aa3`
* Node Address: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8` (This is the second account in the anvil's accounts.)
### Deploying Infernet Node & Infernet's Anvil Testnet
This step is similar to the section above:
```bash
project=hello-world make deploy-container
```
In another terminal, run `docker container ls`, you should see something like this
```bash
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c2ca0ffe7817 ritualnetwork/infernet-anvil:0.0.0 "anvil --host 0.0.0.…" 9 seconds ago Up 8 seconds 0.0.0.0:8545->3000/tcp anvil-node
0b686a6a0e5f ritualnetwork/hello-world-infernet:0.0.2 "gunicorn app:create…" 9 seconds ago Up 8 seconds 0.0.0.0:3000->3000/tcp hello-world
28b2e5608655 ritualnetwork/infernet-node:0.1.1 "/app/entrypoint.sh" 10 seconds ago Up 10 seconds 0.0.0.0:4000->4000/tcp deploy-node-1
03ba51ff48b8 fluent/fluent-bit:latest "/fluent-bit/bin/flu…" 10 seconds ago Up 10 seconds 2020/tcp, 0.0.0.0:24224->24224/tcp deploy-fluentbit-1
a0d96f29a238 redis:latest "docker-entrypoint.s…" 10 seconds ago Up 10 seconds 0.0.0.0:6379->6379/tcp deploy-redis-1
```
You can see that the anvil node is running on port `8545`, and the infernet
node is running on port `4000`. Same as before.
### Deploying Consumer Contracts
We have a [sample forge project](./contracts) which contains
a simple consumer contract, [`SaysGM`](contracts/src/SaysGM.sol).
All this contract does is to request a job from the infernet node, and upon receiving
the result, it will use the `forge` console to print the result.
**Anvil Logs**: First, it's useful to look at the logs of the anvil node to see what's going on. In
a new terminal, run `docker logs -f anvil-node`.
**Deploying the contracts**: In another terminal, run the following command:
```bash
project=hello-world make deploy-contracts
```
You should be able to see the following logs in the anvil logs:
```bash
eth_sendRawTransaction
eth_getTransactionReceipt
Transaction: 0x23ca6b1d1823ad5af175c207c2505112f60038fc000e1e22509816fa29a3afd6
Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602
Gas used: 476669
Block Number: 1
Block Hash: 0x6b026b70fbe97b4a733d4812ccd6e8e25899a1f6c622430c3fb07a2e5c5c96b7
Block Time: "Wed, 17 Jan 2024 22:17:31 +0000"
eth_getTransactionByHash
eth_getTransactionReceipt
eth_blockNumber
```
We can see that a new contract has been created at `0x663f3ad617193148711d28f5334ee4ed07016602`.
That's the address of the `SaysGM` contract.
### Calling the contract
Now, let's call the contract. In the same terminal, run the following command:
```bash
project=hello-world make call-contract
```
You should first see that a transaction was sent to the `SaysGm` contract:
```bash
eth_getTransactionReceipt
Transaction: 0xe56b5b6ac713a978a1631a44d6a0c9eb6941dce929e1b66b4a2f7a61b0349d65
Gas used: 123323
Block Number: 2
Block Hash: 0x3d6678424adcdecfa0a8edd51e014290e5f54ee4707d4779e710a2a4d9867c08
Block Time: "Wed, 17 Jan 2024 22:18:39 +0000"
eth_getTransactionByHash
```
Then, right after that you should see another transaction submitted by the `node`,
which is the result of the job request:
```bash
eth_chainId
eth_sendRawTransaction
_____ _____ _______ _ _ _
| __ \|_ _|__ __| | | | /\ | |
| |__) | | | | | | | | | / \ | |
| _ / | | | | | | | |/ /\ \ | |
| | \ \ _| |_ | | | |__| / ____ \| |____
|_| \_\_____| |_| \____/_/ \_\______|
subscription Id 1
interval 1
redundancy 1
node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8
input:
0x
output:
0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000607b276f7574707574273a202268656c6c6f2c20776f726c64212c20796f757220696e707574207761733a207b27736f75726365273a20302c202764617461273a20273437366636663634323036643666373236653639366536373231277d227d
proof:
0x
Transaction: 0x949351d02e2c7f50ced2be06d14ca4311bd470ec80b135a2ce78a43f43e60d3d
Gas used: 94275
Block Number: 3
Block Hash: 0x57ed0cf39e3fb3a91a0d8baa5f9cb5d2bdc1875f2ad5d6baf4a9466f522df354
Block Time: "Wed, 17 Jan 2024 22:18:40 +0000"
eth_blockNumber
eth_newFilter
```
We can see that the address of the `node` matches the address of the node in
our ritual anvil node.
### Next Steps
To learn more about on-chain requests, check out the following resources:
1. [Tutorial](contracts/Tutorial.md) on this project's consumer smart contracts.
2. [Infernet Callback Consumer Tutorial](https://docs.ritual.net/infernet/sdk/consumers/Callback)
3. [Infernet Nodes Documentation](https://docs.ritual.net/infernet/node/introduction)
4. [Infernet-Compatible Containers](https://docs.ritual.net/infernet/node/containers)

View File

@ -0,0 +1,25 @@
# Slim Python base image; the "builder" stage name is kept for parity with
# the other project Dockerfiles in this repository.
FROM python:3.11-slim as builder

WORKDIR /app

# Unbuffered stdout/stderr and no .pyc files keep container logs immediate;
# disabling the pip cache keeps the image small.
ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1
ENV PIP_NO_CACHE_DIR 1
ENV RUNTIME docker
ENV PYTHONPATH src

RUN apt-get update
RUN apt-get install -y git curl

# install uv
ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
RUN /install.sh && rm /install.sh

# Install Python dependencies into the system interpreter via uv.
COPY src/requirements.txt .
RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt

COPY src src

# Serve the Quart app factory with hypercorn, bound to port 3000 — the
# infernet orchestrator assumes container apps are exposed on that port.
ENTRYPOINT ["hypercorn", "app:create_app()"]
CMD ["-b", "0.0.0.0:3000"]

View File

@ -0,0 +1,17 @@
DOCKER_ORG := ritualnetwork
EXAMPLE_NAME := onnx-iris
TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest

# NOTE: `.PHONY` must be upper-case — lower-case `.phony` is not recognized
# by make as the special target, so these targets would silently stop running
# if files named `build`/`run`/`build-multiplatform` ever existed.
.PHONY: build run build-multiplatform

# Build the container image for the local platform.
build:
	@docker build -t $(TAG) .

# Run the container, exposing the service on port 3000.
run:
	docker run -p 3000:3000 $(TAG)

# You may need to set up a docker builder, to do so run:
# docker buildx create --name mybuilder --bootstrap --use
# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info
build-multiplatform:
	docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push .

View File

@ -0,0 +1,96 @@
# Iris Classification via ONNX Runtime
This example uses a pre-trained model to classify iris flowers. The code for the model
is located at
our [simple-ml-models](https://github.com/ritual-net/simple-ml-models/tree/main/iris_classification)
repository.
## Overview
We're making use of
the [ONNXInferenceWorkflow](https://github.com/ritual-net/infernet-ml-internal/blob/main/src/ml/workflows/inference/onnx_inference_workflow.py)
class to run the model. This is one of many workflows that we currently support in our
[infernet-ml](https://github.com/ritual-net/infernet-ml-internal). Consult the library's
documentation for more info on workflows that
are supported.
## Building & Running the Container in Isolation
Note that this container is meant to be started by the infernet-node. For development &
testing purposes, you can run the container in isolation using the following commands.
### Building the Container
Simply run the following command to build the container.
```bash
make build
```
Consult the [Makefile](./Makefile) for the build command.
### Running the Container
To run the container, you can use the following command:
```bash
make run
```
## Testing the Container
Run the following command to run an inference:
```bash
curl -X POST http://127.0.0.1:3000/service_output \
-H "Content-Type: application/json" \
-d '{"source":1, "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}}'
```
#### Note Regarding the Input
The inputs provided above correspond to an iris flower with the following
characteristics:
1. Sepal Length: `5.5cm`
2. Sepal Width: `2.4cm`
3. Petal Length: `3.8cm`
4. Petal Width: `1.1cm`
Putting this input into a vector and scaling it, we get the following scaled input:
```python
[1.0380048, 0.5586108, 1.1037828, 1.712096]
```
Refer
to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13)
for more information on how the input is scaled.
For more context on the Iris dataset, refer to
the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/iris).
### Output
By running the above command, you should get a response similar to the following:
```json
[
[
[
0.0010151526657864451,
0.014391022734344006,
0.9845937490463257
]
]
]
```
The response corresponds to the model's prediction for each of the classes:
```python
['setosa', 'versicolor', 'virginica']
```
In this case, the model predicts that the input corresponds to the class `virginica` with
a probability of `0.9845937490463257` (~98.5%).

View File

@ -0,0 +1,50 @@
{
"log_path": "infernet_node.log",
"server": {
"port": 4000
},
"chain": {
"enabled": true,
"trail_head_blocks": 0,
"rpc_url": "http://host.docker.internal:8545",
"coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
"wallet": {
"max_gas_limit": 4000000,
"private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
}
},
"startup_wait": 1.0,
"docker": {
"username": "your-username",
"password": ""
},
"redis": {
"host": "redis",
"port": 6379
},
"forward_stats": true,
"containers": [
{
"id": "onnx-iris",
"image": "ritualnetwork/example-onnx-iris-infernet:latest",
"external": true,
"port": "3000",
"allowed_delegate_addresses": [],
"allowed_addresses": [],
"allowed_ips": [],
"command": "--bind=0.0.0.0:3000 --workers=2",
"env": {}
},
{
"id": "anvil-node",
"image": "ritualnetwork/infernet-anvil:0.0.0",
"external": true,
"port": "8545",
"allowed_delegate_addresses": [],
"allowed_addresses": [],
"allowed_ips": [],
"command": "",
"env": {}
}
]
}

View File

@ -0,0 +1,52 @@
import asyncio

import aiohttp
from eth_abi import encode, decode  # type: ignore


async def ping(session: aiohttp.ClientSession) -> None:
    """Sanity-check that the onnx-iris container is up by hitting its root endpoint."""
    async with session.get("http://127.0.0.1:3000/") as response:
        print(await response.text())


async def post_directly_web2(session: aiohttp.ClientSession) -> None:
    """Send an off-chain (source=1) inference request directly to the container.

    The payload is a single already-scaled 4-feature iris sample; the model's
    JSON response is printed as-is.
    """
    async with session.post(
        "http://127.0.0.1:3000/service_output",
        json={
            "source": 1,
            "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]},
        },
    ) as response:
        print(await response.json())


async def post_directly_web3(session: aiohttp.ClientSession) -> None:
    """Send an on-chain-style (source=0) request directly to the container.

    The same iris features are abi-encoded as a `uint256[]` (scaled by 1e6,
    matching the on-chain fixed-point convention) and sent as a hex string;
    the response's `raw_output` field is abi-decoded back into integers.
    """
    async with session.post(
        "http://127.0.0.1:3000/service_output",
        json={
            "source": 0,
            "data": encode(
                ["uint256[]"], [[1_038_004, 558_610, 1_103_782, 1_712_096]]
            ).hex(),
        },
    ) as response:
        print(await response.text())
        result = await response.json()
        output = result["raw_output"]
        # Decode the abi-encoded uint256[] predictions from the hex payload.
        result = decode(["uint256[]"], bytes.fromhex(output))[0]
        print(f"result: {result}")


async def main(session: aiohttp.ClientSession) -> None:
    """Entry point: only the web3-style request is exercised by default."""
    await post_directly_web3(session)


if __name__ == "__main__":
    # Run main inside an aiohttp session whose lifecycle we own, so all
    # connections are closed cleanly on exit.
    async def provide_session() -> None:
        async with aiohttp.ClientSession() as session:
            await main(session)

    asyncio.run(provide_session())

View File

@ -0,0 +1,107 @@
import logging
from typing import Any, cast, List
import numpy as np
from eth_abi import decode, encode # type: ignore
from infernet_ml.utils.model_loader import ModelSource
from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource
from infernet_ml.workflows.inference.onnx_inference_workflow import (
ONNXInferenceWorkflow,
)
from quart import Quart, request
from quart.json.provider import DefaultJSONProvider
log = logging.getLogger(__name__)
class NumpyJsonEncodingProvider(DefaultJSONProvider):
    """Quart JSON provider that serializes NumPy arrays as plain Python lists."""

    @staticmethod
    def default(obj: Any) -> Any:
        if isinstance(obj, np.ndarray):
            # Convert NumPy arrays to list
            return obj.tolist()
        # fallback to default JSON encoding
        return DefaultJSONProvider.default(obj)
def create_app() -> Quart:
    """Create and configure the Quart app serving the iris ONNX classifier.

    The pre-trained model is downloaded from the Hugging Face hub and the ONNX
    inference workflow is set up once, at app-creation time, so requests only
    pay for inference.
    """
    Quart.json_provider_class = NumpyJsonEncodingProvider
    app = Quart(__name__)

    # we are downloading the model from the hub.
    # model repo is located at: https://huggingface.co/Ritual-Net/iris-dataset
    model_source = ModelSource.HUGGINGFACE_HUB
    model_args = {"repo_id": "Ritual-Net/iris-dataset", "filename": "iris.onnx"}

    workflow = ONNXInferenceWorkflow(model_source=model_source, model_args=model_args)
    workflow.setup()

    @app.route("/")
    def index() -> str:
        """
        Utility endpoint to check if the service is running.
        """
        return "ONNX Iris Classifier Example Program"

    @app.route("/service_output", methods=["POST"])
    async def inference() -> dict[str, Any]:
        req_data = await request.get_json()
        """
        InfernetInput has the format:
            source: (0 on-chain, 1 off-chain)
            data: dict[str, Any]
        """
        infernet_input: InfernetInput = InfernetInput(**req_data)

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            # Off-chain requests provide the feature matrix directly as JSON.
            web2_input = cast(dict[str, Any], infernet_input.data)
            values = cast(List[List[float]], web2_input["input"])
        else:
            # On-chain requests are sent as a generalized hex-string which we will
            # decode to the appropriate format.
            web3_input: List[int] = decode(
                ["uint256[]"], bytes.fromhex(cast(str, infernet_input.data))
            )[0]
            # On-chain values arrive fixed-point scaled by 1e6; convert to floats.
            values = [[float(v) / 1e6 for v in web3_input]]

        """
        The input to the onnx inference workflow needs to conform to ONNX runtime's
        input_feed format. For more information refer to:
        https://docs.ritual.net/ml-workflows/inference-workflows/onnx_inference_workflow
        """
        result: dict[str, Any] = workflow.inference({"input": values})

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            """
            In case of an off-chain request, the result is returned as is.
            """
            return result
        else:
            """
            In case of an on-chain request, the result is returned in the format:
            {
                "raw_input": str,
                "processed_input": str,
                "raw_output": str,
                "processed_output": str,
                "proof": str,
            }
            refer to: https://docs.ritual.net/infernet/node/containers for more info.
            """
            # Scale class probabilities back to fixed-point (1e6 = 100%) and
            # abi-encode them as a uint256[] for on-chain consumption.
            predictions = cast(List[List[List[float]]], result)
            predictions_normalized = [int(p * 1e6) for p in predictions[0][0]]
            return {
                "raw_input": "",
                "processed_input": "",
                "raw_output": encode(["uint256[]"], [predictions_normalized]).hex(),
                "processed_output": "",
                "proof": "",
            }

    return app
if __name__ == "__main__":
"""
Utility to run the app locally. For development purposes only.
"""
create_app().run(port=3000)

View File

@ -0,0 +1,7 @@
quart==0.19.4
infernet_ml==0.1.0
PyArweave @ git+https://github.com/ritual-net/pyarweave.git
web3==6.15.0
onnx==1.15.0
onnxruntime==1.16.3
torch==2.1.2

14
projects/onnx-iris/contracts/.gitignore vendored Normal file
View File

@ -0,0 +1,14 @@
# Compiler files
cache/
out/
# Ignores development broadcast logs
!/broadcast
/broadcast/*/31337/
/broadcast/**/dry-run/
# Docs
docs/
# Dotenv file
.env

View File

@ -0,0 +1,14 @@
# phony targets are targets that don't actually create a file
# (`.PHONY` must be upper-case to be recognized by make; lower-case `.phony`
# is ignored, and `call-contract` belongs in the list too)
.PHONY: deploy call-contract

# anvil's third default address
sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a
RPC_URL := http://localhost:8545

# deploying the contract
deploy:
	@PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL)

# calling classifyIris()
call-contract:
	@PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL)

View File

@ -0,0 +1,41 @@
# `ONNX` Consumer Contracts
This is a [foundry](https://book.getfoundry.sh/) project that implements a simple Consumer
contract, [`IrisClassifier`](./src/IrisClassifier.sol).
This readme explains how to compile and deploy the contract to the Infernet Anvil Testnet network.
> [!IMPORTANT]
> Ensure that you are running the following scripts with the Infernet Anvil Testnet network.
> The [tutorial](../../hello-world/README.md) at the root of this repository explains how to
> bring up an infernet node.
### Installing the libraries
```bash
forge install
```
### Compiling the contracts
```bash
forge compile
```
### Deploying the contracts
The deploy script at `script/Deploy.s.sol` deploys the `IrisClassifier` contract to the Infernet Anvil Testnet network.
We have the [following make target](./Makefile#L9) to deploy the contract. Refer to the Makefile
for more understanding around the deploy scripts.
```bash
make deploy
```
### Requesting a job
We also have a script called `CallContract.s.sol` that requests a job to the `IrisClassifier` contract.
Refer to the [script](./script/CallContract.s.sol) for more details. Similar to deployment,
you can run that script using the following convenience make target.
```bash
make call-contract
```
Refer to the [Makefile](./Makefile#L14) for more details.

View File

@ -0,0 +1,7 @@
[profile.default]
src = "src"
out = "out"
libs = ["lib"]
via_ir = true
# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options

View File

@ -0,0 +1,2 @@
forge-std/=lib/forge-std/src
infernet-sdk/=lib/infernet-sdk/src

View File

@ -0,0 +1,19 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.0;
import {Script, console2} from "forge-std/Script.sol";
import {IrisClassifier} from "../src/IrisClassifier.sol";
contract CallContract is Script {
    /// @notice Calls `classifyIris()` on the deployed `IrisClassifier` consumer.
    /// @dev The hard-coded address is the address the contract receives on the
    ///      local Infernet Anvil testnet (see `Deploy.s.sol`).
    function run() public {
        // Setup wallet
        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
        vm.startBroadcast(deployerPrivateKey);

        IrisClassifier irisClassifier = IrisClassifier(0x663F3ad617193148711d28f5334eE4Ed07016602);

        irisClassifier.classifyIris();

        vm.stopBroadcast();
    }
}

View File

@ -0,0 +1,26 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;
import {Script, console2} from "forge-std/Script.sol";
import {IrisClassifier} from "../src/IrisClassifier.sol";
contract Deploy is Script {
    /// @notice Deploys the `IrisClassifier` consumer contract against the
    ///         Infernet coordinator pre-deployed on the local Anvil testnet.
    function run() public {
        // Setup wallet: load deployer key from env and start broadcasting
        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
        vm.startBroadcast(deployerPrivateKey);

        // Log address
        address deployerAddress = vm.addr(deployerPrivateKey);
        console2.log("Loaded deployer: ", deployerAddress);

        // Coordinator address on the Infernet Anvil testnet
        address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3;

        // Create consumer
        IrisClassifier classifier = new IrisClassifier(coordinator);
        console2.log("Deployed IrisClassifier: ", address(classifier));

        // Stop broadcasting. (The original script also called `vm.broadcast()`
        // here — a stray leftover that arms a broadcast for a next call that
        // never happens; it has been removed.)
        vm.stopBroadcast();
    }
}

View File

@ -0,0 +1,67 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;
import {console2} from "forge-std/console2.sol";
import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol";
contract IrisClassifier is CallbackConsumer {
    /// @dev ASCII banner logged to the forge console when a compute result arrives.
    string private EXTREMELY_COOL_BANNER = "\n\n"
        "_____ _____ _______ _ _ _\n"
        "| __ \\|_ _|__ __| | | | /\\ | |\n"
        "| |__) | | | | | | | | | / \\ | |\n"
        "| _ / | | | | | | | |/ /\\ \\ | |\n"
        "| | \\ \\ _| |_ | | | |__| / ____ \\| |____\n"
        "|_| \\_\\_____| |_| \\____/_/ \\_\\______|\n\n";

    /// @param coordinator Address of the Infernet coordinator contract.
    constructor(address coordinator) CallbackConsumer(coordinator) {}

    /// @notice Requests an iris classification from the `onnx-iris` container.
    function classifyIris() public {
        /// @dev Iris data is in the following format:
        /// @dev [sepal_length, sepal_width, petal_length, petal_width]
        /// @dev the following vector corresponds to the following properties:
        /// "sepal_length": 5.5cm
        /// "sepal_width": 2.4cm
        /// "petal_length": 3.8cm
        /// "petal_width": 1.1cm
        /// @dev The data is normalized & scaled.
        /// refer to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13)
        /// for more info on normalization.
        /// @dev The data is adjusted by 6 decimals
        uint256[] memory iris_data = new uint256[](4);
        iris_data[0] = 1_038_004;
        iris_data[1] = 558_610;
        iris_data[2] = 1_103_782;
        iris_data[3] = 1_712_096;

        _requestCompute(
            "onnx-iris",
            abi.encode(iris_data),
            20 gwei,
            1_000_000,
            1
        );
    }

    /// @notice Callback invoked by the coordinator with the container's output.
    /// @dev Decodes the abi-encoded (raw, processed) output pair, then decodes
    ///      the raw output as a uint256[] of class probabilities (fixed-point,
    ///      scaled by 1e6) and logs them.
    function _receiveCompute(
        uint32 subscriptionId,
        uint32 interval,
        uint16 redundancy,
        address node,
        bytes calldata input,
        bytes calldata output,
        bytes calldata proof
    ) internal override {
        console2.log(EXTREMELY_COOL_BANNER);
        (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes));
        (uint256[] memory classes) = abi.decode(raw_output, (uint256[]));
        uint256 setosa = classes[0];
        uint256 versicolor = classes[1];
        uint256 virginica = classes[2];
        console2.log("predictions: (adjusted by 6 decimals, 1_000_000 = 100%, 1_000 = 0.1%)");
        console2.log("Setosa: ", setosa);
        console2.log("Versicolor: ", versicolor);
        console2.log("Virginica: ", virginica);
    }
}

View File

@ -0,0 +1,271 @@
# Running an ONNX Model on Infernet
Welcome to this comprehensive guide where we'll explore how to run an ONNX model on Infernet, using our [infernet-container-starter](https://github.com/ritual-net/infernet-container-starter/)
examples repository. This tutorial is designed to give you an end-to-end understanding of how you can run your own
custom pre-trained models, and interact with them on-chain and off-chain.
**Model:** This example uses a pre-trained model to classify iris flowers. The code for the model
is located at our [`simple-ml-models`](https://github.com/ritual-net/simple-ml-models/tree/main/iris_classification) repository.
## Pre-requisites
For this tutorial you'll need to have the following installed.
1. [Docker](https://docs.docker.com/engine/install/)
2. [Foundry](https://book.getfoundry.sh/getting-started/installation)
### Ensure `docker` & `foundry` exist
To check for `docker`, run the following command in your terminal:
```bash copy
docker --version
# Docker version 25.0.2, build 29cf629 (example output)
```
You'll also need to ensure that docker-compose exists in your terminal:
```bash copy
which docker-compose
# /usr/local/bin/docker-compose (example output)
```
To check for `foundry`, run the following command in your terminal:
```bash copy
forge --version
# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output)
```
### Clone the starter repository
If you haven't already, clone the infernet-container-starter repository. All of the code for this tutorial is located
under the `projects/onnx-iris` directory.
```bash copy
# Clone locally
git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
# Navigate to the repository
cd infernet-container-starter
```
## Making Inference Requests via Node API (a la Web2 request)
### Build the `onnx-iris` container
From the top-level directory of this repository, simply run the following command to build the `onnx-iris` container:
```bash copy
make build-container project=onnx-iris
```
After the container is built, you can deploy an infernet-node that utilizes that
container by running the following command:
```bash copy
make deploy-container project=onnx-iris
```
Now, you can make inference requests to the infernet-node. In a new tab, run:
```bash copy
curl -X POST "http://127.0.0.1:4000/api/jobs" \
-H "Content-Type: application/json" \
-d '{"containers":["onnx-iris"], "data": {"input": [[1.0380048, 0.5586108, 1.1037828, 1.712096]]}}'
```
You should get an output similar to the following:
```json
{
"id": "074b9e98-f1f6-463c-b185-651878f3b4f6"
}
```
Now, you can check the status of the job by running (Make sure job id matches the one
you got from the previous request):
```bash
curl -X GET "http://127.0.0.1:4000/api/jobs?id=074b9e98-f1f6-463c-b185-651878f3b4f6"
```
Should return:
```json
[
{
"id": "074b9e98-f1f6-463c-b185-651878f3b4f6",
"result": {
"container": "onnx-iris",
"output": [
[
[
0.0010151526657864451,
0.014391022734344006,
0.9845937490463257
]
]
]
},
"status": "success"
}
]
```
The `output` corresponds to the model's prediction for each of the classes:
```python
['setosa', 'versicolor', 'virginica']
```
In this case, the model predicts that the input corresponds to the class `virginica` with
a probability of `0.9845937490463257` (~98.5%).
#### Note Regarding the Input
The inputs provided above correspond to an iris flower with the following
characteristics:
1. Sepal Length: `5.5cm`
2. Sepal Width: `2.4cm`
3. Petal Length: `3.8cm`
4. Petal Width: `1.1cm`
Putting this input into a vector and scaling it, we get the following scaled input:
```python
[1.0380048, 0.5586108, 1.1037828, 1.712096]
```
Refer
to [this function in the model's repository](https://github.com/ritual-net/simple-ml-models/blob/03ebc6fb15d33efe20b7782505b1a65ce3975222/iris_classification/iris_inference_pytorch.py#L13)
for more information on how the input is scaled.
For more context on the Iris dataset, refer to
the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/iris).
## Making Inference Requests via Contracts (a la Web3 request)
The [contracts](contracts) directory contains a simple forge
project that can be used to interact with the Infernet Node.
Here, we have a very simple
contract, [IrisClassifier](contracts/src/IrisClassifier.sol),
that requests a compute job from the Infernet Node and then retrieves the result.
We are going to make the same request as above, but this time using a smart contract.
Since floats are not supported in Solidity, we convert all floats to `uint256` by
multiplying the input vector entries by `1e6`:
```Solidity
uint256[] memory iris_data = new uint256[](4);
iris_data[0] = 1_038_004;
iris_data[1] = 558_610;
iris_data[2] = 1_103_782;
iris_data[3] = 1_712_096;
```
We have multiplied the input by 1e6 to have enough accuracy. This can be seen
[here](contracts/src/IrisClassifier.sol#19) in the contract's
code.
### Monitoring the EVM Logs
The infernet node configuration for this project includes
an [infernet anvil node](projects/hello-world/README.md#77) with pre-deployed contracts. You can view the
logs of the anvil node to see what's going on. In a new terminal, run:
```bash
docker logs -f anvil-node
```
As you deploy the contract and make requests, you should see logs indicating the
requests and responses.
### Deploying the Contract
Simply run the following command to deploy the contract:
```bash
project=onnx-iris make deploy-contracts
```
In your anvil logs you should see the following:
```bash
eth_getTransactionReceipt
Transaction: 0xeed605eacdace39a48635f6d14215b386523766f80a113b4484f542d862889a4
Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602
Gas used: 714269
Block Number: 1
Block Hash: 0x4e6333f91e86a0a0be357b63fba9eb5f5ba287805ac35aaa7698fd05445730f5
Block Time: "Mon, 19 Feb 2024 20:31:17 +0000"
eth_blockNumber
```
Beautiful! We can see that a new contract has been created
at `0x663f3ad617193148711d28f5334ee4ed07016602`. That's the address of
the `IrisClassifier` contract. We are now going to call this contract. To do so,
we are using
the [CallContract.s.sol](contracts/script/CallContract.s.sol)
script. Note that the address of the
contract [is hardcoded in the script](contracts/script/CallContract.s.sol#L13),
and should match the address we see above. Since this is a test environment and we're
using a test deployer address, this address is quite deterministic and shouldn't change.
Otherwise, change the address in the script to match the address of the contract you
just deployed.
### Calling the Contract
To call the contract, run the following command:
```bash
project=onnx-iris make call-contract
```
In the anvil logs, you should see the following:
```bash
eth_sendRawTransaction
_____ _____ _______ _ _ _
| __ \|_ _|__ __| | | | /\ | |
| |__) | | | | | | | | | / \ | |
| _ / | | | | | | | |/ /\ \ | |
| | \ \ _| |_ | | | |__| / ____ \| |____
|_| \_\_____| |_| \____/_/ \_\______|
predictions: (adjusted by 6 decimals, 1_000_000 = 100%, 1_000 = 0.1%)
Setosa: 1015
Versicolor: 14391
Virginica: 984593
Transaction: 0x77c7ff26ed20ffb1a32baf467a3cead6ed81fe5ae7d2e419491ca92b4ac826f0
Gas used: 111091
Block Number: 3
Block Hash: 0x78f98f4d54ebdca2a8aa46c3b9b7e7ae36348373dbeb83c91a4600dd6aba2c55
Block Time: "Mon, 19 Feb 2024 20:33:00 +0000"
eth_blockNumber
eth_newFilter
eth_getFilterLogs
```
Beautiful! We can see that the same result has been posted to the contract.
### Next Steps
From here, you can bring your own pre-trained ONNX model, and with minimal changes, you can make it both work with an
infernet-node as well as a smart contract.
### More Information
1. Check out our [other examples](../../readme.md) if you haven't already
2. [Infernet Callback Consumer Tutorial](https://docs.ritual.net/infernet/sdk/consumers/Callback)
3. [Infernet Nodes Documentation](https://docs.ritual.net/infernet/node/introduction)
4. [Infernet-Compatible Containers](https://docs.ritual.net/infernet/node/containers)

2
projects/prompt-to-nft/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
# modal service outputs
modal/outputs

View File

@ -0,0 +1,3 @@
wallet
config.json
**/keyfile-arweave.json

View File

@ -0,0 +1,27 @@
FROM python:3.11-slim as builder

WORKDIR /app

ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1
ENV PIP_NO_CACHE_DIR 1
ENV RUNTIME docker
ENV PYTHONPATH src

RUN apt-get update
RUN apt-get install -y git curl

# install uv
ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
RUN /install.sh && rm /install.sh

COPY src/requirements.txt .
RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt

# Fixed: was lowercase `copy` — works, but Docker convention (and every other
# instruction in this file) is uppercase.
COPY wallet wallet
COPY src src

ENTRYPOINT ["hypercorn", "app:create_app()"]
CMD ["-b", "0.0.0.0:3000"]

View File

@ -0,0 +1,23 @@
DOCKER_ORG := ritualnetwork
EXAMPLE_NAME := prompt-to-nft
TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest

# Fixed: make is case-sensitive, so `.phony` just declared an ordinary target
# named ".phony"; `.PHONY` is required to actually mark these targets phony.
.PHONY: build run build-multiplatform

build:
ifdef CI
	mkdir -p wallet # in CI we don't have a wallet directory. This enables to bypass that and ensure that the image
	# is built successfully
endif
	@docker build -t $(TAG) .

# Mount point for the Arweave wallet inside the container (override as needed).
wallet_dir ?= /app/wallet

run:
	docker run -p 3000:3000 -v ./wallet:$(wallet_dir) --env-file prompt_to_nft.env $(TAG)

# You may need to set up a docker builder, to do so run:
# docker buildx create --name mybuilder --bootstrap --use
# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info
build-multiplatform:
	docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push .

View File

@ -0,0 +1,91 @@
# Prompt-to-NFT Container
## Overview
## Building & Running the Container in Isolation
Note that this container is meant to be started by the infernet-node. For development &
Testing purposes, you can run the container in isolation using the following commands.
### Building the Container
Simply run the following command to build the container.
```bash
make build
```
Consult the [Makefile](./Makefile) for the build command.
### Adding Arweave File
Add your arweave wallet file
### Running the Container
To run the container, you can use the following command:
```bash
make run
```
## Testing the Container
Run the following command to run an inference:
```bash
curl -X POST http://127.0.0.1:3000/service_output \
-H "Content-Type: application/json" \
-d '{"source":1, "data": {"prompt": "a golden retriever skiing"}}'
```
#### Note Regarding the Input

The request body follows the Infernet container input format:

- `source`: `1` indicates an off-chain request (`0` would be an on-chain request).
- `data.prompt`: the text prompt used to generate the image.
### Output

By running the above command, you should get a response similar to the following:

```json
{
  "prompt": "a golden retriever skiing",
  "hash": "<arweave-transaction-id>",
  "image_url": "https://arweave.net/<arweave-transaction-id>"
}
```

The `image_url` points to the generated image, uploaded to Arweave.

View File

@ -0,0 +1,53 @@
{
"log_path": "infernet_node.log",
"server": {
"port": 4000
},
"chain": {
"enabled": true,
"trail_head_blocks": 0,
"rpc_url": "http://host.docker.internal:8545",
"coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
"wallet": {
"max_gas_limit": 4000000,
"private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
}
},
"startup_wait": 1.0,
"docker": {
"username": "your-username",
"password": ""
},
"redis": {
"host": "redis",
"port": 6379
},
"forward_stats": true,
"containers": [
{
"id": "prompt-to-nft",
"image": "ritualnetwork/example-prompt-to-nft-infernet:latest",
"external": true,
"port": "3000",
"allowed_delegate_addresses": [],
"allowed_addresses": [],
"allowed_ips": [],
"command": "--bind=0.0.0.0:3000 --workers=2",
"env": {
"ARWEAVE_WALLET_FILE_PATH": "wallet/keyfile-arweave.json",
"IMAGE_GEN_SERVICE_URL": "http://your.services.ip:port"
}
},
{
"id": "anvil-node",
"image": "ritualnetwork/infernet-anvil:0.0.0",
"external": true,
"port": "8545",
"allowed_delegate_addresses": [],
"allowed_addresses": [],
"allowed_ips": [],
"command": "",
"env": {}
}
]
}

View File

@ -0,0 +1,2 @@
ARWEAVE_WALLET_FILE_PATH=
IMAGE_GEN_SERVICE_URL=

View File

@ -0,0 +1,109 @@
import logging
import os
import tempfile
from pathlib import Path
from typing import Any, cast

import aiohttp
from eth_abi import decode, encode  # type: ignore
from infernet_ml.utils.arweave import upload, load_wallet
from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource
from quart import Quart, request
log = logging.getLogger(__name__)
async def run_inference(prompt: str, output_path: str) -> None:
    """Generate an image for `prompt` via the external image-gen service.

    POSTs the prompt to the service configured through the
    IMAGE_GEN_SERVICE_URL environment variable and writes the returned
    PNG bytes to `output_path`.

    Args:
        prompt: text prompt forwarded to the stable-diffusion service.
        output_path: filesystem path the image bytes are written to.

    Raises:
        aiohttp.ClientResponseError: if the service returns an HTTP error
            status. (Previously error bodies were silently written to disk
            as if they were valid image data.)
    """
    async with aiohttp.ClientSession() as session:
        app_url = os.getenv("IMAGE_GEN_SERVICE_URL")
        async with session.post(
            f"{app_url}/service_output",
            json={
                "prompt": prompt,
            },
        ) as response:
            # Fail fast on non-2xx instead of persisting an error body.
            response.raise_for_status()
            image_bytes = await response.read()
    with open(output_path, "wb") as f:
        f.write(image_bytes)
def ensure_env_vars() -> None:
    """Validate required configuration at startup.

    Fails fast when IMAGE_GEN_SERVICE_URL is unset, then loads the Arweave
    wallet so wallet misconfiguration also surfaces immediately.
    """
    service_url = os.getenv("IMAGE_GEN_SERVICE_URL")
    if not service_url:
        raise ValueError("IMAGE_GEN_SERVICE_URL environment variable not set")
    load_wallet()
def create_app() -> Quart:
    """Create and configure the Quart application.

    Environment variables (and the Arweave wallet) are validated eagerly so
    that misconfiguration fails at startup rather than on the first request.

    Returns:
        The configured Quart app exposing `/` and `/service_output`.
    """
    app = Quart(__name__)
    ensure_env_vars()

    @app.route("/")
    def index() -> str:
        """
        Utility endpoint to check if the service is running.
        """
        return "Stable Diffusion Example Program"

    @app.route("/service_output", methods=["POST"])
    async def inference() -> dict[str, Any]:
        req_data = await request.get_json()
        """
        InfernetInput has the format:
            source: (0 on-chain, 1 off-chain)
            data: dict[str, Any]
        """
        infernet_input: InfernetInput = InfernetInput(**req_data)

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            prompt: str = cast(dict[str, str], infernet_input.data)["prompt"]
        else:
            # On-chain requests are sent as a generalized hex-string which we will
            # decode to the appropriate format.
            (prompt, mintTo) = decode(
                ["string", "address"], bytes.fromhex(cast(str, infernet_input.data))
            )
            log.info("mintTo: %s", mintTo)
            log.info("prompt: %s", prompt)

        # Fixed: a shared, fixed filename ("image.png") let concurrent requests
        # overwrite each other's output. A per-request temp file (auto-deleted
        # on close; reopening by name works on Linux, where this runs) avoids
        # the race and no longer leaks the file into the working directory.
        with tempfile.NamedTemporaryFile(suffix=".png") as temp:
            # run the inference and download the image to the temp file
            await run_inference(prompt, temp.name)
            tx = upload(Path(temp.name), {"Content-Type": "image/png"})

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            """
            In case of an off-chain request, the result is returned as is.
            """
            return {
                "prompt": prompt,
                "hash": tx.id,
                "image_url": f"https://arweave.net/{tx.id}",
            }
        else:
            """
            In case of an on-chain request, the result is returned in the format:
            {
                "raw_input": str,
                "processed_input": str,
                "raw_output": str,
                "processed_output": str,
                "proof": str,
            }
            refer to: https://docs.ritual.net/infernet/node/containers for more info.
            """
            return {
                "raw_input": infernet_input.data,
                "processed_input": "",
                "raw_output": encode(["string"], [tx.id]).hex(),
                "processed_output": "",
                "proof": "",
            }

    return app
if __name__ == "__main__":
    """
    Utility to run the app locally. For development purposes only.
    """
    # Binds to all interfaces on port 3000 (matches the container's CMD).
    create_app().run(host="0.0.0.0", port=3000)

View File

@ -0,0 +1,5 @@
quart==0.19.4
infernet_ml==0.1.0
PyArweave @ git+https://github.com/ritual-net/pyarweave.git
web3==6.15.0
tqdm==4.66.1

View File

@ -0,0 +1,14 @@
# Compiler files
cache/
out/
# Ignores development broadcast logs
!/broadcast
/broadcast/*/31337/
/broadcast/**/dry-run/
# Docs
docs/
# Dotenv file
.env

View File

@ -0,0 +1,14 @@
# phony targets are targets that don't actually create a file
# Fixed: make is case-sensitive — `.phony` declared an ordinary target named
# ".phony" instead of marking anything phony; also cover both targets.
.PHONY: deploy call-contract

# anvil's third default address
sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a
RPC_URL := http://localhost:8545

# deploying the contract
deploy:
	@PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL)

# calling mint() via the CallContract script
# (comment previously said "sayGM()", copied from the hello-world example)
call-contract:
	@PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL)

View File

@ -0,0 +1,7 @@
[profile.default]
src = "src"
out = "out"
libs = ["lib"]
via_ir = true
# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options

View File

@ -0,0 +1,3 @@
forge-std/=lib/forge-std/src
infernet-sdk/=lib/infernet-sdk/src
solmate/=lib/solmate/src

View File

@ -0,0 +1,22 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.0;
import {Script, console2} from "forge-std/Script.sol";
import {DiffusionNFT} from "../src/DiffusionNFT.sol";
contract CallContract is Script {
    // Prompt used when the `prompt` env var is not provided.
    string defaultPrompt = "A picture of a shrimp dunking a basketball";

    /// @notice Calls `mint` on the locally-deployed DiffusionNFT contract.
    function run() public {
        // Load the signer key and optional overrides from the environment.
        uint256 pk = vm.envUint("PRIVATE_KEY");
        address recipient = vm.envOr("mint_to", msg.sender);
        string memory requestedPrompt = vm.envOr("prompt", defaultPrompt);

        vm.startBroadcast(pk);
        DiffusionNFT(0x663F3ad617193148711d28f5334eE4Ed07016602).mint(requestedPrompt, recipient);
        vm.stopBroadcast();
    }
}

View File

@ -0,0 +1,26 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;
import {Script, console2} from "forge-std/Script.sol";
import {DiffusionNFT} from "../src/DiffusionNFT.sol";
contract Deploy is Script {
    /// @notice Deploys the DiffusionNFT consumer contract to the target chain.
    function run() public {
        // Setup wallet
        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
        vm.startBroadcast(deployerPrivateKey);

        // Log address
        address deployerAddress = vm.addr(deployerPrivateKey);
        console2.log("Loaded deployer: ", deployerAddress);

        // Coordinator pre-deployed on the local anvil node.
        address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3;

        // Create consumer
        DiffusionNFT nft = new DiffusionNFT(coordinator);
        // Fixed: previously logged "Deployed IrisClassifier" (copy-paste from
        // the onnx-iris example) while actually deploying DiffusionNFT.
        console2.log("Deployed DiffusionNFT: ", address(nft));

        // Execute
        vm.stopBroadcast();
        // Fixed: removed a stray `vm.broadcast()` call that followed
        // stopBroadcast() — it armed a one-off broadcast with no subsequent
        // transaction to consume it.
    }
}

View File

@ -0,0 +1,63 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;
import {console2} from "forge-std/console2.sol";
import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol";
import {ERC721} from "solmate/tokens/ERC721.sol";
contract DiffusionNFT is CallbackConsumer, ERC721 {
    // ASCII-art banner logged when a compute callback is received.
    string private EXTREMELY_COOL_BANNER = "\n\n" "_____ _____ _______ _ _ _\n"
    "| __ \\|_ _|__ __| | | | /\\ | |\n" "| |__) | | | | | | | | | / \\ | |\n"
    "| _ / | | | | | | | |/ /\\ \\ | |\n" "| | \\ \\ _| |_ | | | |__| / ____ \\| |____\n"
    "|_| \\_\\_____| |_| \\____/_/ \\_\\______|\n\n";

    /// @notice Wires the consumer to the Infernet coordinator and names the collection.
    constructor(address coordinator) CallbackConsumer(coordinator) ERC721("DiffusionNFT", "DN") {}

    /// @notice Requests generation of an NFT image for `prompt`, to be minted to `to`.
    /// @dev Fire-and-forget: the actual mint happens later in `_receiveCompute`.
    function mint(string memory prompt, address to) public {
        _requestCompute("prompt-to-nft", abi.encode(prompt, to), 20 gwei, 1_000_000, 1);
    }

    // Number of tokens minted so far; token ids run 1..counter (id 0 is never used).
    uint256 public counter = 0;

    // tokenId => Arweave transaction id of the generated image.
    mapping(uint256 => string) public arweaveHashes;

    /// @notice Returns the Arweave gateway URL for `tokenId`'s image.
    function tokenURI(uint256 tokenId) public view override returns (string memory) {
        return string.concat("https://arweave.net/", arweaveHashes[tokenId]);
    }

    /// @notice Returns the token ids owned by the caller.
    function nftCollection() public view returns (uint256[] memory) {
        uint256 balance = balanceOf(msg.sender);
        uint256[] memory collection = new uint256[](balance);
        uint256 j = 0;
        // Fixed off-by-one: token ids start at 1 (counter is pre-incremented
        // before each mint), so iterate 1..counter inclusive. The previous
        // `i = 0; i < counter` range queried token id 0, which is never
        // minted — solmate's ownerOf reverts for nonexistent tokens, making
        // this function revert after the first mint.
        for (uint256 i = 1; i <= counter; i++) {
            if (ownerOf(i) == msg.sender) {
                collection[j] = i;
                j++;
            }
        }
        return collection;
    }

    /// @notice Infernet callback: decodes the compute result and mints the NFT.
    /// @dev `output` is an envelope of (raw_output, processed_output); the raw
    ///      output ABI-encodes the Arweave tx id. `input` mirrors the original
    ///      request payload: (prompt, recipient).
    function _receiveCompute(
        uint32 subscriptionId,
        uint32 interval,
        uint16 redundancy,
        address node,
        bytes calldata input,
        bytes calldata output,
        bytes calldata proof
    ) internal override {
        console2.log(EXTREMELY_COOL_BANNER);
        (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes));
        (string memory arweaveHash) = abi.decode(raw_output, (string));
        (bytes memory raw_input, bytes memory processed_input) = abi.decode(input, (bytes, bytes));
        (string memory prompt, address to) = abi.decode(raw_input, (string, address));
        counter += 1;
        arweaveHashes[counter] = arweaveHash;
        console2.log("nft minted!", string.concat("https://arweave.net/", arweaveHashes[counter]));
        console2.log("nft id", counter);
        console2.log("nft owner", to);
        _mint(to, counter);
    }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 506 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 500 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 394 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1001 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

View File

@ -0,0 +1,416 @@
# Prompt to NFT
In this tutorial we are going to create a dapp where we can generate NFTs from a single prompt provided by the user. This
project has many components:
1. A service that runs Stable Diffusion.
2. A NextJS frontend that connects to the local Anvil node
3. An NFT smart contract which is also a [Infernet Consumer](https://docs.ritual.net/infernet/sdk/consumers/Callback).
4. An Infernet container which collects the prompt, calls the Stable Diffusion service, retrieves the NFT and uploads it
to Arweave.
5. An anvil node to which we will deploy the NFT smart contract.
## Install Pre-requisites
For this tutorial you'll need to have the following installed.
1. [Docker](https://docs.docker.com/engine/install/)
2. [Foundry](https://book.getfoundry.sh/getting-started/installation)
## Setting up a stable diffusion service
Included with this tutorial, is a [containerized stable-diffusion service](./stablediffusion).
### Rent a GPU machine
To run this service, you will need to have access to a machine with a powerful GPU. In the video above, we use an
A100 instance on [Paperspace](https://www.paperspace.com/).
### Install docker
You will have to install docker.
For Ubuntu, you can run the following commands:
```bash copy
# install docker
sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
```
As docker installation may vary depending on your operating system, consult the
[official documentation](https://docs.docker.com/engine/install/ubuntu/) for more information.
After installation, you can verify that docker is installed by running:
```bash
# sudo docker run hello-world
Hello from Docker!
```
### Ensure CUDA is installed
Depending on where you rent your GPU machine, CUDA is typically pre-installed. For Ubuntu, you can follow the
instructions [here](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#prepare-ubuntu).
You can verify that CUDA is installed by running:
```bash copy
# verify Installation
python -c '
import torch
print("torch.cuda.is_available()", torch.cuda.is_available())
print("torch.cuda.device_count()", torch.cuda.device_count())
print("torch.cuda.current_device()", torch.cuda.current_device())
print("torch.cuda.get_device_name(0)", torch.cuda.get_device_name(0))
'
```
If CUDA is installed and available, your output will look similar to the following:
```bash
torch.cuda.is_available() True
torch.cuda.device_count() 1
torch.cuda.current_device() 0
torch.cuda.get_device_name(0) Tesla V100-SXM2-16GB
```
### Ensure `nvidia-container-runtime` is installed
For your container to be able to access the GPU, you will need to install the `nvidia-container-runtime`.
On Ubuntu, you can run the following commands:
```bash copy
# Docker GPU support
# nvidia container-runtime repos
# https://nvidia.github.io/nvidia-container-runtime/
curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | \
sudo apt-key add -
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | \
sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list
sudo apt-get update
# install nvidia-container-runtime
# https://docs.docker.com/config/containers/resource_constraints/#gpu
sudo apt-get install -y nvidia-container-runtime
```
As always, consult the [official documentation](https://nvidia.github.io/nvidia-container-runtime/) for more
information.
You can verify that `nvidia-container-runtime` is installed by running:
```bash copy
which nvidia-container-runtime-hook
# this should return a path to the nvidia-container-runtime-hook
```
Now, with the pre-requisites installed, we can move on to setting up the stable diffusion service.
### Clone this repository
```bash copy
# Clone locally
git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
# Navigate to the repository
cd infernet-container-starter
```
### Build the Stable Diffusion service
This will build the `stablediffusion` service container.
```bash copy
make build-service project=prompt-to-nft service=stablediffusion
```
### Run the Stable Diffusion service
```bash copy
make run-service project=prompt-to-nft service=stablediffusion
```
This will start the `stablediffusion` service. Note that this service will have to download a large model file,
so it may take a few minutes to be fully ready. Downloaded model will get cached, so subsequent runs will be faster.
## Setting up the Infernet Node along with the `prompt-to-nft` container
You can follow the following steps on your local machine to setup the Infernet Node and the `prompt-to-nft` container.
### Ensure `docker` & `foundry` exist
To check for `docker`, run the following command in your terminal:
```bash copy
docker --version
# Docker version 25.0.2, build 29cf629 (example output)
```
You'll also need to ensure that docker-compose exists in your terminal:
```bash copy
which docker-compose
# /usr/local/bin/docker-compose (example output)
```
To check for `foundry`, run the following command in your terminal:
```bash copy
forge --version
# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output)
```
### Clone the starter repository
Just like our other examples, we're going to clone this repository.
All of the code and instructions for this tutorial can be found in the
[`projects/prompt-to-nft`](./prompt-to-nft)
directory of the repository.
```bash copy
# Clone locally
git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
# Navigate to the repository
cd infernet-container-starter
```
### Configure the `prompt-to-nft` container
#### Configure the URL for the Stable Diffusion service
The `prompt-to-nft` container needs to know where to find the stable diffusion service. To do this, we need to
modify the configuration file for the `prompt-to-nft` container. We have a sample [config.sample.json](./container/config.sample.json) file.
Simply navigate to the [`projects/prompt-to-nft/container`](./container) directory and set up the config file:
```bash
cd projects/prompt-to-nft/container
cp config.sample.json config.json
```
In the `containers` field, you will see the following:
```json
"containers": [
{
// etc. etc.
"env": {
"ARWEAVE_WALLET_FILE_PATH": "/app/wallet/keyfile-arweave.json",
"IMAGE_GEN_SERVICE_URL": "http://your.services.ip:port" // <- replace with your service's IP and port
}
}
},
```
#### Configure the path to your Arweave wallet
Create a directory named `wallet` in the `container` directory and place your Arweave wallet file in it.
```bash
mkdir wallet
cp /path/to/your/arweave-wallet.json wallet/keyfile-arweave.json
```
By default the `prompt-to-nft` container will look for a wallet file at `/app/wallet/keyfile-arweave.json`. The `wallet`
directory you have created, will get copied into your docker file at the build step below. If your wallet filename is
different, you can change the `ARWEAVE_WALLET_FILE_PATH` environment variable in the `config.json` file.
```json
"containers": [
{
// etc. etc.
"env": {
"ARWEAVE_WALLET_FILE_PATH": "/app/wallet/keyfile-arweave.json", // <- replace with your wallet file name
"IMAGE_GEN_SERVICE_URL": "http://your.services.ip:port"
}
}
},
```
### Build the `prompt-to-nft` container
First, navigate back to the root of the repository. Then simply run the following command to build the `prompt-to-nft`
container:
```bash copy
cd ../../..
make build-container project=prompt-to-nft
```
### Deploy the `prompt-to-nft` container with Infernet
You can run a simple command to deploy the `prompt-to-nft` container along with bootstrapping the rest of the
Infernet node stack in one go:
```bash copy
make deploy-container project=prompt-to-nft
```
### Check the running containers
At this point it makes sense to check the running containers to ensure everything is running as expected.
```bash
# > docker container ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0dbc30f67e1e ritualnetwork/example-prompt-to-nft-infernet:latest "hypercorn app:creat…" 8 seconds ago Up 7 seconds
0.0.0.0:3000->3000/tcp prompt-to-nft
0c5140e0f41b ritualnetwork/infernet-anvil:0.0.0 "anvil --host 0.0.0.…" 23 hours ago Up 23 hours
0.0.0.0:8545->3000/tcp anvil-node
f5682ec2ad31 ritualnetwork/infernet-node:latest "/app/entrypoint.sh" 23 hours ago Up 9 seconds
0.0.0.0:4000->4000/tcp deploy-node-1
c1ece27ba112 fluent/fluent-bit:latest "/fluent-bit/bin/flu…" 23 hours ago Up 10 seconds 2020/tcp,
0.0.0.0:24224->24224/tcp, :::24224->24224/tcp deploy-fluentbit-1
3cccea24a303 redis:latest "docker-entrypoint.s…" 23 hours ago Up 10 seconds 0.0.0.0:6379->6379/tcp,
:::6379->6379/tcp deploy-redis-1
```
You should see five different images running, including the Infernet node and the prompt-to-nft container.
## Minting an NFT by directly calling the consumer contract
In the following steps, we will deploy our NFT consumer contract and call it using a forge script to mint an NFT.
### Setup
Notice that in [one of the steps above](#check-the-running-containers) we have an Anvil node running on port `8545`.
By default, the [`anvil-node`](https://hub.docker.com/r/ritualnetwork/infernet-anvil) image used deploys the
[Infernet SDK](https://docs.ritual.net/infernet/sdk/introduction) and other relevant contracts for you:
- Coordinator: `0x5FbDB2315678afecb367f032d93F642f64180aa3`
- Primary node: `0x70997970C51812dc3A010C7d01b50e0d17dc79C8`
### Deploy our NFT Consumer contract
In this step, we will deploy our NFT consumer contract to the Anvil node. Our [`DiffusionNFT.sol`](./contracts/src/DiffusionNFT.sol)
contract is a simple ERC721 contract which implements our consumer interface.
#### Anvil logs
During this process, it is useful to look at the logs of the Anvil node to see what's going on. To follow the logs,
in a new terminal, run:
```bash copy
docker logs -f anvil-node
```
#### Deploying the contract
Once ready, to deploy the [`DiffusionNFT`](./contracts/src/DiffusionNFT.sol) consumer contract, in another terminal, run:
```bash copy
make deploy-contracts project=prompt-to-nft
```
You should expect to see similar Anvil logs:
```bash
# > make deploy-contracts project=prompt-to-nft
eth_getTransactionReceipt
Transaction: 0x0577dc98192d971bafb30d53cb217c9a9c16f92ab435d20a697024a4f122c048
Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602
Gas used: 1582129
Block Number: 1
Block Hash: 0x1113522c8422bde163f21461c7c66496e08d4bb44f56e4131c2af57f8457f5a5
Block Time: "Wed, 6 Mar 2024 05:03:45 +0000"
eth_getTransactionByHash
```
From our logs, we can see that the `DiffusionNFT` contract has been deployed to address
`0x663f3ad617193148711d28f5334ee4ed07016602`.
### Call the contract
Now, let's call the contract to mint an NFT! In the same terminal, run:
```bash copy
make call-contract project=prompt-to-nft prompt="A golden retriever skiing."
```
You should first expect to see an initiation transaction sent to the `DiffusionNFT` contract:
```bash
eth_getTransactionReceipt
Transaction: 0x571022944a1aca5647e10a58b2242a83d88f2e54dca0c7b4afe3c4b61fa3faf6
Gas used: 214390
Block Number: 2
Block Hash: 0x167a45bb2d30ab3732553aafb1755a3e126b2e1ae7ef52ca96bd75acb0eeb5eb
Block Time: "Wed, 6 Mar 2024 05:06:09 +0000"
```
Shortly after that you should see another transaction submitted from the Infernet Node which is the
result of your on-chain subscription and its associated job request:
```bash
eth_sendRawTransaction
_____ _____ _______ _ _ _
| __ \|_ _|__ __| | | | /\ | |
| |__) | | | | | | | | | / \ | |
| _ / | | | | | | | |/ /\ \ | |
| | \ \ _| |_ | | | |__| / ____ \| |____
|_| \_\_____| |_| \____/_/ \_\______|
nft minted! https://arweave.net/<arweave-hash>
nft id 1
nft owner 0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38
Transaction: 0xcaf67e3f627c57652fa563a9b6f0f7fd27911409b3a7317165a6f5dfb5aff9fd
Gas used: 250851
Block Number: 3
Block Hash: 0xfad6f6743bd2d2751723be4c5be6251130b0f06a46ca61c8d77077169214f6a6
Block Time: "Wed, 6 Mar 2024 05:06:18 +0000"
eth_blockNumber
```
We can now confirm that the address of the Infernet Node (see the logged `node` parameter in the Anvil logs above)
matches the address of the node we setup by default for our Infernet Node.
We can also see that the owner of the NFT is `0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38` and the NFT has been minted
and uploaded to Arweave.
Congratulations! 🎉 You have successfully minted an NFT!
## Minting an NFT from the UI
This project also includes a simple NextJS frontend that connects to the local Anvil node. This frontend allows you to
connect your wallet and mint an NFT by providing a prompt.
### Pre-requisites
Ensure that you have the following installed:
1. [NodeJS](https://nodejs.org/en)
2. A node package manager. This can be either `npm`, `yarn`, `pnpm` or `bun`. Of course, we recommend `bun`.
### Run the UI
From the top-level directory of the repository, simply run the following command:
```bash copy
make run-service project=prompt-to-nft service=ui
```
This will start the UI service. You can now navigate to `http://localhost:3001` in your browser to see the UI.
![ui image](./img/ui.png)j
### Connect your wallet
By clicking "Connect Wallet", your wallet will also ask you to switch to our anvil testnet. By accepting, you will be
connected.
![metamask prompt](./img/metamask-anvil.png)
Here, you should also see the NFT you minted earlier through the direct foundry script.
![ui just after connecting](./img/just-connected.png)
### Get Some ETH
To be able to mint the NFT, you will need some ETH. You can get some testnet ETH using the "Request 1 ETH" button at
the top of the page. If your balance does not update, you can refresh the page.
### Enter a prompt & mint a new NFT
You can now enter a prompt and hit the "Generate NFT" button. A look at your anvil-node & infernet-node logs will
show you the transactions being sent and the NFT being minted. The newly-minted NFT will also appear in the UI.
![mint screen](./img/mint-screen.png)
Once your NFT's been generated, the UI will attempt to fetch it from arweave and display it. This usually takes less
than a minute.
![fetching from arweave](./img/fetching-from-arweave.png)
And there you have it! You've minted an NFT from a prompt using the UI!
![minted nft](./img/minted-nft.png)

View File

@ -0,0 +1,25 @@
# Builder image for the stable-diffusion example service.
FROM python:3.11-slim as builder

WORKDIR /app

# Container-friendly Python defaults: unbuffered logs, no .pyc files,
# and `src` on the import path. (key=value is the preferred ENV form.)
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONPATH=src

# System deps: git/curl for fetching, ffmpeg/libsm6/libxext6 for image handling.
# Single RUN layer + apt list cleanup keeps the image smaller.
RUN apt-get update && \
    apt-get install -y --no-install-recommends git curl ffmpeg libsm6 libxext6 && \
    rm -rf /var/lib/apt/lists/*

# install uv (fast pip replacement)
ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
RUN /install.sh && rm /install.sh

COPY src/requirements.txt .
RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt

COPY src src

# Serve the Quart app factory with hypercorn; bind address overridable via CMD.
ENTRYPOINT ["hypercorn", "app:create_app()"]
CMD ["-b", "0.0.0.0:3000"]

View File

@ -0,0 +1,19 @@
DOCKER_ORG := ritualnetwork
EXAMPLE_NAME := stablediffusion
TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest

# Must be the uppercase .PHONY special target; lowercase `.phony` is ignored
# by make, so these targets would collide with any files of the same name.
.PHONY: build run build-multiplatform

# Build the container image locally
build:
	@docker build -t $(TAG) .

# Host/port mapping; override with `make run port_mapping=...`
port_mapping ?= 0.0.0.0:3002:3000

# Run with GPU access and a shared HF cache to avoid re-downloading weights
run:
	docker run -p $(port_mapping) --gpus all -v ~/.cache:/root/.cache $(TAG)

# You may need to set up a docker builder, to do so run:
# docker buildx create --name mybuilder --bootstrap --use
# refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info
build-multiplatform:
	docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push .

View File

@ -0,0 +1,25 @@
from quart import Quart, request, Response
from stable_diffusion_workflow import StableDiffusionWorkflow
def create_app() -> Quart:
    """Build the Quart app serving stable-diffusion inference.

    The workflow is constructed and set up once at app-creation time so the
    model is ready before the first request arrives.
    """
    app = Quart(__name__)

    sd_workflow = StableDiffusionWorkflow()
    sd_workflow.setup()

    @app.get("/")
    async def hello():
        # Simple liveness endpoint.
        return "Hello, World! I'm running stable diffusion"

    @app.post("/service_output")
    async def service_output():
        # Run inference on the JSON request body and return the PNG bytes.
        payload = await request.get_json()
        png_bytes = sd_workflow.inference(payload)
        return Response(png_bytes, mimetype="image/png")

    return app


if __name__ == "__main__":
    # Dev entrypoint; in the container hypercorn serves `app:create_app()`.
    create_app().run(host="0.0.0.0", port=3002)

View File

@ -0,0 +1,10 @@
diffusers~=0.19
invisible_watermark~=0.1
transformers==4.36
accelerate~=0.21
safetensors~=0.3
Quart==0.19.4
jmespath==1.0.1
huggingface-hub==0.20.3
infernet_ml==0.1.0
PyArweave @ git+https://github.com/ritual-net/pyarweave.git

View File

@ -0,0 +1,86 @@
import io
from typing import Any
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import snapshot_download
from infernet_ml.workflows.inference.base_inference_workflow import (
BaseInferenceWorkflow,
)
class StableDiffusionWorkflow(BaseInferenceWorkflow):
    """Two-stage SDXL (base + refiner) workflow producing PNG image bytes."""

    def __init__(
        self,
        *args: Any,
        **kwargs: Any,
    ):
        super().__init__(*args, **kwargs)

    def do_setup(self) -> Any:
        """Download SDXL weights and construct the base and refiner pipelines."""
        # Skip weight formats that are not loaded (fp16 safetensors variant is
        # used instead).
        skip_patterns = [
            "*.bin",
            "*.onnx_data",
            "*/diffusion_pytorch_model.safetensors",
        ]
        for repo_id in (
            "stabilityai/stable-diffusion-xl-base-1.0",
            "stabilityai/stable-diffusion-xl-refiner-1.0",
        ):
            snapshot_download(repo_id, ignore_patterns=skip_patterns)

        # Shared pipeline-loading options (half precision, safetensors,
        # automatic device placement).
        common_kwargs = dict(
            torch_dtype=torch.float16,
            use_safetensors=True,
            variant="fp16",
            device_map="auto",
        )

        # Base pipeline produces latents; the refiner shares its second text
        # encoder and VAE to save memory.
        self.base = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", **common_kwargs
        )
        self.refiner = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-refiner-1.0",
            text_encoder_2=self.base.text_encoder_2,
            vae=self.base.vae,
            **common_kwargs,
        )

    def do_preprocessing(self, input_data: dict[str, Any]) -> dict[str, Any]:
        # Inputs are passed through unchanged.
        return input_data

    def do_run_model(self, input: dict[str, Any]) -> bytes:
        """Run base then refiner on `input["prompt"]`; return PNG bytes.

        Optional keys: "negative_prompt", "n_steps", "high_noise_frac".
        """
        prompt = input["prompt"]
        negative_prompt = input.get("negative_prompt", "disfigured, ugly, deformed")
        n_steps = input.get("n_steps", 24)
        high_noise_frac = input.get("high_noise_frac", 0.8)

        # Stage 1: base model denoises up to `high_noise_frac`, emitting latents.
        latents = self.base(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=n_steps,
            denoising_end=high_noise_frac,
            output_type="latent",
        ).images

        # Stage 2: refiner continues denoising from the same fraction.
        refined_image = self.refiner(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=n_steps,
            denoising_start=high_noise_frac,
            image=latents,
        ).images[0]

        buffer = io.BytesIO()
        refined_image.save(buffer, format="PNG")
        return buffer.getvalue()

    def do_postprocessing(self, input: Any, output: Any) -> Any:
        # No postprocessing; the PNG bytes are returned as-is.
        return output

View File

@ -0,0 +1,3 @@
{
"extends": "next/core-web-vitals"
}

36
projects/prompt-to-nft/ui/.gitignore vendored Normal file
View File

@ -0,0 +1,36 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# local env files
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts

View File

@ -0,0 +1,11 @@
# Must be the uppercase .PHONY special target; lowercase `.phony` is ignored
# by make.
.PHONY: run

# Start the UI dev server with the first available package manager
# (preference order: bun, pnpm, npm).
run:
	@PACKAGE_MANAGER=$$(command -v bun || command -v pnpm || command -v npm); \
	if [ -z "$$PACKAGE_MANAGER" ]; then \
	echo "No package manager found. Please install bun, pnpm, or npm."; \
	exit 1; \
	fi; \
	echo "Using $$PACKAGE_MANAGER..."; \
	$$PACKAGE_MANAGER install; \
	$$PACKAGE_MANAGER run dev;

View File

@ -0,0 +1,40 @@
This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
## Getting Started
First, run the development server:
```bash
npm run dev
# or
yarn dev
# or
pnpm dev
# or
bun dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
You can start editing the page by modifying `pages/index.tsx`. The page auto-updates as you edit the file.
[API routes](https://nextjs.org/docs/api-routes/introduction) can be accessed on [http://localhost:3000/api/hello](http://localhost:3000/api/hello). This endpoint can be edited in `pages/api/hello.ts`.
The `pages/api` directory is mapped to `/api/*`. Files in this directory are treated as [API routes](https://nextjs.org/docs/api-routes/introduction) instead of React pages.
This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
## Learn More
To learn more about Next.js, take a look at the following resources:
- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome!
## Deploy on Vercel
The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.

Binary file not shown.

View File

@ -0,0 +1,6 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
  // Enable React strict mode (extra dev-time checks; effects run twice in dev).
  reactStrictMode: true,
};
export default nextConfig;

View File

@ -0,0 +1,32 @@
{
"name": "ui",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev --port 3001",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"@rainbow-me/rainbowkit": "^2.0.0",
"@tanstack/react-query": "^5.22.2",
"next": "14.1.0",
"prettier": "^3.2.5",
"react": "^18",
"react-dom": "^18",
"viem": "2.x",
"wagmi": "^2.5.7"
},
"devDependencies": {
"typescript": "^5",
"@types/node": "^20",
"@types/react": "^18",
"@types/react-dom": "^18",
"autoprefixer": "^10.0.1",
"postcss": "^8",
"tailwindcss": "^3.3.0",
"eslint": "^8",
"eslint-config-next": "14.1.0"
}
}

View File

@ -0,0 +1,6 @@
// PostCSS pipeline: Tailwind runs first, then Autoprefixer adds vendor prefixes.
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 394 80"><path fill="#000" d="M262 0h68.5v12.7h-27.2v66.6h-13.6V12.7H262V0ZM149 0v12.7H94v20.4h44.3v12.6H94v21h55v12.6H80.5V0h68.7zm34.3 0h-17.8l63.8 79.4h17.9l-32-39.7 32-39.6h-17.9l-23 28.6-23-28.6zm18.3 56.7-9-11-27.1 33.7h17.8l18.3-22.7z"/><path fill="#000" d="M81 79.3 17 0H0v79.3h13.6V17l50.2 62.3H81Zm252.6-.4c-1 0-1.8-.4-2.5-1s-1.1-1.6-1.1-2.6.3-1.8 1-2.5 1.6-1 2.6-1 1.8.3 2.5 1a3.4 3.4 0 0 1 .6 4.3 3.7 3.7 0 0 1-3 1.8zm23.2-33.5h6v23.3c0 2.1-.4 4-1.3 5.5a9.1 9.1 0 0 1-3.8 3.5c-1.6.8-3.5 1.3-5.7 1.3-2 0-3.7-.4-5.3-1s-2.8-1.8-3.7-3.2c-.9-1.3-1.4-3-1.4-5h6c.1.8.3 1.6.7 2.2s1 1.2 1.6 1.5c.7.4 1.5.5 2.4.5 1 0 1.8-.2 2.4-.6a4 4 0 0 0 1.6-1.8c.3-.8.5-1.8.5-3V45.5zm30.9 9.1a4.4 4.4 0 0 0-2-3.3 7.5 7.5 0 0 0-4.3-1.1c-1.3 0-2.4.2-3.3.5-.9.4-1.6 1-2 1.6a3.5 3.5 0 0 0-.3 4c.3.5.7.9 1.3 1.2l1.8 1 2 .5 3.2.8c1.3.3 2.5.7 3.7 1.2a13 13 0 0 1 3.2 1.8 8.1 8.1 0 0 1 3 6.5c0 2-.5 3.7-1.5 5.1a10 10 0 0 1-4.4 3.5c-1.8.8-4.1 1.2-6.8 1.2-2.6 0-4.9-.4-6.8-1.2-2-.8-3.4-2-4.5-3.5a10 10 0 0 1-1.7-5.6h6a5 5 0 0 0 3.5 4.6c1 .4 2.2.6 3.4.6 1.3 0 2.5-.2 3.5-.6 1-.4 1.8-1 2.4-1.7a4 4 0 0 0 .8-2.4c0-.9-.2-1.6-.7-2.2a11 11 0 0 0-2.1-1.4l-3.2-1-3.8-1c-2.8-.7-5-1.7-6.6-3.2a7.2 7.2 0 0 1-2.4-5.7 8 8 0 0 1 1.7-5 10 10 0 0 1 4.3-3.5c2-.8 4-1.2 6.4-1.2 2.3 0 4.4.4 6.2 1.2 1.8.8 3.2 2 4.3 3.4 1 1.4 1.5 3 1.5 5h-5.8z"/></svg>

After

Width:  |  Height:  |  Size: 1.3 KiB

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 283 64"><path fill="black" d="M141 16c-11 0-19 7-19 18s9 18 20 18c7 0 13-3 16-7l-7-5c-2 3-6 4-9 4-5 0-9-3-10-7h28v-3c0-11-8-18-19-18zm-9 15c1-4 4-7 9-7s8 3 9 7h-18zm117-15c-11 0-19 7-19 18s9 18 20 18c6 0 12-3 16-7l-8-5c-2 3-5 4-8 4-5 0-9-3-11-7h28l1-3c0-11-8-18-19-18zm-10 15c2-4 5-7 10-7s8 3 9 7h-19zm-39 3c0 6 4 10 10 10 4 0 7-2 9-5l8 5c-3 5-9 8-17 8-11 0-19-7-19-18s8-18 19-18c8 0 14 3 17 8l-8 5c-2-3-5-5-9-5-6 0-10 4-10 10zm83-29v46h-9V5h9zM37 0l37 64H0L37 0zm92 5-27 48L74 5h10l18 30 17-30h10zm59 12v10l-3-1c-6 0-10 4-10 10v15h-9V17h9v9c0-5 6-9 13-9z"/></svg>

After

Width:  |  Height:  |  Size: 630 B

View File

@ -0,0 +1,10 @@
import { ButtonHTMLAttributes, PropsWithChildren } from "react";
// Shared primary button: applies the app's button styling and forwards all
// native <button> props (onClick, disabled, children, ...).
export const Button = (
  props: PropsWithChildren<ButtonHTMLAttributes<HTMLButtonElement>>,
) => {
  return (
    <button
      className={"bg-emerald-700 font-light px-4 py-2 rounded-xl text-white"}
      {...props}
    />
  );
};

View File

@ -0,0 +1,15 @@
import { PropsWithChildren, useEffect, useState } from "react";
export const ClientRendered = ({ children }: PropsWithChildren) => {
// look at here:https://nextjs.org/docs/messages/react-hydration-error#solution-1-using-useeffect-to-run-on-the-client-only
const [isClient, setIsClient] = useState(false);
useEffect(() => {
setIsClient(true);
}, []);
if (!isClient) {
return null;
}
return <>{children}</>;
};

View File

@ -0,0 +1,38 @@
import { http, useAccount } from "wagmi";
import { createWalletClient, parseUnits } from "viem";
import { anvilNode } from "@/util/chain";
import { privateKeyToAccount } from "viem/accounts";
import { Button } from "@/components/Button";
export const FaucetButton = () => {
const account = useAccount();
const requestEth = async () => {
const { address: _address } = account;
if (!_address) {
console.log("No address found");
return;
}
const address = _address!;
const faucetAccount = privateKeyToAccount(
"0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6",
);
const client = createWalletClient({
account: faucetAccount,
chain: anvilNode,
transport: http(),
});
await client.sendTransaction({
to: address,
value: parseUnits("1", 18),
});
};
return (
<Button onClick={requestEth}>
{account ? "Request 1 ETH" : "Connect Your Wallet"}
</Button>
);
};

View File

@ -0,0 +1,69 @@
import { useEffect, useState } from "react";
// Renders an <img> for `url`, retrying until the resource becomes available.
// Until the image loads, a spinner labelled "Fetching from Arweave" is shown
// (the image is presumably still propagating on Arweave — see caller).
export const LoadImg = ({ url, tokenId }: { url: string; tokenId: number }) => {
  // True once the image at `url` has loaded successfully.
  const [loaded, setLoaded] = useState(false);
  // Count of failed load attempts so far (capped at 100 below).
  const [attempts, setAttempts] = useState(0);
  useEffect(() => {
    if (loaded) {
      return;
    }
    let img = new Image();
    const loadImg = () => {
      console.log(`trying: ${attempts}`);
      // A fresh Image per attempt forces a new network request.
      img = new Image();
      img.src = url;
      img.onload = () => {
        setLoaded(true);
      };
      img.onerror = () => {
        if (attempts < 100) {
          // Set a max number of attempts
          // NOTE(review): `attempts` is the value captured when this effect
          // ran, and setAttempts also re-triggers the effect (it is in the
          // dependency list), so retries can be scheduled from both paths —
          // confirm this does not double-poll.
          setTimeout(() => {
            setAttempts((prev) => prev + 1);
            loadImg(); // Retry loading the image
          }, 1000); // Retry after 1 seconds
        }
      };
    };
    if (!loaded) {
      loadImg();
    }
    // Cleanup function to avoid memory leaks
    return () => {
      img.onload = null;
      img.onerror = null;
    };
  }, [url, loaded, attempts]);
  return (
    <div
      className={
        "bg-teal-600 bg-opacity-20 rounded-lg flex flex-1 justify-center items-center"
      }
    >
      {loaded ? (
        <img className={"rounded-lg"} src={url} alt={`NFT ${tokenId}`} />
      ) : (
        <div className={""}>
          <button
            type="button"
            className="py-3 px-4 inline-flex items-center gap-x-2 text-sm rounded-lg
             border border-transparent bg-emerald-700 text-white hover:bg-blue-700 disabled:opacity-50
             disabled:pointer-events-none dark:focus:outline-none dark:focus:ring-1 dark:focus:ring-gray-600"
          >
            <span
              className="animate-spin inline-block size-4 border-[2px] border-current border-t-transparent text-white
               rounded-full"
              role="status"
              aria-label="loading"
            ></span>
            Fetching from Arweave
          </button>{" "}
        </div>
      )}
    </div>
  );
};

View File

@ -0,0 +1,28 @@
import { useAccount, useWriteContract } from "wagmi";
import { nftAbi } from "@/util/nftAbi";
import { NFT_ADDRESS } from "@/util/constants";
import {Button} from "@/components/Button";
/**
 * Button that calls `mint(prompt, recipient)` on the NFT contract using the
 * connected wallet as the recipient.
 */
export const MintButton = ({ prompt }: { prompt: string }) => {
  const { address } = useAccount();
  const { writeContract } = useWriteContract();

  const mint = () => {
    // No-op until a wallet is connected.
    if (!address) {
      return;
    }
    writeContract({
      chainId: 31337,
      abi: nftAbi,
      address: NFT_ADDRESS,
      functionName: "mint",
      args: [prompt, address],
    });
  };

  return (
    <Button onClick={mint}>
      <span className={"text-xl"}>Generate NFT</span>
    </Button>
  );
};

View File

@ -0,0 +1,24 @@
import { useAccount, useReadContract } from "wagmi";
import { nftAbi } from "@/util/nftAbi";
import { NFT_ADDRESS } from "@/util/constants";
// Reads the NFT contract's `counter` for the connected account and renders it.
// NOTE(review): `counter` looks like a contract-wide counter rather than a
// per-account balance — confirm against the contract before relying on this.
const NFTBalance = () => {
  const { address } = useAccount();

  const { data } = useReadContract({
    address: NFT_ADDRESS,
    account: address,
    abi: nftAbi,
    functionName: "counter",
    // Only query once a wallet is connected; poll every second for updates.
    query: {
      enabled: Boolean(address),
      refetchInterval: 1000,
    },
  });

  if (!data) {
    return <>loading...</>;
  }

  return <>your nft balance: {data.toString()}</>;
};

View File

@ -0,0 +1,56 @@
import { useAccount, useReadContract } from "wagmi";
import { NFT_ADDRESS } from "@/util/constants";
import { nftAbi } from "@/util/nftAbi";
import { NftImage } from "@/components/NftImage";
/**
 * Grid of every NFT minted on the contract, derived from its `counter` value.
 */
export const NftCollection = () => {
  const { address } = useAccount();

  const readContract = useReadContract({
    address: NFT_ADDRESS,
    account: address,
    abi: nftAbi,
    functionName: "counter",
    // Poll once per second, but only while a wallet is connected.
    query: {
      enabled: Boolean(address),
      refetchInterval: 1000,
    },
  });

  // Check for an explicit zero first: 0n is falsy, so the `!data` branch
  // below would otherwise swallow it and show the wrong message.
  if (readContract.data === 0n) {
    return <>No NFTs</>;
  }

  console.log("read contract data", readContract.data);

  if (!readContract.data) {
    return <>Please connect your wallet.</>;
  }

  const counter = parseInt(readContract.data.toString());
  // Token ids are 1-based: [1, 2, ..., counter].
  const nftIds = new Array(counter).fill(0n).map((_, index) => index + 1);
  console.log(`counter: ${counter}`);

  return (
    <div
      className={
        "bg-emerald-700 bg-opacity-10 p-3 flex-1 flex flex-col w-[100%]"
      }
    >
      <h2 className={"text-2xl ml-2 my-3"}>The Collection</h2>
      {nftIds.length === 0 ? (
        <div className={"justify-center flex mt-20 text-opacity-80"}>
          No NFTs minted.
        </div>
      ) : (
        <div className={"flex flex-wrap"}>
          {nftIds.map((id) => (
            <NftImage key={id} tokenId={id} contractAddress={NFT_ADDRESS} />
          ))}
        </div>
      )}
    </div>
  );
};

View File

@ -0,0 +1,46 @@
import { Address } from "viem";
import { useAccount, useReadContract } from "wagmi";
import { nftAbi } from "@/util/nftAbi";
import { LoadImg } from "@/components/LoadImg";
/**
 * Fetches `tokenURI(tokenId)` from the NFT contract and renders the image it
 * points at via LoadImg.
 */
export const NftImage = ({
  tokenId,
  contractAddress,
}: {
  tokenId: number;
  contractAddress: Address;
}) => {
  const { address } = useAccount();

  console.log(
    "tokenid",
    tokenId,
    "contractAddress",
    contractAddress,
    "address",
    address,
  );

  const { data } = useReadContract({
    address: contractAddress,
    abi: nftAbi,
    account: address,
    functionName: "tokenURI",
    args: [BigInt(tokenId)],
    // Poll once per second, only once a wallet is connected.
    query: {
      enabled: Boolean(address),
      refetchInterval: 1000,
    },
  });

  console.log("nft image data", data);

  if (!data) {
    return <>loading...</>;
  }

  return (
    <div className={"p-2 w-[100%] md:w-1/2 lg:w-1/3 flex"}>
      <LoadImg url={data} tokenId={tokenId} />
    </div>
  );
};

View File

@ -0,0 +1,20 @@
import "@/styles/globals.css";
import type { AppProps } from "next/app";
import { WagmiProvider } from "wagmi";
import { config } from "@/util/config";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { RainbowKitProvider } from "@rainbow-me/rainbowkit";
// Single QueryClient instance shared across the app's lifetime.
const queryClient = new QueryClient();

/**
 * Root app component: wraps every page with the wagmi, react-query and
 * RainbowKit providers.
 */
export default function App({ Component, pageProps }: AppProps) {
  const page = <Component {...pageProps} />;
  return (
    <WagmiProvider config={config}>
      <QueryClientProvider client={queryClient}>
        <RainbowKitProvider>{page}</RainbowKitProvider>
      </QueryClientProvider>
    </WagmiProvider>
  );
}

Some files were not shown because too many files have changed in this diff Show More