infernet-1.0.0 update

This commit is contained in:
arshan-ritual
2024-06-06 13:18:48 -04:00
parent 2a11fd3953
commit 40a6c590da
98 changed files with 879 additions and 506 deletions

View File

@@ -7,12 +7,15 @@ ENV PYTHONDONTWRITEBYTECODE 1
 ENV PIP_NO_CACHE_DIR 1
 ENV RUNTIME docker
 ENV PYTHONPATH src
+ARG index_url
+ENV UV_EXTRA_INDEX_URL ${index_url}
 RUN apt-get update
 RUN apt-get install -y git curl
 # install uv
-ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
+ADD https://astral.sh/uv/install.sh /install.sh
+RUN chmod 755 /install.sh
 RUN /install.sh && rm /install.sh
 COPY src/requirements.txt .

View File

@@ -5,7 +5,7 @@ TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest
 .phony: build run build-multiplatform
 build:
-	@docker build -t $(TAG) .
+	@docker build -t $(TAG) --build-arg index_url=$(index_url) .
 run:
 	docker run -p 3000:3000 $(TAG)

View File

@@ -8,9 +8,9 @@ repository.
 ## Overview
 We're making use of
-the [ONNXInferenceWorkflow](https://github.com/ritual-net/infernet-ml-internal/blob/main/src/ml/workflows/inference/onnx_inference_workflow.py)
+the [ONNXInferenceWorkflow](https://github.com/ritual-net/infernet-ml/blob/main/src/ml/workflows/inference/onnx_inference_workflow.py)
 class to run the model. This is one of many workflows that we currently support in our
-[infernet-ml](https://github.com/ritual-net/infernet-ml-internal). Consult the library's
+[infernet-ml](https://github.com/ritual-net/infernet-ml). Consult the library's
 documentation for more info on workflows that
 are supported.
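
To give a feel for the 1.0.0 API this commit migrates to, here is a minimal sketch of instantiating the workflow on its own, mirroring the updated service code later in this commit (it assumes `infernet-ml[onnx_inference]==1.0.0` is installed and the Hugging Face Hub is reachable):

```python
from infernet_ml.utils.model_loader import HFLoadArgs, ModelSource
from infernet_ml.workflows.inference.onnx_inference_workflow import (
    ONNXInferenceWorkflow,
)

# Download the iris model from the Hugging Face Hub and set up the ONNX session.
workflow = ONNXInferenceWorkflow(
    model_source=ModelSource.HUGGINGFACE_HUB,
    load_args=HFLoadArgs(repo_id="Ritual-Net/iris-dataset", filename="iris.onnx"),
)
workflow.setup()  # must run once before inference requests are served
```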

View File

@@ -7,7 +7,7 @@
     "enabled": true,
     "trail_head_blocks": 0,
     "rpc_url": "http://host.docker.internal:8545",
-    "coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
+    "registry_address": "0x663F3ad617193148711d28f5334eE4Ed07016602",
     "wallet": {
      "max_gas_limit": 4000000,
      "private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
@@ -23,6 +23,10 @@
     "port": 6379
   },
   "forward_stats": true,
+  "snapshot_sync": {
+    "sleep": 3,
+    "batch_size": 100
+  },
   "containers": [
     {
       "id": "onnx-iris",
@@ -33,18 +37,8 @@
       "allowed_addresses": [],
       "allowed_ips": [],
       "command": "--bind=0.0.0.0:3000 --workers=2",
-      "env": {}
-    },
-    {
-      "id": "anvil-node",
-      "image": "ritualnetwork/infernet-anvil:0.0.0",
-      "external": true,
-      "port": "8545",
-      "allowed_delegate_addresses": [],
-      "allowed_addresses": [],
-      "allowed_ips": [],
-      "command": "",
-      "env": {}
+      "env": {},
+      "accepted_payments": {}
     }
   ]
 }

View File

@@ -1,12 +1,18 @@
 import logging
 from typing import Any, cast, List

+from infernet_ml.utils.common_types import TensorInput
 import numpy as np
 from eth_abi import decode, encode  # type: ignore
-from infernet_ml.utils.model_loader import ModelSource
-from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource
+from infernet_ml.utils.model_loader import (
+    HFLoadArgs,
+    ModelSource,
+)
+from infernet_ml.utils.service_models import InfernetInput, JobLocation
 from infernet_ml.workflows.inference.onnx_inference_workflow import (
     ONNXInferenceWorkflow,
+    ONNXInferenceInput,
+    ONNXInferenceResult,
 )
 from quart import Quart, request
 from quart.json.provider import DefaultJSONProvider
@@ -29,10 +35,11 @@ def create_app() -> Quart:
     app = Quart(__name__)

     # we are downloading the model from the hub.
     # model repo is located at: https://huggingface.co/Ritual-Net/iris-dataset
-    model_source = ModelSource.HUGGINGFACE_HUB
-    model_args = {"repo_id": "Ritual-Net/iris-dataset", "filename": "iris.onnx"}
-    workflow = ONNXInferenceWorkflow(model_source=model_source, model_args=model_args)
+    workflow = ONNXInferenceWorkflow(
+        model_source=ModelSource.HUGGINGFACE_HUB,
+        load_args=HFLoadArgs(repo_id="Ritual-Net/iris-dataset", filename="iris.onnx"),
+    )
     workflow.setup()

     @app.route("/")
@@ -43,7 +50,7 @@ def create_app() -> Quart:
         return "ONNX Iris Classifier Example Program"

     @app.route("/service_output", methods=["POST"])
-    async def inference() -> dict[str, Any]:
+    async def inference() -> Any:
         req_data = await request.get_json()
         """
         InfernetInput has the format:
@@ -52,50 +59,56 @@ def create_app() -> Quart:
         """
         infernet_input: InfernetInput = InfernetInput(**req_data)

-        if infernet_input.source == InfernetInputSource.OFFCHAIN:
-            web2_input = cast(dict[str, Any], infernet_input.data)
-            values = cast(List[List[float]], web2_input["input"])
-        else:
-            # On-chain requests are sent as a generalized hex-string which we will
-            # decode to the appropriate format.
-            web3_input: List[int] = decode(
-                ["uint256[]"], bytes.fromhex(cast(str, infernet_input.data))
-            )[0]
-            values = [[float(v) / 1e6 for v in web3_input]]
+        match infernet_input:
+            case InfernetInput(source=JobLocation.OFFCHAIN):
+                web2_input = cast(dict[str, Any], infernet_input.data)
+                values = cast(List[List[float]], web2_input["input"])
+            case InfernetInput(source=JobLocation.ONCHAIN):
+                web3_input: List[int] = decode(
+                    ["uint256[]"], bytes.fromhex(cast(str, infernet_input.data))
+                )[0]
+                values = [[float(v) / 1e6 for v in web3_input]]

         """
         The input to the onnx inference workflow needs to conform to ONNX runtime's
         input_feed format. For more information refer to:
         https://docs.ritual.net/ml-workflows/inference-workflows/onnx_inference_workflow
         """
-        result: dict[str, Any] = workflow.inference({"input": values})
+        _input = ONNXInferenceInput(
+            inputs={"input": TensorInput(shape=(1, 4), dtype="float", values=values)},
+        )
+        result: ONNXInferenceResult = workflow.inference(_input)

-        if infernet_input.source == InfernetInputSource.OFFCHAIN:
-            """
-            In case of an off-chain request, the result is returned as is.
-            """
-            return result
-        else:
-            """
-            In case of an on-chain request, the result is returned in the format:
-            {
-                "raw_input": str,
-                "processed_input": str,
-                "raw_output": str,
-                "processed_output": str,
-                "proof": str,
-            }
-            refer to: https://docs.ritual.net/infernet/node/containers for more info.
-            """
-            predictions = cast(List[List[List[float]]], result)
-            predictions_normalized = [int(p * 1e6) for p in predictions[0][0]]
-            return {
-                "raw_input": "",
-                "processed_input": "",
-                "raw_output": encode(["uint256[]"], [predictions_normalized]).hex(),
-                "processed_output": "",
-                "proof": "",
-            }
+        match infernet_input:
+            case InfernetInput(destination=JobLocation.OFFCHAIN):
+                """
+                In case of an off-chain request, the result is returned as is.
+                """
+                return result
+            case InfernetInput(destination=JobLocation.ONCHAIN):
+                """
+                In case of an on-chain request, the result is returned in the format:
+                {
+                    "raw_input": str,
+                    "processed_input": str,
+                    "raw_output": str,
+                    "processed_output": str,
+                    "proof": str,
+                }
+                refer to: https://docs.ritual.net/infernet/node/containers for more
+                info.
+                """
+                predictions = result[0]
+                predictions_normalized = [int(p * 1e6) for p in predictions.values]
+                return {
+                    "raw_input": "",
+                    "processed_input": "",
+                    "raw_output": encode(["uint256[]"], [predictions_normalized]).hex(),
+                    "processed_output": "",
+                    "proof": "",
+                }
+            case _:
+                raise ValueError("Invalid destination")

     return app
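
The handler above fixes a 1e6 fixed-point convention for moving floats through `uint256[]` ABI encoding. A minimal round-trip sketch of that convention, independent of the service (it assumes only `eth_abi` is installed; the feature values are illustrative):

```python
from eth_abi import decode, encode

# Four iris features, scaled by 1e6 the way the consumer contract encodes them.
features = [5_100_000, 3_500_000, 1_400_000, 200_000]
payload = encode(["uint256[]"], [features]).hex()

# The service decodes the hex payload and undoes the scaling before inference.
raw = decode(["uint256[]"], bytes.fromhex(payload))[0]
values = [[v / 1e6 for v in raw]]
assert values == [[5.1, 3.5, 1.4, 0.2]]

# Model outputs (floats) are scaled back up for the on-chain response.
predictions = [0.01, 0.02, 0.97]
raw_output = encode(["uint256[]"], [[int(p * 1e6) for p in predictions]]).hex()
print(raw_output)
```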

View File

@@ -1,7 +1,4 @@
 quart==0.19.4
-infernet_ml==0.1.0
-PyArweave @ git+https://github.com/ritual-net/pyarweave.git
+infernet-ml==1.0.0
+infernet-ml[onnx_inference]==1.0.0
 web3==6.15.0
-onnx==1.15.0
-onnxruntime==1.16.3
-torch==2.1.2

View File

@@ -10,7 +10,7 @@ contract CallContract is Script {
         uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
         vm.startBroadcast(deployerPrivateKey);

-        IrisClassifier irisClassifier = IrisClassifier(0x663F3ad617193148711d28f5334eE4Ed07016602);
+        IrisClassifier irisClassifier = IrisClassifier(0x13D69Cf7d6CE4218F646B759Dcf334D82c023d8e);

         irisClassifier.classifyIris();

View File

@@ -14,9 +14,9 @@ contract Deploy is Script {
         address deployerAddress = vm.addr(deployerPrivateKey);
         console2.log("Loaded deployer: ", deployerAddress);

-        address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3;
+        address registry = 0x663F3ad617193148711d28f5334eE4Ed07016602;

         // Create consumer
-        IrisClassifier classifier = new IrisClassifier(coordinator);
+        IrisClassifier classifier = new IrisClassifier(registry);
         console2.log("Deployed IrisClassifier: ", address(classifier));

         // Execute

View File

@@ -14,7 +14,7 @@ contract IrisClassifier is CallbackConsumer {
         "| | \\ \\ _| |_ | | | |__| / ____ \\| |____\n"
         "|_| \\_\\_____| |_| \\____/_/ \\_\\______|\n\n";

-    constructor(address coordinator) CallbackConsumer(coordinator) {}
+    constructor(address registry) CallbackConsumer(registry) {}

     function classifyIris() public {
         /// @dev Iris data is in the following format:
@@ -38,9 +38,11 @@ contract IrisClassifier is CallbackConsumer {
         _requestCompute(
             "onnx-iris",
             abi.encode(iris_data),
-            20 gwei,
-            1_000_000,
-            1
+            1, // redundancy
+            address(0), // paymentToken
+            0, // paymentAmount
+            address(0), // wallet
+            address(0) // prover
         );
     }
@@ -51,7 +53,9 @@ contract IrisClassifier is CallbackConsumer {
         address node,
         bytes calldata input,
         bytes calldata output,
-        bytes calldata proof
+        bytes calldata proof,
+        bytes32 containerId,
+        uint256 index
     ) internal override {
         console2.log(EXTREMELY_COOL_BANNER);
         (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes));

View File

@@ -195,7 +195,7 @@ In your anvil logs you should see the following:
 eth_getTransactionReceipt

 Transaction: 0xeed605eacdace39a48635f6d14215b386523766f80a113b4484f542d862889a4
-Contract created: 0x663f3ad617193148711d28f5334ee4ed07016602
+Contract created: 0x13D69Cf7d6CE4218F646B759Dcf334D82c023d8e
 Gas used: 714269

 Block Number: 1
@@ -206,7 +206,7 @@ eth_blockNumber
 ```
 beautiful, we can see that a new contract has been created
-at `0x663f3ad617193148711d28f5334ee4ed07016602`. That's the address of
+at `0x663F3ad617193148711d28f5334eE4Ed07016602`. That's the address of
 the `IrisClassifier` contract. We are now going to call this contract. To do so,
 we are using
 the [CallContract.s.sol](contracts/script/CallContract.s.sol)
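
Before calling the contract, a small web3.py sketch can confirm that bytecode exists at the deployed address (this assumes anvil's default RPC endpoint at http://127.0.0.1:8545 and the address from the logs above; web3==6.15.0 is already in the service requirements):

```python
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))

# Address printed by the deploy script in the anvil logs above.
address = Web3.to_checksum_address("0x13D69Cf7d6CE4218F646B759Dcf334D82c023d8e")

code = w3.eth.get_code(address)
assert len(code) > 0, "no bytecode at this address; is IrisClassifier deployed?"
print(f"IrisClassifier bytecode: {len(code)} bytes")
```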