feat: publishing infernet-container-starter v0.2.0

projects/gpt4/container/.gitignore (new file, vendored)

gpt4.env
config.json

projects/gpt4/container/Dockerfile (new file)

FROM python:3.11-slim

WORKDIR /app

ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV PIP_NO_CACHE_DIR=1
ENV RUNTIME=docker
ENV PYTHONPATH=src

RUN apt-get update && apt-get install -y --no-install-recommends git curl

# install uv
ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
RUN /install.sh && rm /install.sh

COPY src/requirements.txt .

RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt

COPY src src

ENTRYPOINT ["hypercorn", "app:create_app()"]
CMD ["-b", "0.0.0.0:3000"]
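
As a side note, the `ENTRYPOINT` above uses Hypercorn's app-factory syntax (`app:create_app()`). For local iteration outside Docker, the same factory can be served programmatically. The sketch below is a convenience of ours, not part of this commit; it assumes Hypercorn is importable (it ships as Quart's server dependency):

```python
# dev_server.py - hypothetical helper, not part of this commit.
# Serves the Quart app the same way the container's ENTRYPOINT does:
#   hypercorn "app:create_app()" -b 0.0.0.0:3000
import asyncio

from hypercorn.asyncio import serve
from hypercorn.config import Config

from app import create_app  # assumes src/ is on PYTHONPATH, as in the image

config = Config()
config.bind = ["0.0.0.0:3000"]
asyncio.run(serve(create_app(), config))
```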

projects/gpt4/container/Makefile (new file)

DOCKER_ORG := ritualnetwork
EXAMPLE_NAME := gpt4
TAG := $(DOCKER_ORG)/example-$(EXAMPLE_NAME)-infernet:latest

.PHONY: build run build-multiplatform try-prompt

build:
	mkdir -p root-config
	@docker build -t $(TAG) .

run: build
	@docker run --env-file $(EXAMPLE_NAME).env -p 3000:3000 $(TAG)

# You may need to set up a docker builder. To do so, run:
#   docker buildx create --name mybuilder --bootstrap --use
# Refer to https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images for more info.
build-multiplatform:
	docker buildx build --platform linux/amd64,linux/arm64 -t $(TAG) --push .

projects/gpt4/container/README.md (new file)

# GPT-4

In this example, we run a minimalist container that makes use of our closed-source model
workflow: `CSSInferenceWorkflow`. Refer to [src/app.py](src/app.py) for the
implementation of the Quart application.

## Requirements

To use the model you'll need an OpenAI API key. Get one at
[OpenAI](https://openai.com/)'s website.

## Run the Container

```bash
make run
```

## Test the Container

```bash
curl -X POST localhost:3000/service_output -H "Content-Type: application/json" \
  -d '{"source": 1, "data": {"prompt": "can shrimp actually fry rice?"}}'
```
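
If you prefer Python over curl, the same request fits in a short script. This is a sketch only; it assumes the `requests` package, which is not among the container's dependencies:

```python
import requests  # assumed to be installed separately; not in requirements.txt

# source=1 marks the request as off-chain; the prompt rides in "data".
payload = {"source": 1, "data": {"prompt": "can shrimp actually fry rice?"}}

resp = requests.post("http://localhost:3000/service_output", json=payload)
resp.raise_for_status()
print(resp.json())  # off-chain requests return {"message": ...}
```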

projects/gpt4/container/config.sample.json (new file)

{
    "log_path": "infernet_node.log",
    "server": {
        "port": 4000
    },
    "chain": {
        "enabled": true,
        "trail_head_blocks": 0,
        "rpc_url": "http://host.docker.internal:8545",
        "coordinator_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3",
        "wallet": {
            "max_gas_limit": 4000000,
            "private_key": "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
        }
    },
    "startup_wait": 1.0,
    "docker": {
        "username": "your-username",
        "password": ""
    },
    "redis": {
        "host": "redis",
        "port": 6379
    },
    "forward_stats": true,
    "containers": [
        {
            "id": "gpt4",
            "image": "ritualnetwork/example-gpt4-infernet:latest",
            "external": true,
            "port": "3000",
            "allowed_delegate_addresses": [],
            "allowed_addresses": [],
            "allowed_ips": [],
            "command": "--bind=0.0.0.0:3000 --workers=2",
            "env": {
                "OPENAI_API_KEY": "your-openai-key"
            }
        },
        {
            "id": "anvil-node",
            "image": "ritualnetwork/infernet-anvil:0.0.0",
            "external": true,
            "port": "8545",
            "allowed_delegate_addresses": [],
            "allowed_addresses": [],
            "allowed_ips": [],
            "command": "",
            "env": {}
        }
    ]
}
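
After copying the sample to `config.json` and filling in your key, a quick sanity check like the following sketch (the script and its failure message are ours, not part of this commit) can catch a forgotten placeholder:

```python
import json

# Hypothetical check script: confirm the gpt4 container has a real API key.
with open("config.json") as f:
    config = json.load(f)

gpt4 = next(c for c in config["containers"] if c["id"] == "gpt4")
key = gpt4["env"].get("OPENAI_API_KEY", "")
assert key and key != "your-openai-key", "Set OPENAI_API_KEY in config.json"
print("config OK: gpt4 container has an API key set")
```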

projects/gpt4/container/gpt4.env.sample (new file)

OPENAI_API_KEY=

projects/gpt4/container/src/app.py (new file)

import logging
from typing import Any, cast

from eth_abi import decode, encode  # type: ignore
from infernet_ml.utils.service_models import InfernetInput, InfernetInputSource
from infernet_ml.workflows.inference.css_inference_workflow import CSSInferenceWorkflow
from quart import Quart, request

log = logging.getLogger(__name__)


def create_app() -> Quart:
    app = Quart(__name__)

    workflow = CSSInferenceWorkflow(provider="OPENAI", endpoint="completions")
    workflow.setup()

    @app.route("/")
    def index() -> str:
        """
        Utility endpoint to check if the service is running.
        """
        return "GPT4 Example Program"

    @app.route("/service_output", methods=["POST"])
    async def inference() -> dict[str, Any]:
        req_data = await request.get_json()

        # InfernetInput has the format:
        #   source: (0 on-chain, 1 off-chain)
        #   data: dict[str, Any]
        infernet_input: InfernetInput = InfernetInput(**req_data)

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            prompt = cast(dict[str, Any], infernet_input.data).get("prompt")
        else:
            # On-chain requests are sent as a generalized hex string, which we
            # decode to the appropriate format.
            (prompt,) = decode(
                ["string"], bytes.fromhex(cast(str, infernet_input.data))
            )

        result: dict[str, Any] = workflow.inference(
            {
                "model": "gpt-4-0613",
                "params": {
                    "endpoint": "completions",
                    "messages": [
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": prompt},
                    ],
                },
            }
        )

        if infernet_input.source == InfernetInputSource.OFFCHAIN:
            # For an off-chain request, the result is returned as-is.
            return {"message": result}
        else:
            # For an on-chain request, the result is returned in the format:
            # {
            #     "raw_input": str,
            #     "processed_input": str,
            #     "raw_output": str,
            #     "processed_output": str,
            #     "proof": str,
            # }
            # Refer to https://docs.ritual.net/infernet/node/containers for more info.
            return {
                "raw_input": "",
                "processed_input": "",
                # Serialize the result dict before ABI-encoding it as a string.
                "raw_output": encode(["string"], [str(result)]).hex(),
                "processed_output": "",
                "proof": "",
            }

    return app


if __name__ == "__main__":
    # Utility to run the app locally. For development purposes only.
    create_app().run(port=3000)
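
To make the on-chain branch above concrete, here is a minimal round-trip sketch using `eth_abi` (already imported by the app). The payload contents are invented for illustration:

```python
from eth_abi import decode, encode

# A contract calls abi.encode(prompt); as hex, that is the on-chain
# InfernetInput "data" the service receives.
payload_hex = encode(["string"], ["can shrimp actually fry rice?"]).hex()

# The service decodes it back to the original prompt string...
(prompt,) = decode(["string"], bytes.fromhex(payload_hex))
assert prompt == "can shrimp actually fry rice?"

# ...and returns its result ABI-encoded the same way in "raw_output".
raw_output = encode(["string"], [str({"output": "...model response..."})]).hex()
(result_str,) = decode(["string"], bytes.fromhex(raw_output))
print(result_str)
```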

projects/gpt4/container/src/requirements.txt (new file)

quart==0.19.4
infernet_ml==0.1.0
PyArweave @ git+https://github.com/ritual-net/pyarweave.git
web3==6.15.0
retry2==0.9.5

projects/gpt4/contracts/.github/workflows/test.yml (new file, vendored)

name: test

on: workflow_dispatch

env:
  FOUNDRY_PROFILE: ci

jobs:
  check:
    strategy:
      fail-fast: true

    name: Foundry project
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Install Foundry
        uses: foundry-rs/foundry-toolchain@v1
        with:
          version: nightly

      - name: Run Forge build
        run: |
          forge --version
          forge build --sizes
        id: build

      - name: Run Forge tests
        run: |
          forge test -vvv
        id: test

projects/gpt4/contracts/.gitignore (new file, vendored)

# Compiler files
cache/
out/

# Ignores development broadcast logs
!/broadcast
/broadcast/*/31337/
/broadcast/**/dry-run/

# Docs
docs/

# Dotenv file
.env

projects/gpt4/contracts/Makefile (new file)

# .PHONY targets are targets that don't actually create a file
.PHONY: deploy call-contract

# anvil's third default private key
sender := 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a
RPC_URL := http://localhost:8545

# deploying the contract
deploy:
	@PRIVATE_KEY=$(sender) forge script script/Deploy.s.sol:Deploy --broadcast --rpc-url $(RPC_URL)

# calling promptGPT()
call-contract:
	@PRIVATE_KEY=$(sender) forge script script/CallContract.s.sol:CallContract --broadcast --rpc-url $(RPC_URL)

projects/gpt4/contracts/README.md (new file)

# GPT4 Example Contracts

This is a minimalist foundry project that implements a [callback consumer](https://docs.ritual.net/infernet/sdk/consumers/Callback)
that sends a prompt to the [container](../container/README.md), which in turn calls OpenAI's GPT-4. For an
end-to-end walkthrough of how this works, follow the [guide here](../gpt4.md).

## Deploying

The [`Deploy.s.sol`](./script/Deploy.s.sol) script deploys the contracts.
The [Makefile](./Makefile) in this project contains a utility deploy target.

```bash
make deploy
```

## Prompting

The [`CallContract.s.sol`](./script/CallContract.s.sol) script calls
the [`promptGPT`](./src/PromptsGPT.sol#L10) function.
The [Makefile](./Makefile) in this project contains a utility call target. You'll need
to pass in the prompt as an env var.

```bash
make call-contract prompt="What is 2 * 3?"
```

projects/gpt4/contracts/foundry.toml (new file)

[profile.default]
src = "src"
out = "out"
libs = ["lib"]
via_ir = true

# See more config options at https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options

projects/gpt4/contracts/remappings.txt (new file)

forge-std/=lib/forge-std/src
infernet-sdk/=lib/infernet-sdk/src

projects/gpt4/contracts/script/CallContract.s.sol (new file)

// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.0;

import {Script, console2} from "forge-std/Script.sol";
import {PromptsGPT} from "../src/PromptsGPT.sol";

contract CallContract is Script {
    function run() public {
        // Setup wallet
        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
        vm.startBroadcast(deployerPrivateKey);

        // Address of the PromptsGPT consumer (its first-deployment address on a fresh anvil node)
        PromptsGPT promptsGpt = PromptsGPT(0x663F3ad617193148711d28f5334eE4Ed07016602);

        // Read the prompt from the `prompt` env var and request compute
        promptsGpt.promptGPT(vm.envString("prompt"));

        vm.stopBroadcast();
    }
}

projects/gpt4/contracts/script/Deploy.s.sol (new file)

// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;

import {Script, console2} from "forge-std/Script.sol";
import {PromptsGPT} from "../src/PromptsGPT.sol";

contract Deploy is Script {
    function run() public {
        // Setup wallet
        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
        vm.startBroadcast(deployerPrivateKey);

        // Log address
        address deployerAddress = vm.addr(deployerPrivateKey);
        console2.log("Loaded deployer: ", deployerAddress);

        // Coordinator address of the local anvil node
        address coordinator = 0x5FbDB2315678afecb367f032d93F642f64180aa3;

        // Create consumer
        PromptsGPT promptsGPT = new PromptsGPT(coordinator);
        console2.log("Deployed PromptsGPT: ", address(promptsGPT));

        vm.stopBroadcast();
    }
}

projects/gpt4/contracts/src/PromptsGPT.sol (new file)

// SPDX-License-Identifier: BSD-3-Clause-Clear
pragma solidity ^0.8.13;

import {console2} from "forge-std/console2.sol";
import {CallbackConsumer} from "infernet-sdk/consumer/Callback.sol";

contract PromptsGPT is CallbackConsumer {
    string private EXTREMELY_COOL_BANNER = "\n\n"
        "_____ _____ _______ _ _ _ \n"
        "| __ \\|_ _|__ __| | | | /\\ | | \n"
        "| |__) | | | | | | | | | / \\ | | \n"
        "| _ / | | | | | | | |/ /\\ \\ | | \n"
        "| | \\ \\ _| |_ | | | |__| / ____ \\| |____ \n"
        "|_| \\_\\_____| |_| \\____/_/ \\_\\______| \n\n";

    constructor(address coordinator) CallbackConsumer(coordinator) {}

    function promptGPT(string calldata prompt) public {
        _requestCompute(
            "gpt4",             // container ID
            abi.encode(prompt), // ABI-encoded input
            20 gwei,            // max gas price
            1_000_000,          // max gas limit
            1                   // redundancy
        );
    }

    function _receiveCompute(
        uint32 subscriptionId,
        uint32 interval,
        uint16 redundancy,
        address node,
        bytes calldata input,
        bytes calldata output,
        bytes calldata proof
    ) internal override {
        console2.log(EXTREMELY_COOL_BANNER);
        // The node delivers (raw_output, processed_output); the raw output
        // holds an ABI-encoded string produced by the container.
        (bytes memory raw_output, bytes memory processed_output) = abi.decode(output, (bytes, bytes));
        (string memory outputStr) = abi.decode(raw_output, (string));

        console2.log("subscription Id", subscriptionId);
        console2.log("interval", interval);
        console2.log("redundancy", redundancy);
        console2.log("node", node);
        console2.log("output:", outputStr);
    }
}
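
To see how `_receiveCompute`'s decoding lines up with the container's `raw_output`, here is a Python mirror of the two `abi.decode` calls, using `eth_abi` with made-up payload bytes:

```python
from eth_abi import decode, encode

# Reconstruct an "output" payload as the node would deliver it: the
# container's raw_output (an ABI-encoded string) plus empty processed output.
raw_output = encode(["string"], ["{'output': 'Yes, shrimp can fry rice.'}"])
output = encode(["bytes", "bytes"], [raw_output, b""])

# Mirrors: abi.decode(output, (bytes, bytes)); abi.decode(raw_output, (string))
raw, processed = decode(["bytes", "bytes"], output)
(output_str,) = decode(["string"], raw)
print(output_str)  # {'output': 'Yes, shrimp can fry rice.'}
```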

projects/gpt4/gpt4.md (new file)

# Running OpenAI's GPT-4 on Infernet

In this tutorial we are going to integrate [OpenAI's GPT-4](https://openai.com/gpt-4) into Infernet. We will:

1. Obtain an API key from OpenAI
2. Configure the `gpt4` service, then build & deploy it with Infernet
3. Make a Web2 request by directly prompting the [gpt4 service](./container)
4. Make a Web3 request by integrating a sample [`PromptsGPT.sol`](./contracts/src/PromptsGPT.sol) smart contract. This
   contract makes a request to Infernet with the user's prompt and receives the result of the request.

## Install Prerequisites

For this tutorial you'll need to have the following installed.

1. [Docker](https://docs.docker.com/engine/install/)
2. [Foundry](https://book.getfoundry.sh/getting-started/installation)

### Get an API key from OpenAI

First, you'll need to get an API key from OpenAI. You can do this by making
an [OpenAI](https://openai.com/) account.
After signing in, head over to [their platform](https://platform.openai.com/api-keys) to
create an API key.

> [!NOTE]
> You will need a paid account to use the GPT-4 API.

### Ensure `docker` & `foundry` exist

To check for `docker`, run the following command in your terminal:

```bash copy
docker --version
# Docker version 25.0.2, build 29cf629 (example output)
```

You'll also need to ensure that `docker-compose` exists in your terminal:

```bash copy
which docker-compose
# /usr/local/bin/docker-compose (example output)
```

To check for `foundry`, run the following command in your terminal:

```bash copy
forge --version
# forge 0.2.0 (551bcb5 2024-02-28T07:40:42.782478000Z) (example output)
```

### Clone the starter repository

Just like our other examples, we're going to clone this repository.
All of the code and instructions for this tutorial can be found in the
[`projects/gpt4`](https://github.com/ritual-net/infernet-container-starter/tree/main/projects/gpt4)
directory of the repository.

```bash copy
# Clone locally
git clone --recurse-submodules https://github.com/ritual-net/infernet-container-starter
# Navigate to the repository
cd infernet-container-starter
```

### Configure the `gpt4` container

#### Configure API key in `config.json`

This is where we'll use the API key we obtained from OpenAI.

```bash
cd projects/gpt4/container
cp config.sample.json config.json
```

In the `containers` field, you will see the following. Replace `your-openai-key` with your OpenAI API key.

```json
"containers": [
    {
        // etc. etc.
        "env": {
            "OPENAI_API_KEY": "your-openai-key" // replace with your OpenAI API key
        }
    }
],
```

### Build the `gpt4` container

First, navigate back to the root of the repository. Then simply run the following command to build the `gpt4`
container:

```bash copy
cd ../../..
make build-container project=gpt4
```

### Deploy an Infernet node locally

Much like our [hello world](../hello-world/hello-world.md) project, deploying an Infernet node is as
simple as running:

```bash copy
make deploy-container project=gpt4
```

## Making a Web2 Request

From here, you can directly make a request to the Infernet node:

```bash
curl -X POST http://127.0.0.1:4000/api/jobs \
  -H "Content-Type: application/json" \
  -d '{"containers":["gpt4"], "data": {"prompt": "Hello, can shrimp actually fry rice?"}}'
# {"id":"cab6eea8-8b1e-4144-9a70-f905c5ef375b"}
```

If you have `jq` installed, you can instead pipe the returned job ID to a file:

```bash copy
curl -X POST http://127.0.0.1:4000/api/jobs \
  -H "Content-Type: application/json" \
  -d '{"containers":["gpt4"], "data": {"prompt": "Hello, can shrimp actually fry rice?"}}' | jq -r ".id" > last-job.uuid
```

You can then check the status of the job by running:

```bash copy
curl -X GET "http://127.0.0.1:4000/api/jobs?id=cab6eea8-8b1e-4144-9a70-f905c5ef375b"
# response [{"id":"07026571-edc8-42ab-b38c-6b3cf19971b6","result":{"container":"gpt4","output":{"message":"No, shrimps cannot fry rice by themselves. However, in culinary terms, shrimp fried rice is a popular dish in which cooked shrimp are added to fried rice along with other ingredients. Cooks or chefs prepare it by frying the rice and shrimps together usually in a wok or frying pan."}},"status":"success"}]
```

And if you have `jq` installed and piped the job ID to a file, you can instead run:

```bash
curl -X GET "http://127.0.0.1:4000/api/jobs?id=$(cat last-job.uuid)" | jq .
# returns something like:
[
  {
    "id": "1b50e85b-2295-44eb-9c85-40ae5331bd14",
    "result": {
      "container": "gpt4",
      "output": {
        "output": "Yes, shrimp can be used to make fried rice. In many Asian cuisines, shrimp is a popular ingredient in fried rice dishes. The shrimp adds flavor and protein to the dish, and can be cooked along with the rice and other ingredients such as vegetables, eggs, and seasonings."
      }
    },
    "status": "success"
  }
]
```
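
The same Web2 flow, create a job then poll for its result, fits in a few lines of Python. This is a sketch, assuming the `requests` package (not part of this repo) and that the node reports a `running` status until the job settles:

```python
import time

import requests  # assumed extra dependency, not part of this repo

NODE = "http://127.0.0.1:4000"

# Create the job with the same payload as the curl example above.
job = requests.post(
    f"{NODE}/api/jobs",
    json={"containers": ["gpt4"], "data": {"prompt": "Hello, can shrimp actually fry rice?"}},
).json()

# Poll /api/jobs until the job is no longer running.
while True:
    (job_status,) = requests.get(f"{NODE}/api/jobs", params={"id": job["id"]}).json()
    if job_status["status"] != "running":
        break
    time.sleep(1)

print(job_status["status"], job_status["result"]["output"])
```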

## Making a Web3 Request

Now let's bring this service onchain! First we'll have to deploy the contracts.
The [contracts](contracts) directory contains a simple foundry project with a
contract called `PromptsGPT`. This contract exposes a single function,
`promptGPT(string calldata prompt)`. Using this function you'll be able to make
an Infernet request.

**Anvil Logs**: First, it's useful to look at the logs of the anvil node to see what's
going on. In a new terminal, run
`docker logs -f anvil-node`.

**Deploying the contracts**: In another terminal, run the following command:

```bash
make deploy-contracts project=gpt4
```

### Calling the contract

Now, let's call the contract. So far everything's been identical to
the [hello world](../hello-world/README.md) project. The only
difference here is that calling the contract requires an input. We'll pass that input in
using an env var named `prompt`:

```bash copy
make call-contract project=gpt4 prompt="Can shrimps actually fry rice"
```

On your anvil logs, you should see something like this:

```bash
eth_sendRawTransaction

_____ _____ _______ _ _ _
| __ \|_ _|__ __| | | | /\ | |
| |__) | | | | | | | | | / \ | |
| _ / | | | | | | | |/ /\ \ | |
| | \ \ _| |_ | | | |__| / ____ \| |____
|_| \_\_____| |_| \____/_/ \_\______|

subscription Id 1
interval 1
redundancy 1
node 0x70997970C51812dc3A010C7d01b50e0d17dc79C8
output: {'output': 'Yes, shrimps can be used to make fried rice. Fried rice is a versatile dish that can be made with various ingredients, including shrimp. Shrimp fried rice is a popular dish in many cuisines, especially in Asian cuisine.'}

Transaction: 0x9bcab42cf7348953eaf107ca0ca539cb27f3843c1bb08cf359484c71fcf44d2b
Gas used: 93726

Block Number: 3
Block Hash: 0x1cc39d03bb1d69ea7f32db85d2ee684071e28b6d6de9eab6f57e011e11a7ed08
Block Time: "Fri, 26 Jan 2024 02:30:37 +0000"
```

Beautiful, isn't it? 🥰