Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,11 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
version: "25.x"
# Authenticate to lift the action's release-list call from the
# ~60/hr per-IP unauth quota onto our 5,000/hr token quota.
# release.yml already does this (PR #31); ci.yml didn't, and
# PR #57's post-merge CI run flaked here on 2026-05-06.
repo-token: ${{ secrets.GITHUB_TOKEN }}

- name: Check formatting
run: cargo fmt --check
Expand Down Expand Up @@ -75,6 +80,7 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
version: "25.x"
repo-token: ${{ secrets.GITHUB_TOKEN }}

- name: Run contract tests
run: cargo test --locked
76 changes: 76 additions & 0 deletions antd-go/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,13 @@ func unum64(m map[string]any, key string) uint64 {
return 0
}

// boolField reads key from m as a bool. A missing key or a value of any
// other type yields false.
func boolField(m map[string]any, key string) bool {
	b, ok := m[key].(bool)
	return ok && b
}

func arrAt(m map[string]any, key string) []any {
if v, ok := m[key].([]any); ok {
return v
Expand Down Expand Up @@ -282,6 +289,75 @@ func (c *Client) ChunkGet(ctx context.Context, address string) ([]byte, error) {
return b64Decode(str(j, "data"))
}

// PrepareChunkUpload prepares a single chunk for external-signer publish via
// POST /v1/chunks/prepare.
//
// The daemon collects storage quotes from the close group, stashes the
// prepared state, and returns either:
//
//   - AlreadyStored = true and Address set, if the chunk is already on-network.
//     No payment or finalize call is needed.
//   - AlreadyStored = false with UploadID + Payments + TotalAmount populated,
//     in which case the caller signs and submits payForQuotes() externally,
//     then calls FinalizeChunkUpload with the resulting tx hashes.
//
// Unlike ChunkPut, this method does NOT require the daemon to have a wallet —
// all funds flow through the external signer.
//
// Requires antd >= 0.7.0.
func (c *Client) PrepareChunkUpload(ctx context.Context, content []byte) (*PrepareChunkResult, error) {
	j, _, err := c.doJSON(ctx, http.MethodPost, "/v1/chunks/prepare", map[string]any{
		"data": b64Encode(content),
	})
	if err != nil {
		return nil, err
	}

	r := &PrepareChunkResult{
		Address:             str(j, "address"),
		AlreadyStored:       boolField(j, "already_stored"),
		UploadID:            str(j, "upload_id"),
		PaymentType:         str(j, "payment_type"),
		TotalAmount:         str(j, "total_amount"),
		PaymentVaultAddress: str(j, "payment_vault_address"),
		PaymentTokenAddress: str(j, "payment_token_address"),
		RPCUrl:              str(j, "rpc_url"),
	}
	// Use the shared arrAt accessor for consistency with the other response
	// parsers in this file (it returns nil for a missing/mistyped field, so
	// the loop simply doesn't run — same behavior as the checked assertion).
	for _, p := range arrAt(j, "payments") {
		pm, ok := p.(map[string]any)
		if !ok {
			continue // tolerate malformed entries rather than failing the whole parse
		}
		r.Payments = append(r.Payments, PaymentInfo{
			QuoteHash:      str(pm, "quote_hash"),
			RewardsAddress: str(pm, "rewards_address"),
			Amount:         str(pm, "amount"),
		})
	}
	return r, nil
}

// FinalizeChunkUpload submits a single chunk to the network after the external
// signer has paid via POST /v1/chunks/finalize.
//
// txHashes maps each non-zero quote_hash from PrepareChunkUpload's Payments to
// the corresponding tx_hash returned by payForQuotes(). Returns the hex-encoded
// network address of the stored chunk (matches PrepareChunkResult.Address).
//
// Requires antd >= 0.7.0.
func (c *Client) FinalizeChunkUpload(ctx context.Context, uploadID string, txHashes map[string]string) (string, error) {
	body := map[string]any{
		"upload_id": uploadID,
		"tx_hashes": txHashes,
	}
	j, _, err := c.doJSON(ctx, http.MethodPost, "/v1/chunks/finalize", body)
	if err != nil {
		return "", err
	}
	return str(j, "address"), nil
}

// --- Files ---

// FileUploadPublic uploads a local file to the network.
Expand Down
136 changes: 136 additions & 0 deletions antd-go/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"errors"
"net/http"
"net/http/httptest"
"strings"
"testing"
)

Expand Down Expand Up @@ -701,3 +702,138 @@ func TestFinalizeUploadOmitsDataMapAddressForPrivate(t *testing.T) {
t.Fatalf("expected DataMap=deadbeef, got %q", res.DataMap)
}
}

// ── Single-chunk external-signer (antd >= 0.7.0) ──

func TestPrepareChunkUploadEncodesPayloadAndParsesResponse(t *testing.T) {
	var gotBody map[string]any
	handler := func(w http.ResponseWriter, r *http.Request) {
		// Reject anything but the expected prepare call with a 404.
		if r.Method != http.MethodPost || r.URL.Path != "/v1/chunks/prepare" {
			w.WriteHeader(404)
			return
		}
		_ = json.NewDecoder(r.Body).Decode(&gotBody)
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"address":        "aa" + strings.Repeat("00", 31),
			"already_stored": false,
			"upload_id":      "chunk-1",
			"payment_type":   "wave_batch",
			"payments": []any{
				map[string]any{"quote_hash": "qh1", "rewards_address": "ra1", "amount": "100"},
				map[string]any{"quote_hash": "qh2", "rewards_address": "ra2", "amount": "100"},
			},
			"total_amount":          "200",
			"payment_vault_address": "0xvault",
			"payment_token_address": "0xtoken",
			"rpc_url":               "http://localhost:8545",
		})
	}
	srv := httptest.NewServer(http.HandlerFunc(handler))
	defer srv.Close()

	client := NewClient(srv.URL)
	res, err := client.PrepareChunkUpload(context.Background(), []byte("hello"))
	if err != nil {
		t.Fatal(err)
	}

	// Request: bytes must arrive base64-encoded under `data`.
	want := "aGVsbG8="
	if got := gotBody["data"]; got != want {
		t.Fatalf("expected base64-encoded data %q, got %v", want, got)
	}

	if res.AlreadyStored {
		t.Fatal("expected AlreadyStored=false")
	}
	if res.UploadID != "chunk-1" {
		t.Fatalf("UploadID = %q, want chunk-1", res.UploadID)
	}
	if res.PaymentType != "wave_batch" {
		t.Fatalf("PaymentType = %q, want wave_batch", res.PaymentType)
	}
	if n := len(res.Payments); n != 2 {
		t.Fatalf("expected 2 payments, got %d", n)
	}
	if res.Payments[0].QuoteHash != "qh1" || res.Payments[1].Amount != "100" {
		t.Fatalf("unexpected payment shape: %+v", res.Payments)
	}
	if res.TotalAmount != "200" {
		t.Fatalf("TotalAmount = %q, want 200", res.TotalAmount)
	}
	if res.PaymentVaultAddress != "0xvault" || res.RPCUrl != "http://localhost:8545" {
		t.Fatalf("EVM config not parsed: %+v", res)
	}
}

func TestPrepareChunkUploadAlreadyStoredOmitsPaymentFields(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		// Only the prepare endpoint is served; everything else 404s.
		if r.Method != http.MethodPost || r.URL.Path != "/v1/chunks/prepare" {
			w.WriteHeader(404)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"address":        "bb" + strings.Repeat("11", 31),
			"already_stored": true,
			// no upload_id, no payments, no payment_type, etc.
		})
	}
	srv := httptest.NewServer(http.HandlerFunc(handler))
	defer srv.Close()

	client := NewClient(srv.URL)
	res, err := client.PrepareChunkUpload(context.Background(), []byte("already-on-network"))
	if err != nil {
		t.Fatal(err)
	}
	if !res.AlreadyStored {
		t.Fatal("expected AlreadyStored=true")
	}
	if res.Address == "" {
		t.Fatal("Address must still be populated for already-stored chunks")
	}
	if res.UploadID != "" {
		t.Fatalf("UploadID should be empty for already-stored, got %q", res.UploadID)
	}
	if n := len(res.Payments); n != 0 {
		t.Fatalf("Payments should be empty for already-stored, got %d", n)
	}
	if res.TotalAmount != "" || res.PaymentType != "" {
		t.Fatalf("payment fields should be empty: %+v", res)
	}
}

func TestFinalizeChunkUploadReturnsAddress(t *testing.T) {
	var gotBody map[string]any
	handler := func(w http.ResponseWriter, r *http.Request) {
		// Anything other than the finalize endpoint gets a 404.
		if r.Method != http.MethodPost || r.URL.Path != "/v1/chunks/finalize" {
			w.WriteHeader(404)
			return
		}
		_ = json.NewDecoder(r.Body).Decode(&gotBody)
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"address": "cc" + strings.Repeat("22", 31),
		})
	}
	srv := httptest.NewServer(http.HandlerFunc(handler))
	defer srv.Close()

	client := NewClient(srv.URL)
	hashes := map[string]string{
		"qh1": "tx1",
		"qh2": "tx2",
	}
	addr, err := client.FinalizeChunkUpload(context.Background(), "chunk-1", hashes)
	if err != nil {
		t.Fatal(err)
	}

	if gotBody["upload_id"] != "chunk-1" {
		t.Fatalf("upload_id not sent: %v", gotBody["upload_id"])
	}
	tx, ok := gotBody["tx_hashes"].(map[string]any)
	if !ok || tx["qh1"] != "tx1" || tx["qh2"] != "tx2" {
		t.Fatalf("tx_hashes not sent correctly: %v", gotBody["tx_hashes"])
	}
	if addr == "" || len(addr) != 64 {
		t.Fatalf("expected 64-char hex address, got %q", addr)
	}
}
31 changes: 31 additions & 0 deletions antd-go/models.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,37 @@ type FinalizeUploadResult struct {
ChunksStored int64 `json:"chunks_stored"` // number of chunks stored
}

// PrepareChunkResult is the result of preparing a single-chunk publish for
// external signing via POST /v1/chunks/prepare.
//
// When AlreadyStored is true, the chunk is already on-network — the only
// populated fields are Address and AlreadyStored, and no finalize call is
// needed. Otherwise the wave-batch payment fields describe what the external
// signer must submit before calling FinalizeChunkUpload.
//
// (Note: the reference was previously written as a [AlreadyStored] doc link,
// but Go doc links only resolve package-level identifiers, not struct fields,
// so it rendered as literal brackets.)
type PrepareChunkResult struct {
	// Content-addressed BLAKE3 of the chunk bytes (hex, 64 chars). Always set.
	Address string `json:"address"`
	// True if the chunk is already stored on the network and no payment is needed.
	AlreadyStored bool `json:"already_stored"`

	// Fields below are only populated when AlreadyStored == false.

	// Opaque identifier to pass back to FinalizeChunkUpload.
	UploadID string `json:"upload_id,omitempty"`
	// Always "wave_batch" for single-chunk publishes (well below the merkle threshold).
	PaymentType string `json:"payment_type,omitempty"`
	// Per-quote payment entries for payForQuotes(). Typically 5–7 (one per peer in the close group).
	Payments []PaymentInfo `json:"payments,omitempty"`
	// Total amount to pay (atto tokens, decimal string).
	TotalAmount string `json:"total_amount,omitempty"`
	// Payment vault contract address (hex with 0x prefix).
	PaymentVaultAddress string `json:"payment_vault_address,omitempty"`
	// Payment token contract address (hex with 0x prefix).
	PaymentTokenAddress string `json:"payment_token_address,omitempty"`
	// EVM RPC URL for submitting transactions.
	RPCUrl string `json:"rpc_url,omitempty"`
}

// UploadCostEstimate is the result of an estimate (EstimateDataCost / EstimateFileCost).
//
// Unlike [PutResult.Cost], which is a paid cost after upload, this is a
Expand Down
11 changes: 4 additions & 7 deletions antd/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion antd/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ version = "0.6.1"
edition = "2021"

[dependencies]
ant-core = { git = "https://github.com/WithAutonomi/ant-client", tag = "ant-cli-v0.2.2" }
ant-core = { git = "https://github.com/WithAutonomi/ant-client", rev = "c0f6a816ccd7ffe7a4922de4c68187b5a8d9d5a2" } # ant-client#89 merge commit (Client::finalize_chunk); bump to a tag once ant-core cuts a release containing it
self_encryption = "0.35.0"
evmlib = "0.8.1"
axum = { version = "0.8", features = ["macros"] }
Expand Down
4 changes: 3 additions & 1 deletion antd/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
network: config.network.clone(),
bootstrap_peers,
pending_uploads: Arc::new(tokio::sync::Mutex::new(std::collections::HashMap::new())),
pending_chunks: Arc::new(tokio::sync::Mutex::new(std::collections::HashMap::new())),
started_at: std::time::Instant::now(),
version: env!("CARGO_PKG_VERSION").to_string(),
build_commit: env!("ANTD_BUILD_COMMIT").to_string(),
Expand All @@ -297,13 +298,14 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
evm_vault_addr,
});

// Spawn background task to clean up stale pending uploads (1-hour TTL)
// Spawn background task to clean up stale pending prepares (1-hour TTL)
let cleanup_state = state.clone();
tokio::spawn(async move {
let ttl = std::time::Duration::from_secs(3600);
loop {
tokio::time::sleep(std::time::Duration::from_secs(300)).await;
cleanup_state.cleanup_stale_uploads(ttl).await;
cleanup_state.cleanup_stale_chunks(ttl).await;
}
});

Expand Down
Loading
Loading