diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a4b3460..5760c3f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,6 +33,11 @@ jobs: - uses: arduino/setup-protoc@v3 with: version: "25.x" + # Authenticate to lift the action's release-list call from the + # ~60/hr per-IP unauth quota onto our 5,000/hr token quota. + # release.yml already does this (PR #31); ci.yml didn't, and + # PR #57's post-merge CI run flaked here on 2026-05-06. + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Check formatting run: cargo fmt --check @@ -75,6 +80,7 @@ jobs: - uses: arduino/setup-protoc@v3 with: version: "25.x" + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run contract tests run: cargo test --locked diff --git a/antd-go/client.go b/antd-go/client.go index fc57cfb..d9bbb52 100644 --- a/antd-go/client.go +++ b/antd-go/client.go @@ -151,6 +151,13 @@ func unum64(m map[string]any, key string) uint64 { return 0 } +func boolField(m map[string]any, key string) bool { + if v, ok := m[key].(bool); ok { + return v + } + return false +} + func arrAt(m map[string]any, key string) []any { if v, ok := m[key].([]any); ok { return v @@ -282,6 +289,75 @@ func (c *Client) ChunkGet(ctx context.Context, address string) ([]byte, error) { return b64Decode(str(j, "data")) } +// PrepareChunkUpload prepares a single chunk for external-signer publish via +// POST /v1/chunks/prepare. +// +// The daemon collects storage quotes from the close group, stashes the +// prepared state, and returns either: +// +// - AlreadyStored = true and Address set, if the chunk is already on-network. +// No payment or finalize call is needed. +// - AlreadyStored = false with UploadID + Payments + TotalAmount populated, +// in which case the caller signs and submits payForQuotes() externally, +// then calls FinalizeChunkUpload with the resulting tx hashes. 
+// +// Unlike ChunkPut, this method does NOT require the daemon to have a wallet — +// all funds flow through the external signer. +// +// Requires antd >= 0.7.0. +func (c *Client) PrepareChunkUpload(ctx context.Context, content []byte) (*PrepareChunkResult, error) { + j, _, err := c.doJSON(ctx, http.MethodPost, "/v1/chunks/prepare", map[string]any{ + "data": b64Encode(content), + }) + if err != nil { + return nil, err + } + + r := &PrepareChunkResult{ + Address: str(j, "address"), + AlreadyStored: boolField(j, "already_stored"), + UploadID: str(j, "upload_id"), + PaymentType: str(j, "payment_type"), + TotalAmount: str(j, "total_amount"), + PaymentVaultAddress: str(j, "payment_vault_address"), + PaymentTokenAddress: str(j, "payment_token_address"), + RPCUrl: str(j, "rpc_url"), + } + if payments, ok := j["payments"].([]any); ok { + for _, p := range payments { + pm, ok := p.(map[string]any) + if !ok { + continue + } + r.Payments = append(r.Payments, PaymentInfo{ + QuoteHash: str(pm, "quote_hash"), + RewardsAddress: str(pm, "rewards_address"), + Amount: str(pm, "amount"), + }) + } + } + return r, nil +} + +// FinalizeChunkUpload submits a single chunk to the network after the external +// signer has paid via POST /v1/chunks/finalize. +// +// txHashes maps each non-zero quote_hash from PrepareChunkUpload's Payments to +// the corresponding tx_hash returned by payForQuotes(). Returns the hex-encoded +// network address of the stored chunk (matches PrepareChunkResult.Address). +// +// Requires antd >= 0.7.0. +func (c *Client) FinalizeChunkUpload(ctx context.Context, uploadID string, txHashes map[string]string) (string, error) { + j, _, err := c.doJSON(ctx, http.MethodPost, "/v1/chunks/finalize", map[string]any{ + "upload_id": uploadID, + "tx_hashes": txHashes, + }) + if err != nil { + return "", err + } + return str(j, "address"), nil +} + // --- Files --- // FileUploadPublic uploads a local file to the network. 
diff --git a/antd-go/client_test.go b/antd-go/client_test.go index 54482ee..f313441 100644 --- a/antd-go/client_test.go +++ b/antd-go/client_test.go @@ -7,6 +7,7 @@ import ( "errors" "net/http" "net/http/httptest" + "strings" "testing" ) @@ -701,3 +702,138 @@ func TestFinalizeUploadOmitsDataMapAddressForPrivate(t *testing.T) { t.Fatalf("expected DataMap=deadbeef, got %q", res.DataMap) } } + +// ── Single-chunk external-signer (antd >= 0.7.0) ── + +func TestPrepareChunkUploadEncodesPayloadAndParsesResponse(t *testing.T) { + var capturedBody map[string]any + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPost && r.URL.Path == "/v1/chunks/prepare" { + _ = json.NewDecoder(r.Body).Decode(&capturedBody) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]any{ + "address": "aa" + strings.Repeat("00", 31), + "already_stored": false, + "upload_id": "chunk-1", + "payment_type": "wave_batch", + "payments": []any{ + map[string]any{"quote_hash": "qh1", "rewards_address": "ra1", "amount": "100"}, + map[string]any{"quote_hash": "qh2", "rewards_address": "ra2", "amount": "100"}, + }, + "total_amount": "200", + "payment_vault_address": "0xvault", + "payment_token_address": "0xtoken", + "rpc_url": "http://localhost:8545", + }) + return + } + w.WriteHeader(404) + })) + defer srv.Close() + + c := NewClient(srv.URL) + res, err := c.PrepareChunkUpload(context.Background(), []byte("hello")) + if err != nil { + t.Fatal(err) + } + + // Request: bytes must arrive base64-encoded under `data`. 
+ if got, want := capturedBody["data"], "aGVsbG8="; got != want { + t.Fatalf("expected base64-encoded data %q, got %v", want, got) + } + + if res.AlreadyStored { + t.Fatal("expected AlreadyStored=false") + } + if res.UploadID != "chunk-1" { + t.Fatalf("UploadID = %q, want chunk-1", res.UploadID) + } + if res.PaymentType != "wave_batch" { + t.Fatalf("PaymentType = %q, want wave_batch", res.PaymentType) + } + if len(res.Payments) != 2 { + t.Fatalf("expected 2 payments, got %d", len(res.Payments)) + } + if res.Payments[0].QuoteHash != "qh1" || res.Payments[1].Amount != "100" { + t.Fatalf("unexpected payment shape: %+v", res.Payments) + } + if res.TotalAmount != "200" { + t.Fatalf("TotalAmount = %q, want 200", res.TotalAmount) + } + if res.PaymentVaultAddress != "0xvault" || res.RPCUrl != "http://localhost:8545" { + t.Fatalf("EVM config not parsed: %+v", res) + } +} + +func TestPrepareChunkUploadAlreadyStoredOmitsPaymentFields(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPost && r.URL.Path == "/v1/chunks/prepare" { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]any{ + "address": "bb" + strings.Repeat("11", 31), + "already_stored": true, + // no upload_id, no payments, no payment_type, etc. 
+ }) + return + } + w.WriteHeader(404) + })) + defer srv.Close() + + c := NewClient(srv.URL) + res, err := c.PrepareChunkUpload(context.Background(), []byte("already-on-network")) + if err != nil { + t.Fatal(err) + } + if !res.AlreadyStored { + t.Fatal("expected AlreadyStored=true") + } + if res.Address == "" { + t.Fatal("Address must still be populated for already-stored chunks") + } + if res.UploadID != "" { + t.Fatalf("UploadID should be empty for already-stored, got %q", res.UploadID) + } + if len(res.Payments) != 0 { + t.Fatalf("Payments should be empty for already-stored, got %d", len(res.Payments)) + } + if res.TotalAmount != "" || res.PaymentType != "" { + t.Fatalf("payment fields should be empty: %+v", res) + } +} + +func TestFinalizeChunkUploadReturnsAddress(t *testing.T) { + var capturedBody map[string]any + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPost && r.URL.Path == "/v1/chunks/finalize" { + _ = json.NewDecoder(r.Body).Decode(&capturedBody) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]any{ + "address": "cc" + strings.Repeat("22", 31), + }) + return + } + w.WriteHeader(404) + })) + defer srv.Close() + + c := NewClient(srv.URL) + addr, err := c.FinalizeChunkUpload(context.Background(), "chunk-1", map[string]string{ + "qh1": "tx1", + "qh2": "tx2", + }) + if err != nil { + t.Fatal(err) + } + + if capturedBody["upload_id"] != "chunk-1" { + t.Fatalf("upload_id not sent: %v", capturedBody["upload_id"]) + } + tx, ok := capturedBody["tx_hashes"].(map[string]any) + if !ok || tx["qh1"] != "tx1" || tx["qh2"] != "tx2" { + t.Fatalf("tx_hashes not sent correctly: %v", capturedBody["tx_hashes"]) + } + if addr == "" || len(addr) != 64 { + t.Fatalf("expected 64-char hex address, got %q", addr) + } +} diff --git a/antd-go/models.go b/antd-go/models.go index fc60a30..5af6dbb 100644 --- a/antd-go/models.go +++ b/antd-go/models.go @@ -88,6 +88,37 
@@ type FinalizeUploadResult struct { ChunksStored int64 `json:"chunks_stored"` // number of chunks stored } +// PrepareChunkResult is the result of preparing a single-chunk publish for +// external signing via POST /v1/chunks/prepare. +// +// When [AlreadyStored] is true, the chunk is already on-network — the only +// populated fields are Address and AlreadyStored, and no finalize call is +// needed. Otherwise the wave-batch payment fields describe what the external +// signer must submit before calling FinalizeChunkUpload. +type PrepareChunkResult struct { + // Content-addressed BLAKE3 of the chunk bytes (hex, 64 chars). Always set. + Address string `json:"address"` + // True if the chunk is already stored on the network and no payment is needed. + AlreadyStored bool `json:"already_stored"` + + // Fields below are only populated when AlreadyStored == false. + + // Opaque identifier to pass back to FinalizeChunkUpload. + UploadID string `json:"upload_id,omitempty"` + // Always "wave_batch" for single-chunk publishes (well below the merkle threshold). + PaymentType string `json:"payment_type,omitempty"` + // Per-quote payment entries for payForQuotes(). Typically 5–7 (one per peer in the close group). + Payments []PaymentInfo `json:"payments,omitempty"` + // Total amount to pay (atto tokens, decimal string). + TotalAmount string `json:"total_amount,omitempty"` + // Payment vault contract address (hex with 0x prefix). + PaymentVaultAddress string `json:"payment_vault_address,omitempty"` + // Payment token contract address (hex with 0x prefix). + PaymentTokenAddress string `json:"payment_token_address,omitempty"` + // EVM RPC URL for submitting transactions. + RPCUrl string `json:"rpc_url,omitempty"` +} + // UploadCostEstimate is the result of an estimate (EstimateDataCost / EstimateFileCost). 
// // Unlike [PutResult.Cost], which is a paid cost after upload, this is a diff --git a/antd/Cargo.lock b/antd/Cargo.lock index cd45c43..8c80855 100644 --- a/antd/Cargo.lock +++ b/antd/Cargo.lock @@ -805,7 +805,7 @@ dependencies = [ [[package]] name = "ant-core" version = "0.2.3" -source = "git+https://github.com/WithAutonomi/ant-client?tag=ant-cli-v0.2.2#6cada1d6b318a93e52ea6c34aa4b68fc2782c946" +source = "git+https://github.com/WithAutonomi/ant-client?rev=c0f6a816ccd7ffe7a4922de4c68187b5a8d9d5a2#c0f6a816ccd7ffe7a4922de4c68187b5a8d9d5a2" dependencies = [ "ant-protocol", "async-stream", @@ -857,8 +857,7 @@ dependencies = [ [[package]] name = "ant-protocol" version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edaf05019466da9ff9055dbbc1428db3414440a8f0a1ac929febcf4fb0608a6d" +source = "git+https://github.com/WithAutonomi/ant-protocol.git?rev=93e63b8a41a97c37c24d1164a3ee5525e002ddcd#93e63b8a41a97c37c24d1164a3ee5525e002ddcd" dependencies = [ "blake3", "bytes", @@ -4906,8 +4905,7 @@ dependencies = [ [[package]] name = "saorsa-core" version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e194874b524ad2a50d0976eb4ebcb81aea326dfb475373de9a2942a90cb2cc" +source = "git+https://github.com/saorsa-labs/saorsa-core?branch=fix%2Fstability-improvements#5586795740eafd9818e827baa2eb2677ff3ba942" dependencies = [ "anyhow", "async-trait", @@ -5021,8 +5019,7 @@ dependencies = [ [[package]] name = "saorsa-transport" version = "0.34.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ebc09fb51e2c325e61ff09892f1256c60ef4f3beb881fd2980befe3e53e9bc" +source = "git+https://github.com/saorsa-labs/saorsa-transport?branch=fix%2Fstability-improvements#55f423ca5e3312e31ba22475b68a76f4ffd50285" dependencies = [ "anyhow", "async-trait", diff --git a/antd/Cargo.toml b/antd/Cargo.toml index 94e77d9..923de65 100644 --- a/antd/Cargo.toml +++ b/antd/Cargo.toml @@ -4,7 +4,7 @@ version 
= "0.6.1" edition = "2021" [dependencies] -ant-core = { git = "https://github.com/WithAutonomi/ant-client", tag = "ant-cli-v0.2.2" } +ant-core = { git = "https://github.com/WithAutonomi/ant-client", rev = "c0f6a816ccd7ffe7a4922de4c68187b5a8d9d5a2" } # ant-client#89 merge commit (Client::finalize_chunk); bump to a tag once ant-core cuts a release containing it self_encryption = "0.35.0" evmlib = "0.8.1" axum = { version = "0.8", features = ["macros"] } diff --git a/antd/src/main.rs b/antd/src/main.rs index f813d51..cd4975f 100644 --- a/antd/src/main.rs +++ b/antd/src/main.rs @@ -289,6 +289,7 @@ async fn main() -> Result<(), Box> { network: config.network.clone(), bootstrap_peers, pending_uploads: Arc::new(tokio::sync::Mutex::new(std::collections::HashMap::new())), + pending_chunks: Arc::new(tokio::sync::Mutex::new(std::collections::HashMap::new())), started_at: std::time::Instant::now(), version: env!("CARGO_PKG_VERSION").to_string(), build_commit: env!("ANTD_BUILD_COMMIT").to_string(), @@ -297,13 +298,14 @@ async fn main() -> Result<(), Box> { evm_vault_addr, }); - // Spawn background task to clean up stale pending uploads (1-hour TTL) + // Spawn background task to clean up stale pending prepares (1-hour TTL) let cleanup_state = state.clone(); tokio::spawn(async move { let ttl = std::time::Duration::from_secs(3600); loop { tokio::time::sleep(std::time::Duration::from_secs(300)).await; cleanup_state.cleanup_stale_uploads(ttl).await; + cleanup_state.cleanup_stale_chunks(ttl).await; } }); diff --git a/antd/src/rest/chunks.rs b/antd/src/rest/chunks.rs index aaadf74..be276ec 100644 --- a/antd/src/rest/chunks.rs +++ b/antd/src/rest/chunks.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::sync::Arc; use axum::extract::{Path, State}; @@ -7,7 +8,8 @@ use base64::Engine; use bytes::Bytes; use crate::error::AntdError; -use crate::state::AppState; +use crate::evm_defaults; +use crate::state::{AppState, TimestampedChunk}; use crate::types::*; pub async fn chunk_get( 
@@ -65,3 +67,151 @@ pub async fn chunk_put( address: hex::encode(address), })) } + +/// `POST /v1/chunks/prepare` — single-chunk external-signer prepare. +/// +/// Quotes the close group for storing the supplied bytes as one chunk, stashes +/// the prepared state under a fresh `upload_id`, and returns the wave-batch +/// payment shape. After the external signer pays, the caller hits +/// [`chunk_finalize`] with the resulting `tx_hashes`. +/// +/// When the chunk is already on-network, returns `already_stored: true` with +/// the existing address and no `upload_id` — payment is unnecessary. +/// +/// Unlike `chunk_put`, this handler does NOT require the daemon to have a +/// wallet; all funds flow through the external signer. +pub async fn chunk_prepare( + State(state): State<Arc<AppState>>, + Json(req): Json<PrepareChunkRequest>, +) -> Result<Json<PrepareChunkResponse>, AntdError> { + let data = BASE64 + .decode(&req.data) + .map_err(|e| AntdError::BadRequest(format!("invalid base64: {e}")))?; + let content = Bytes::from(data); + + // Compute the content address up-front so the "already stored" response + // can still return it without re-quoting. ant-core's prepare path also + // computes this internally, but it doesn't surface the address on the + // Ok(None) path. + let address_hex = hex::encode(ant_core::data::compute_address(&content)); + + let client = state.client.clone(); + let prepared = tokio::spawn(async move { + client + .prepare_chunk_payment(content) + .await + .map_err(AntdError::from_core) + }) + .await + .map_err(|e| AntdError::Internal(format!("task failed: {e}")))??; + + let Some(prepared) = prepared else { + // Already on-network — no payment needed, no finalize call needed. 
+ return Ok(Json(PrepareChunkResponse { + address: address_hex, + already_stored: true, + upload_id: None, + payment_type: None, + payments: Vec::new(), + total_amount: None, + payment_vault_address: None, + payment_token_address: None, + rpc_url: None, + })); + }; + + let evm_cfg = evm_defaults::resolve(&state.network); + + // Filter out zero-amount quotes — they go into peer_quotes for ProofOfPayment + // but the external signer doesn't need a `payForQuotes` entry for them + // (and including them would charge for nothing). + let payments: Vec<PaymentEntry> = prepared + .payment + .quotes + .iter() + .filter(|q| !q.amount.is_zero()) + .map(|q| PaymentEntry { + quote_hash: format!("{:#x}", q.quote_hash), + rewards_address: format!("{:#x}", q.rewards_address), + amount: q.amount.to_string(), + }) + .collect(); + let total_amount = prepared.payment.total_amount().to_string(); + + let upload_id = hex::encode(rand::random::<[u8; 16]>()); + state.pending_chunks.lock().await.insert( + upload_id.clone(), + TimestampedChunk { + prepared, + created_at: std::time::Instant::now(), + }, + ); + + Ok(Json(PrepareChunkResponse { + address: address_hex, + already_stored: false, + upload_id: Some(upload_id), + payment_type: Some("wave_batch".into()), + payments, + total_amount: Some(total_amount), + payment_vault_address: Some(evm_cfg.vault_addr), + payment_token_address: Some(evm_cfg.token_addr), + rpc_url: Some(evm_cfg.rpc_url), + })) +} + +/// `POST /v1/chunks/finalize` — submit the chunk to the network after +/// external payment. +/// +/// Looks up the prepared chunk by `upload_id`, builds the [`PaymentProof`] +/// from the supplied `tx_hashes`, and stores the chunk on `CLOSE_GROUP_MAJORITY` +/// peers via [`ant_core::data::Client::finalize_chunk`]. 
+pub async fn chunk_finalize( + State(state): State<Arc<AppState>>, + Json(req): Json<FinalizeChunkRequest>, +) -> Result<Json<FinalizeChunkResponse>, AntdError> { + use evmlib::common::{QuoteHash, TxHash}; + + let timestamped = state + .pending_chunks + .lock() + .await + .remove(&req.upload_id) + .ok_or_else(|| { + AntdError::NotFound(format!( + "upload_id {} not found — it may have expired or already been finalized", + req.upload_id + )) + })?; + + let tx_hash_map: HashMap<QuoteHash, TxHash> = req + .tx_hashes + .iter() + .map(|(quote_hex, tx_hex)| { + let quote_bytes: [u8; 32] = hex::decode(quote_hex.trim_start_matches("0x")) + .map_err(|e| AntdError::BadRequest(format!("invalid quote_hash {quote_hex}: {e}")))? + .try_into() + .map_err(|_| AntdError::BadRequest("quote_hash must be 32 bytes".into()))?; + let tx_bytes: [u8; 32] = hex::decode(tx_hex.trim_start_matches("0x")) + .map_err(|e| AntdError::BadRequest(format!("invalid tx_hash {tx_hex}: {e}")))? + .try_into() + .map_err(|_| AntdError::BadRequest("tx_hash must be 32 bytes".into()))?; + Ok((quote_bytes.into(), tx_bytes.into())) + }) + .collect::<Result<HashMap<_, _>, AntdError>>()?; + + let client = state.client.clone(); + let prepared = timestamped.prepared; + let address = tokio::spawn(async move { + client + .finalize_chunk(prepared, &tx_hash_map) + .await + .map_err(AntdError::from_core) + }) + .await + .map_err(|e| AntdError::Internal(format!("task failed: {e}")))??; + + Ok(Json(FinalizeChunkResponse { + address: hex::encode(address), + })) +} diff --git a/antd/src/rest/mod.rs b/antd/src/rest/mod.rs index 3f111c5..0f3f3c6 100644 --- a/antd/src/rest/mod.rs +++ b/antd/src/rest/mod.rs @@ -78,6 +78,8 @@ pub fn router(state: Arc<AppState>, enable_cors: bool, rest_port: u16) -> Router // Chunks .route("/v1/chunks/{addr}", get(chunks::chunk_get)) .route("/v1/chunks", post(chunks::chunk_put)) + .route("/v1/chunks/prepare", post(chunks::chunk_prepare)) + .route("/v1/chunks/finalize", post(chunks::chunk_finalize)) // Files .route("/v1/files/upload/public", post(files::file_upload_public)) .route( diff --git a/antd/src/state.rs 
b/antd/src/state.rs index f969280..46b2a9d 100644 --- a/antd/src/state.rs +++ b/antd/src/state.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::sync::Arc; -use ant_core::data::{Client, MultiAddr, PreparedUpload}; +use ant_core::data::{Client, MultiAddr, PreparedChunk, PreparedUpload}; use tokio::sync::Mutex; /// A prepared upload with a creation timestamp for TTL-based cleanup. @@ -10,6 +10,13 @@ pub struct TimestampedUpload { pub created_at: std::time::Instant, } +/// A prepared single-chunk publish with a creation timestamp for TTL-based +/// cleanup. Mirrors [`TimestampedUpload`] but for the `/v1/chunks/prepare` flow. +pub struct TimestampedChunk { + pub prepared: PreparedChunk, + pub created_at: std::time::Instant, +} + /// Shared application state passed to all handlers. #[derive(Clone)] pub struct AppState { @@ -22,6 +29,11 @@ pub struct AppState { pub bootstrap_peers: Vec<MultiAddr>, /// Pending prepared uploads awaiting external payment (upload_id → state). pub pending_uploads: Arc<Mutex<HashMap<String, TimestampedUpload>>>, + /// Pending prepared single-chunk publishes awaiting external payment + /// (upload_id → state). Kept separate from [`Self::pending_uploads`] + /// because the inner type differs and the two flows touch different + /// ant-core surfaces. + pub pending_chunks: Arc<Mutex<HashMap<String, TimestampedChunk>>>, /// Process start time, for /health uptime reporting. pub started_at: std::time::Instant, /// antd crate version (env!("CARGO_PKG_VERSION") at build time). @@ -51,4 +63,19 @@ impl AppState { ); } } + + /// Remove pending single-chunk prepares older than the given duration. 
+ pub async fn cleanup_stale_chunks(&self, max_age: std::time::Duration) { + let mut chunks = self.pending_chunks.lock().await; + let before = chunks.len(); + chunks.retain(|_, v| v.created_at.elapsed() < max_age); + let removed = before - chunks.len(); + if removed > 0 { + tracing::info!( + removed, + remaining = chunks.len(), + "cleaned up stale pending chunks" + ); + } + } } diff --git a/antd/src/types.rs b/antd/src/types.rs index efdc4d8..d8cba65 100644 --- a/antd/src/types.rs +++ b/antd/src/types.rs @@ -58,6 +58,77 @@ pub struct ChunkGetResponse { pub data: String, // base64 } +// ── External Signer (single-chunk publish) ── + +/// `POST /v1/chunks/prepare` — request a quote for publishing a single chunk +/// via the external-signer flow. The daemon collects quotes from the close +/// group, returns the payment shape, and stashes server-side state keyed by +/// `upload_id` for the matching finalize call. +#[derive(Deserialize)] +pub struct PrepareChunkRequest { + /// Raw chunk bytes, base64-encoded. Maximum one ant-protocol chunk + /// (≤ 4 MiB before self-encryption is irrelevant here — the bytes are + /// stored verbatim as one chunk at their BLAKE3 address). + pub data: String, +} + +/// `POST /v1/chunks/prepare` response. Mirrors [`PrepareUploadResponse`]'s +/// wave-batch shape so external signers can reuse the same `payForQuotes()` +/// path with no special-casing. +/// +/// When the chunk is already on-network, `already_stored` is `true` and the +/// `upload_id` / payment fields are omitted — the caller can update their +/// records with `address` and skip the finalize step. +#[derive(Serialize)] +pub struct PrepareChunkResponse { + /// Content-addressed BLAKE3 of the chunk bytes (hex, 32 bytes). Computed + /// locally on the daemon; the caller can also derive it independently if + /// needed. + pub address: String, + /// `true` if the chunk was already stored on the network. In that case + /// no payment or finalize call is needed. 
+ pub already_stored: bool, + + /// Opaque token to pass back to finalize. Omitted when + /// `already_stored == true`. + #[serde(skip_serializing_if = "Option::is_none")] + pub upload_id: Option<String>, + /// Always `"wave_batch"` for single-chunk publishes (single chunk is well + /// below the merkle threshold). Omitted when `already_stored == true`. + #[serde(skip_serializing_if = "Option::is_none")] + pub payment_type: Option<String>, + /// Per-quote payment entries for `payForQuotes()`. Typically 5–7 entries + /// (one per peer in the close group). Empty/omitted when already stored. + #[serde(skip_serializing_if = "Vec::is_empty")] + pub payments: Vec<PaymentEntry>, + /// Total amount to pay (atto tokens, decimal string). Omitted when + /// `already_stored == true`. + #[serde(skip_serializing_if = "Option::is_none")] + pub total_amount: Option<String>, + /// EVM configuration — same source as the file/data prepare flow. Omitted + /// when `already_stored == true`. + #[serde(skip_serializing_if = "Option::is_none")] + pub payment_vault_address: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub payment_token_address: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub rpc_url: Option<String>, +} + +#[derive(Deserialize)] +pub struct FinalizeChunkRequest { + /// The `upload_id` returned from `/v1/chunks/prepare`. + pub upload_id: String, + /// Map of quote_hash (hex) → tx_hash (hex) from the on-chain payment. + pub tx_hashes: HashMap<String, String>, +} + +#[derive(Serialize)] +pub struct FinalizeChunkResponse { + /// Network address of the stored chunk (hex, 32 bytes). 
+ pub address: String, +} + // ── External Signer (two-phase upload) ── #[derive(Deserialize)] @@ -633,4 +704,92 @@ mod tests { assert!(json.get("address").is_none()); assert!(json.get("data_map_address").is_none()); } + + // ── Single-chunk external-signer ── + + #[test] + fn prepare_chunk_response_for_new_chunk_includes_payment_shape() { + let resp = PrepareChunkResponse { + address: "deadbeef".repeat(8), + already_stored: false, + upload_id: Some("abc123".into()), + payment_type: Some("wave_batch".into()), + payments: vec![PaymentEntry { + quote_hash: "0xaa".into(), + rewards_address: "0xbb".into(), + amount: "100".into(), + }], + total_amount: Some("100".into()), + payment_vault_address: Some("0xcc".into()), + payment_token_address: Some("0xdd".into()), + rpc_url: Some("http://localhost:8545".into()), + }; + let json = serde_json::to_value(&resp).unwrap(); + assert_eq!(json["already_stored"], false); + assert_eq!(json["upload_id"], "abc123"); + assert_eq!(json["payment_type"], "wave_batch"); + assert_eq!(json["payments"][0]["quote_hash"], "0xaa"); + assert_eq!(json["total_amount"], "100"); + assert_eq!(json["payment_vault_address"], "0xcc"); + assert_eq!(json["rpc_url"], "http://localhost:8545"); + } + + #[test] + fn prepare_chunk_response_for_already_stored_omits_payment_fields() { + let resp = PrepareChunkResponse { + address: "deadbeef".repeat(8), + already_stored: true, + upload_id: None, + payment_type: None, + payments: Vec::new(), + total_amount: None, + payment_vault_address: None, + payment_token_address: None, + rpc_url: None, + }; + let json = serde_json::to_value(&resp).unwrap(); + assert_eq!(json["already_stored"], true); + assert!( + json.get("upload_id").is_none(), + "upload_id should be skipped when already_stored" + ); + assert!(json.get("payment_type").is_none()); + assert!(json.get("payments").is_none()); + assert!(json.get("total_amount").is_none()); + assert!(json.get("payment_vault_address").is_none()); + 
assert!(json.get("rpc_url").is_none()); + // address is always present so the caller can update their records + assert_eq!( + json["address"].as_str().unwrap().len(), + 64, + "BLAKE3 address must be 64 hex chars" + ); + } + + #[test] + fn prepare_chunk_request_deserializes() { + let req: PrepareChunkRequest = serde_json::from_str(r#"{"data":"SGVsbG8="}"#).unwrap(); + assert_eq!(req.data, "SGVsbG8="); + } + + #[test] + fn finalize_chunk_request_deserializes() { + let json = r#"{ + "upload_id": "abc", + "tx_hashes": {"0xaa": "0xbb", "0xcc": "0xdd"} + }"#; + let req: FinalizeChunkRequest = serde_json::from_str(json).unwrap(); + assert_eq!(req.upload_id, "abc"); + assert_eq!(req.tx_hashes.len(), 2); + assert_eq!(req.tx_hashes["0xaa"], "0xbb"); + } + + #[test] + fn finalize_chunk_response_serializes() { + let resp = FinalizeChunkResponse { + address: "deadbeef".repeat(8), + }; + let json = serde_json::to_value(&resp).unwrap(); + assert_eq!(json["address"].as_str().unwrap().len(), 64); + } }