diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000..f2c6a373b --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,7 @@ +[alias] +test_unit = "test --features automation,use-test-vk" +test_integration = "test --workspace --all-features -p core test:: -- --test-threads 7" +test_integration_release = "test --release --workspace --all-features -p core test:: -- --test-threads 7" +build_console = ''' +build -p clementine-core --all-features --config build.rustflags=["--cfg","tokio_unstable"] +''' \ No newline at end of file diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..b4247fb09 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,22 @@ +# exclude everything +* + +# include source files +!/circuits-lib +!/.cargo +!/risc0-circuits +!/bridge-circuit-host +!/core +!/scripts +!Cargo.lock +!Cargo.toml + +# include for vergen constants +!/.git + +# include licenses +!LICENSE-* +!LICENSE + +# exclude bitvm cache (for local builders who forgot to delete it) +**/bitvm_cache*.bin diff --git a/.env.example b/.env.example new file mode 100644 index 000000000..9c8e9b5f3 --- /dev/null +++ b/.env.example @@ -0,0 +1,88 @@ +READ_CONFIG_FROM_ENV=1 + +HOST=127.0.0.1 +PORT=17000 +INDEX=0 +SECRET_KEY=1111111111111111111111111111111111111111111111111111111111111111 + +WINTERNITZ_SECRET_KEY=2222222222222222222222222222222222222222222222222222222222222222 + +VERIFIERS_PUBLIC_KEYS=034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa,02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27,023c72addb4fdf09af94f0c94d7fe92a386a7e70cf8a1d85916386bb2535c7b1b1,032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991 +NUM_VERIFIERS=4 + +OPERATOR_XONLY_PKS=4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa,466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27 +NUM_OPERATORS=2 +OPERATOR_WITHDRAWAL_FEE_SATS=100000 + +BITCOIN_RPC_URL=http://127.0.0.1:18443/wallet/admin 
+BITCOIN_RPC_USER=admin +BITCOIN_RPC_PASSWORD=admin + +MEMPOOL_API_HOST=https://mempool.space/ +MEMPOOL_API_ENDPOINT=api/v1/fees/recommended + +DB_HOST=127.0.0.1 +DB_PORT=5432 +DB_USER=clementine +DB_PASSWORD=clementine +DB_NAME=clementine + +CITREA_RPC_URL=http://127.0.0.1:12345 +CITREA_LIGHT_CLIENT_PROVER_URL=http://127.0.0.1:12346 +BRIDGE_CONTRACT_ADDRESS=3100000000000000000000000000000000000002 + +HEADER_CHAIN_PROOF_PATH=../core/src/test/data/first_1.bin + +VERIFIER_ENDPOINTS=http://127.0.0.1:17001,http://127.0.0.1:17002,http://127.0.0.1:17003,http://127.0.0.1:17004 +OPERATOR_ENDPOINTS=http://127.0.0.1:17005,http://127.0.0.1:17006 + +OPERATOR_REIMBURSEMENT_ADDRESS=32iVBEu4dxkUQk9dJbZUiBiQdmypcEyJRf +OPERATOR_COLLATERAL_FUNDING_OUTPOINT=04bec3b3d996e4db2ac6175101bc9f281e7dba3029de9f4201d09c0d53b0ccf8:1 + +TELEMETRY_HOST=0.0.0.0 +TELEMETRY_PORT=8081 + +AGGREGATOR_VERIFICATION_ADDRESS=0x242fbec93465ce42b3d7c0e1901824a2697193fd + +READ_PARAMSET_FROM_ENV=1 + +NETWORK=regtest +NUM_ROUND_TXS=3 +NUM_KICKOFFS_PER_ROUND=10 +NUM_SIGNED_KICKOFFS=2 +BRIDGE_AMOUNT=1000000000 +KICKOFF_AMOUNT=0 +OPERATOR_CHALLENGE_AMOUNT=200000000 +COLLATERAL_FUNDING_AMOUNT=99000000 +KICKOFF_BLOCKHASH_COMMIT_LENGTH=40 +WATCHTOWER_CHALLENGE_BYTES=144 +WINTERNITZ_LOG_D=4 +USER_TAKES_AFTER=200 +OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK=144 +OPERATOR_CHALLENGE_NACK_TIMELOCK=432 +DISPROVE_TIMEOUT_TIMELOCK=720 +ASSERT_TIMEOUT_TIMELOCK=576 +OPERATOR_REIMBURSE_TIMELOCK=12 +WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK=288 +TIME_TO_SEND_WATCHTOWER_CHALLENGE=216 +LATEST_BLOCKHASH_TIMEOUT_TIMELOCK=360 +FINALITY_DEPTH=1 +START_HEIGHT=8148 +GENESIS_HEIGHT=8148 +GENESIS_CHAIN_STATE_HASH=1111111111111111111111111111111111111111111111111111111111111111 +HEADER_CHAIN_PROOF_BATCH_SIZE=100 +BRIDGE_NONSTANDARD=true + +SERVER_CERT_PATH="certs/server/server.pem" +SERVER_KEY_PATH="certs/server/server.key" +CA_CERT_PATH="certs/ca/ca.pem" +CLIENT_CERT_PATH="certs/client/client.pem" +CLIENT_KEY_PATH="certs/client/client.key" 
+AGGREGATOR_CERT_PATH="certs/aggregator/aggregator.pem" +CLIENT_VERIFICATION=true +SECURITY_COUNCIL=1:50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0 + +CITREA_RPC_URL=http://127.0.0.1:1234 +CITREA_LIGHT_CLIENT_PROVER_URL=http://127.0.0.1:1235 +CITREA_CHAIN_ID=5655 +BRIDGE_CONTRACT_ADDRESS=3100000000000000000000000000000000000002 diff --git a/.githooks/pre-commit b/.githooks/pre-commit new file mode 100755 index 000000000..c2bd7b183 --- /dev/null +++ b/.githooks/pre-commit @@ -0,0 +1,16 @@ +#!/bin/sh + +if ! command -v cargo-fmt > /dev/null 2>&1; then + echo "cargo fmt is not installed. Please install it." + exit 1 +fi + +echo "Checking code formatting..." +if ! cargo +nightly fmt --all -- --check; then + echo "Code is not properly formatted. Please run 'cargo +nightly fmt --all'." + echo "**After formatting, add the changes to your commit using 'git add' and commit again.**" + exit 1 +fi + +echo "Code formatting check passed." +exit 0 diff --git a/.github/ISSUE_TEMPLATE/bug_issue.yaml b/.github/ISSUE_TEMPLATE/bug_issue.yaml new file mode 100644 index 000000000..5510f1418 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_issue.yaml @@ -0,0 +1,46 @@ +name: Bug Template +description: This issue template is used for bug reports. +assignees: [] +labels: [] + +body: + - type: markdown + attributes: + value: | + Please fill out the following sections to help us reproduce and fix the bug. 
+ + - type: textarea + id: expected-behavior + attributes: + label: Expected Behavior + placeholder: "Explain what should happen" + validations: + required: true + + - type: textarea + id: current-behavior + attributes: + label: Current Behavior + placeholder: "Explain what happens instead of the expected behavior" + validations: + required: true + + - type: textarea + id: steps-to-reproduce + attributes: + label: Steps to Reproduce + placeholder: "Provide an unambiguous set of steps to reproduce the bug" + validations: + required: true + + - type: textarea + id: possible-solutions + attributes: + label: Possible Solutions or Workarounds + placeholder: "Optional solution or workaround suggestions" + + - type: textarea + id: notes + attributes: + label: Notes + placeholder: "Optional final notes" diff --git a/.github/ISSUE_TEMPLATE/improvement_issue.yaml b/.github/ISSUE_TEMPLATE/improvement_issue.yaml new file mode 100644 index 000000000..7dd3df3f7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/improvement_issue.yaml @@ -0,0 +1,14 @@ + +name: Improvement Proposal Template +description: This issue template is used for proposing any kind of improvements. +assignees: [] +labels: [] +body: + - type: textarea + id: issue-description + attributes: + placeholder: "Proposal description." + label: Proposal Description + description: "Describe the improvement idea, why we need it, how can it be done and how hard will it be (how much time it needs) to implement." + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/task_issue.yaml b/.github/ISSUE_TEMPLATE/task_issue.yaml new file mode 100644 index 000000000..5332e12a1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/task_issue.yaml @@ -0,0 +1,40 @@ + +name: Task Template +description: This issue template is used for giving tasks to group members. +assignees: [] +labels: [] +body: + - type: markdown + attributes: + value: | + Please do not forget to add **assignees** and **labels** to this issue. 
State your tasks explicitly + - type: textarea + id: issue-description + attributes: + placeholder: "Issue description." + label: Issue Description + description: "Describe the task and why we need it. It can also be a multi-task issue." + validations: + required: true + - type: textarea + id: step-details + attributes: + value: | + Parts of the task: + - [ ] ...(1) + - [ ] ...(2) + - [ ] ...(3) + label: Tasks + description: "Parts of the task" + - type: input + id: deadline + attributes: + label: Deadline of the Issue + description: "Please provide a deadline for the issue." + placeholder: "01.01.1970 - Monday - 23:59" + validations: + required: true + + + \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..278752f10 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,18 @@ +# Description + +Describe what this pull request does, here. + +## Linked Issues + +- Closes # (issue, if applicable) +- Related to # (issue) + +## Testing + +Describe how these changes were tested. If you've added new features, have you +added unit tests? + +## Docs + +Describe where this code is documented. If it changes a documented interface, +have the docs been updated? diff --git a/.github/actions/build-prerequisites/action.yml b/.github/actions/build-prerequisites/action.yml new file mode 100644 index 000000000..feac64e1c --- /dev/null +++ b/.github/actions/build-prerequisites/action.yml @@ -0,0 +1,11 @@ +# Start required services for building Clementine. 
+ +name: Clementine Build Services +description: 'Start services required for building Clementine' + +runs: + using: "composite" + + steps: + - name: Install Protoc + uses: arduino/setup-protoc@v3 diff --git a/.github/actions/test-prerequisites/action.yml b/.github/actions/test-prerequisites/action.yml new file mode 100644 index 000000000..b81960225 --- /dev/null +++ b/.github/actions/test-prerequisites/action.yml @@ -0,0 +1,69 @@ +# Start required services for testing Clementine. + +name: Clementine Test Services +description: "Start services required for testing Clementine" + +inputs: + github_token: + description: "GitHub token for authentication" + required: true + +runs: + using: "composite" + + steps: + - name: Cache bitvm cache files + uses: actions/cache@v4 + id: cache-bitvm + with: + path: | + core/bitvm_cache.bin + core/bitvm_cache_dev.bin + key: bitvm-cache-05-08-2025 + + - name: Download bitvm cache bin + if: steps.cache-bitvm.outputs.cache-hit != 'true' + shell: bash + run: wget https://static.testnet.citrea.xyz/common/bitvm_cache.bin -O core/bitvm_cache.bin + + - name: Download bitvm cache dev bin + if: steps.cache-bitvm.outputs.cache-hit != 'true' + shell: bash + run: wget https://static.testnet.citrea.xyz/common/bitvm_cache_dev.bin -O core/bitvm_cache_dev.bin + + - name: Cache Bitcoin binaries + uses: actions/cache@v4 + id: cache-bitcoin + with: + path: | + bitcoin-29.0-x86_64-linux-gnu.tar.gz + bitcoin-29.0/ + key: bitcoin-29.0-x86_64-linux-gnu + + - name: Download Bitcoin + if: steps.cache-bitcoin.outputs.cache-hit != 'true' + shell: bash + run: wget https://bitcoincore.org/bin/bitcoin-core-29.0/bitcoin-29.0-x86_64-linux-gnu.tar.gz + + - name: Unpack Bitcoin + if: steps.cache-bitcoin.outputs.cache-hit != 'true' + shell: bash + run: tar -xzvf bitcoin-29.0-x86_64-linux-gnu.tar.gz + + - name: Set executable permissions + shell: bash + run: chmod +x bitcoin-29.0/bin/* + + - name: Add bitcoin to path + shell: bash + run: echo "$PWD/bitcoin-29.0/bin" >> 
$GITHUB_PATH + + - name: Install risc0 + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.github_token }} + run: | + curl -L https://risczero.com/install | bash + export PATH="$PATH:$HOME/.risc0/bin" + rzup install + rzup install rust 1.85.0 diff --git a/.github/workflows/auto_tag.yml b/.github/workflows/auto_tag.yml new file mode 100644 index 000000000..19957b4b1 --- /dev/null +++ b/.github/workflows/auto_tag.yml @@ -0,0 +1,41 @@ +name: Label PR on Breaking Change + +on: + pull_request: + types: [opened, synchronize, reopened] + +jobs: + label: + runs-on: ubicloud-standard-2 + + steps: + - name: Checkout PR + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Get changed files + id: files + run: | + # NOTE(review): heredoc below reconstructed from a garbled source line — verify the diff range matches the original + { + echo "CHANGED<<EOF" + git diff --name-only origin/${{ github.base_ref }}...HEAD + echo "EOF" + } >> $GITHUB_OUTPUT + + - name: Check for deposit state changes + id: check + run: | + echo "${{ steps.files.outputs.CHANGED }}" | grep -E '^core/src/test/data/deposit_state_' && echo "match=true" >> $GITHUB_OUTPUT || echo "match=false" >> $GITHUB_OUTPUT + + - name: Add label + if: steps.check.outputs.match == 'true' + uses: actions/github-script@v7 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.payload.pull_request.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ['F-deposit-replace-needed'] + }) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml new file mode 100644 index 000000000..1e64f7dc7 --- /dev/null +++ b/.github/workflows/build_and_test.yml @@ -0,0 +1,307 @@ +name: Build And Test + +on: + push: + branches: + - main + - "releases/*" + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ (github.ref != 'refs/heads/main') }} + +env: + CARGO_TERM_COLOR: always + RUST_LOG: warn,risc0_zkvm=error,risc0_circuit_rv32im=error + RISC0_DEV_MODE: 1 + RUST_MIN_STACK: 33554432 + + CARGOFLAGS: --workspace --all-targets + CARGOFLAGS_ALL_FEATURES: 
--workspace --all-targets --all-features + +jobs: + # Build ---------------------------------------------------------------------- + release_build_all_features: + name: Release | All features | Compile + runs-on: ubicloud-standard-16 + if: ${{ !github.event.pull_request.draft }} + + steps: + - uses: catchpoint/workflow-telemetry-action@v2 + with: + comment_on_pr: false + + - uses: actions/checkout@v4 + - uses: ./.github/actions/build-prerequisites + + - name: Save/restore build artifacts + uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ runner.os }}-cargo-RELEASE-${{ hashFiles('**/Cargo.lock') }}-${{ github.sha }} + + - name: Compile in release mode + run: cargo build $CARGOFLAGS_ALL_FEATURES --release + + release_build: + name: Release | No features | Compile + runs-on: ubicloud-standard-2 + if: ${{ !github.event.pull_request.draft }} + + steps: + - uses: catchpoint/workflow-telemetry-action@v2 + with: + comment_on_pr: false + + - uses: actions/checkout@v4 + + - name: Compile in release mode + run: cargo build $CARGOFLAGS --release + + # Full Tests ----------------------------------------------------------------- + debug_build_test_all_features: + name: Debug | All features | Test + runs-on: ubicloud-standard-16 + + env: + INFO_LOG_FILE: ${{ github.workspace }}/test-logs/debug/debug-all-features-test.log + + services: + postgres: + image: postgres:latest + env: + POSTGRES_DB: clementine + POSTGRES_USER: clementine + POSTGRES_PASSWORD: clementine + POSTGRES_INITDB_ARGS: "-c shared_buffers=8GB -c max_connections=5000" + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Collect Workflow Telemetry + uses: catchpoint/workflow-telemetry-action@v2 + with: + comment_on_pr: false + + - uses: actions/checkout@v4 + - uses: ./.github/actions/build-prerequisites + - uses: ./.github/actions/test-prerequisites + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: 
Create test log directories + run: mkdir -p test-logs/debug + + - name: Save/restore build artifacts + uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ runner.os }}-cargo-DEBUG-${{ hashFiles('**/Cargo.lock') }}-${{ github.sha }} + + - name: Run unit tests + id: unit_tests + run: | + set -o pipefail + ./scripts/generate_certs.sh + cargo test_unit 2>&1 | tee unit_test_output.log + + - name: Check for specific test failure and run generate_deposit_state + if: failure() + run: | + if grep -q "test builder::sighash::tests::test_bridge_contract_change ... FAILED" unit_test_output.log; then + echo "Found test_bridge_contract_change failure, running generate_deposit_state test" + cargo test generate_deposit_state $CARGOFLAGS_ALL_FEATURES -- --ignored + fi + + - name: Upload deposit state artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: deposit-state-debug + path: core/src/test/data/deposit_state_debug.bincode + if-no-files-found: ignore + retention-days: 1 + + - name: Run integration tests + run: cargo test_integration + + - name: Upload test logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: debug-test-logs + path: test-logs/debug/ + retention-days: 7 + + release_build_test_all_features: + name: Release | All features | Test + runs-on: ubicloud-standard-16 + if: ${{ !github.event.pull_request.draft }} + needs: release_build_all_features + + env: + INFO_LOG_FILE: ${{ github.workspace }}/test-logs/release/release-all-features-test.log + + services: + postgres: + image: postgres:latest + env: + POSTGRES_DB: clementine + POSTGRES_USER: clementine + POSTGRES_PASSWORD: clementine + POSTGRES_INITDB_ARGS: "-c shared_buffers=8GB -c max_connections=5000" + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Collect Workflow Telemetry + uses: catchpoint/workflow-telemetry-action@v2 + with: + comment_on_pr: false + + - uses: 
actions/checkout@v4 + - uses: ./.github/actions/build-prerequisites + - uses: ./.github/actions/test-prerequisites + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Create test log directories + run: mkdir -p test-logs/release + + - name: Save/restore build artifacts + uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ runner.os }}-cargo-RELEASE-${{ hashFiles('**/Cargo.lock') }}-${{ github.sha }} + + - name: Run unit tests + id: unit_tests + run: | + set -o pipefail + ./scripts/generate_certs.sh + cargo test_unit --release 2>&1 | tee unit_test_output.log + + - name: Check for specific test failure and run generate_deposit_state + if: failure() + run: | + if grep -q "test builder::sighash::tests::test_bridge_contract_change ... FAILED" unit_test_output.log; then + echo "Found test_bridge_contract_change failure, running generate_deposit_state test" + cargo test --release generate_deposit_state $CARGOFLAGS_ALL_FEATURES -- --ignored + fi + + - name: Upload deposit state artifact + if: failure() + uses: actions/upload-artifact@v4 + with: + name: deposit-state-release + path: core/src/test/data/deposit_state_release.bincode + if-no-files-found: ignore + retention-days: 1 + + - name: Run integration tests + run: cargo test_integration_release + + - name: Upload test logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: release-test-logs + path: test-logs/release/ + retention-days: 7 + + # Matrix (Standalone) Tests -------------------------------------------------- + standalone_tests: + name: Release | All features | Test ${{ matrix.description_suffix }} + runs-on: ubicloud-standard-8 + if: ${{ !github.event.pull_request.draft }} + needs: release_build_all_features + + env: + INFO_LOG_FILE: ${{ github.workspace }}/test-logs/standalone/standalone-${{ matrix.test_name }}-test.log + + strategy: + fail-fast: false + matrix: + include: + - test_script_name: additional_disprove_script_test_disrupted_payout_tx_block_hash + description_suffix: 
"Add. disprove disrupted payout tx blockhash path" + test_name: "additional-disrupted-payout" + - test_script_name: additional_disprove_script_test_disrupt_chal_sending_wts + description_suffix: "Add. disprove disrupted challenge sending watchtowers path" + test_name: "additional-disrupted-chal-sending-wts" + - test_script_name: additional_disprove_script_test_operator_forgot_wt_challenge + description_suffix: "Add. disprove operator forgot watchtower challenge path" + test_name: "additional-forgot-challenge" + - test_script_name: additional_disprove_script_test_disrupted_latest_block_hash + description_suffix: "Add. disprove disrupted latest blockhash path" + test_name: "additional-disrupted-latest-blockhash" + - test_script_name: additional_disprove_script_test_corrupted_public_input + description_suffix: "Add. disprove disrupted public input path" + test_name: "additional-corrupted-public-input" + - test_script_name: bitvm_disprove_scripts::disprove_script_test_healthy + description_suffix: "Disprove healthy path" + test_name: "disprove-healthy" + - test_script_name: bitvm_disprove_scripts::disprove_script_test_corrupted_assert + description_suffix: "Disprove disrupted assert path" + test_name: "disprove-corrupted-assert" + - test_script_name: citrea_deposit_and_withdraw_e2e_non_zero_genesis_height + description_suffix: "E2E non-zero genesis height" + test_name: "citrea-deposit-and-withdraw-non-zero-genesis-height" + + services: + postgres: + image: postgres:latest + env: + POSTGRES_DB: clementine + POSTGRES_USER: clementine + POSTGRES_PASSWORD: clementine + POSTGRES_INITDB_ARGS: "-c shared_buffers=8GB -c max_connections=1000" + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Collect Workflow Telemetry + uses: catchpoint/workflow-telemetry-action@v2 + with: + comment_on_pr: false + + - uses: actions/checkout@v4 + - uses: ./.github/actions/build-prerequisites + - 
uses: ./.github/actions/test-prerequisites + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Create test log directories + run: mkdir -p test-logs/standalone + + - name: Save/restore build artifacts + uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ runner.os }}-cargo-RELEASE-${{ hashFiles('**/Cargo.lock') }}-${{ github.sha }} + + - name: Run tests + run: | + ./scripts/generate_certs.sh + cargo test ${{ matrix.test_script_name }} $CARGOFLAGS_ALL_FEATURES --release -- --ignored --nocapture + + - name: Upload test logs + if: failure() || cancelled() + uses: actions/upload-artifact@v4 + with: + name: standalone-test-logs-${{ matrix.test_name }} + path: test-logs/standalone/ + retention-days: 7 diff --git a/.github/workflows/code_checks.yml b/.github/workflows/code_checks.yml new file mode 100644 index 000000000..32a5e9e00 --- /dev/null +++ b/.github/workflows/code_checks.yml @@ -0,0 +1,105 @@ +name: Code Checks + +on: + push: + branches: + - main + - "releases/*" + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ (github.ref != 'refs/heads/main') }} + +env: + CARGO_TERM_COLOR: always + RUST_LOG: warn,risc0_zkvm=error,risc0_circuit_rv32im=error + RISC0_DEV_MODE: 1 + RUST_MIN_STACK: 33554432 + +jobs: + formatting: + name: Check formatting + runs-on: ubicloud-standard-2 + + steps: + - uses: actions/checkout@v4 + - name: Run Cargo fmt + run: cargo fmt --check + + linting: + name: Check linting + runs-on: ubicloud-standard-2 + + steps: + - uses: actions/checkout@v4 + - name: Install cargo-clippy + run: rustup component add --toolchain 1.85-x86_64-unknown-linux-gnu clippy + - name: Run Cargo clippy + run: cargo clippy --no-deps --all-targets --all-features -- -Dwarnings + + udeps: + name: Check unused dependencies + runs-on: ubicloud-standard-2 + + steps: + - uses: actions/checkout@v4 + + - name: Toolchain + uses: actions-rs/toolchain@v1 + 
with: + toolchain: nightly-2025-03-09 + override: true + + - name: Run cargo-udeps + env: + RUSTFLAGS: -A warnings + uses: aig787/cargo-udeps-action@v1 + with: + version: "latest" + args: "--workspace --all-features --all-targets" + + docs: + name: Check documentation build + runs-on: ubicloud-standard-2 + + steps: + - uses: actions/checkout@v4 + - name: Build documentation + run: cargo doc --no-deps --all-features --document-private-items + + codespell: + name: Check spelling + runs-on: ubicloud-standard-2 + if: github.event.pull_request.draft == false + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.x" + + - name: Install codespell + run: pip install codespell + + - name: Run codespell + run: | + codespell --skip="*.lock,./target" -I="codespell_ignore.txt" + + check_for_todos: + name: Check for TODOs + runs-on: ubicloud-standard-2 + if: github.event.pull_request.draft == false + steps: + - uses: actions/checkout@v4 + + - name: Check for TODOs + run: | + if git grep -i "TODO" -- ':!docs' ':!*.md' ':!*.txt' ':!*.rst' ':!*.lock' ':!target' ':!scripts' ':!tests' ':!examples' ':!**/code_checks.yml'; then + echo "Found TODOs in code directories. 
Please address them before merging."; + exit 1; + else + echo "No TODOs found in code directories."; + fi diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 000000000..19e2730b2 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,78 @@ +name: Run Coverage + +on: [workflow_dispatch] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ (github.ref != 'refs/heads/main') }} + +env: + CARGO_TERM_COLOR: always + RUST_LOG: warn,risc0_zkvm=error,risc0_circuit_rv32im=error + RISC0_DEV_MODE: 1 + RUST_MIN_STACK: 33554432 + + CARGOFLAGS: --workspace --all-targets + CARGOFLAGS_ALL_FEATURES: --workspace --all-targets --all-features + +jobs: + coverage: + name: Release | All features | Coverage + runs-on: ubicloud-standard-30 + + env: + INFO_LOG_FILE: ${{ github.workspace }}/test-logs/coverage/coverage-test.log + + services: + postgres: + image: postgres:latest + env: + POSTGRES_DB: clementine + POSTGRES_USER: clementine + POSTGRES_PASSWORD: clementine + POSTGRES_INITDB_ARGS: "-c shared_buffers=8GB -c max_connections=5000" + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + steps: + - name: Collect Workflow Telemetry + uses: catchpoint/workflow-telemetry-action@v2 + with: + comment_on_pr: false + + - uses: actions/checkout@v4 + - uses: ./.github/actions/build-prerequisites + - uses: ./.github/actions/test-prerequisites + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + - uses: taiki-e/install-action@cargo-llvm-cov + + - name: Create test log directories + run: mkdir -p test-logs/coverage + + - name: Save/restore build artifacts + uses: Swatinem/rust-cache@v2 + with: + shared-key: ${{ runner.os }}-cargo-RELEASE-${{ hashFiles('**/Cargo.lock') }}-${{ github.sha }} + + - name: Run coverage + run: cargo llvm-cov $CARGOFLAGS_ALL_FEATURES --release --lcov --locked --output-path lcov.info -- --test-threads 
6 + + - name: Upload coverage + uses: codecov/codecov-action@v4 + with: + fail_ci_if_error: true # optional (default = false) + files: ./lcov.info + token: ${{ secrets.CODECOV_TOKEN }} + + - name: Upload test logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: coverage-test-logs + path: test-logs/coverage/ + retention-days: 7 diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 000000000..86b24a44b --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,44 @@ +name: Build and Deploy Documentation + +on: + push: + branches: + - main + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ (github.ref != 'refs/heads/main') }} + +env: + CARGO_TERM_COLOR: always + CARGOFLAGS: --workspace --all-targets --all-features + RUST_LOG: warn,risc0_zkvm=error,risc0_circuit_rv32im=error + RISC0_DEV_MODE: 1 + RUST_MIN_STACK: 33554432 + +jobs: + build: + name: Build and deploy documentation + runs-on: ubicloud-standard-2 + permissions: + contents: write + + steps: + - uses: actions/checkout@v4 + - name: Clean documentation directory + run: cargo clean --doc + - name: Build documentation + run: cargo doc --no-deps --all-features --document-private-items + - name: Finalize documentation + run: | + echo '' > target/doc/index.html + touch target/doc/.nojekyll + - name: Upload as artifact + uses: actions/upload-artifact@v4 + with: + name: Documentation + path: ./target/doc + - name: Deploy + uses: JamesIves/github-pages-deploy-action@v4 + with: + folder: target/doc diff --git a/.github/workflows/publish_docker.yml b/.github/workflows/publish_docker.yml new file mode 100644 index 000000000..6136b5f67 --- /dev/null +++ b/.github/workflows/publish_docker.yml @@ -0,0 +1,64 @@ +name: Dockerization + +on: + push: + branches: + - main + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 
+ # Except in `main` branch! Any cancelled job will cause the + # CI run to fail, and we want to keep a clean history for major branches. + cancel-in-progress: ${{ (github.ref != 'refs/heads/main') }} + +jobs: + build_and_publish: + name: Build and publish Docker image + runs-on: ubicloud-standard-2 + timeout-minutes: 120 + + steps: + - uses: actions/checkout@v4 + - name: Docker Setup Buildx + uses: docker/setup-buildx-action@v3.2.0 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push + uses: docker/build-push-action@v5.3.0 + with: + context: . + file: ./scripts/docker/Dockerfile + push: true + tags: | + ${{ secrets.DOCKERHUB_USERNAME }}/clementine:${{ github.sha }} + ${{ secrets.DOCKERHUB_USERNAME }}/clementine:latest + + build_and_publish_automation: + name: Build and publish Docker image with automation flag + runs-on: ubicloud-standard-2 + timeout-minutes: 120 + + steps: + - uses: actions/checkout@v4 + - name: Docker Setup Buildx + uses: docker/setup-buildx-action@v3.2.0 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push + uses: docker/build-push-action@v5.3.0 + with: + context: . 
+ file: ./scripts/docker/Dockerfile.automation + push: true + tags: | + ${{ secrets.DOCKERHUB_USERNAME }}/clementine-automation:${{ github.sha }} + ${{ secrets.DOCKERHUB_USERNAME }}/clementine-automation:latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..0b9cb37da --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,154 @@ +name: release + +on: + push: + tags: + - "v*.*.*" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ (github.ref != 'refs/heads/main') }} + +jobs: + linux_amd64_binary_extraction: + runs-on: ubicloud-standard-30 + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Dependencies + run: | + sudo apt update && sudo apt -y install curl gcc cpp cmake clang llvm + sudo apt -y autoremove && sudo apt clean && sudo rm -rf /var/lib/apt/lists/* + + - name: Install Rust + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + rustup install 1.85.0 + rustup default 1.85.0 + + - name: Install risc0 + uses: ./.github/actions/test-prerequisites + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Build Project (No Features) + run: | + cargo build --release + + - name: Copy No Features Binaries + run: | + cp target/release/clementine-core target/release/clementine-core-no-automation + cp target/release/clementine-cli target/release/clementine-cli-no-automation + + - name: Build Project (with automation) + run: | + cargo build --features automation --release + + - name: Copy Automation Feature Binaries + run: | + cp target/release/clementine-core target/release/clementine-core-with-automation + cp target/release/clementine-cli target/release/clementine-cli-with-automation + + - name: Upload clementine no-automation linux-amd64 Binary + uses: actions/upload-artifact@v4 + with: + name: clementine-core-${{ github.ref_name }}-no-automation-linux-amd64 + path: 
target/release/clementine-core-no-automation + + - name: Upload clementine with-automation linux-amd64 Binary + uses: actions/upload-artifact@v4 + with: + name: clementine-core-${{ github.ref_name }}-with-automation-linux-amd64 + path: target/release/clementine-core-with-automation + + - name: Upload clementine-cli no-automation linux-amd64 Binary + uses: actions/upload-artifact@v4 + with: + name: clementine-cli-${{ github.ref_name }}-no-automation-linux-amd64 + path: target/release/clementine-cli-no-automation + + - name: Upload clementine-cli with-automation linux-amd64 Binary + uses: actions/upload-artifact@v4 + with: + name: clementine-cli-${{ github.ref_name }}-with-automation-linux-amd64 + path: target/release/clementine-cli-with-automation + + release: + needs: linux_amd64_binary_extraction + runs-on: ubicloud-standard-2 + steps: + - name: Download no-automation linux-amd64 Binary + uses: actions/download-artifact@v4 + with: + name: clementine-core-${{ github.ref_name }}-no-automation-linux-amd64 + path: release + + - name: rename no-automation file + run: | + mv release/clementine-core-no-automation release/clementine-core-${{ github.ref_name }}-no-automation-linux-amd64 + + - name: Download with-automation linux-amd64 Binary + uses: actions/download-artifact@v4 + with: + name: clementine-core-${{ github.ref_name }}-with-automation-linux-amd64 + path: release + + - name: rename with-automation file + run: | + mv release/clementine-core-with-automation release/clementine-core-${{ github.ref_name }}-with-automation-linux-amd64 + + - name: Download clementine-cli no-automation linux-amd64 Binary + uses: actions/download-artifact@v4 + with: + name: clementine-cli-${{ github.ref_name }}-no-automation-linux-amd64 + path: release + + - name: rename clementine-cli no-automation file + run: | + mv release/clementine-cli-no-automation release/clementine-cli-${{ github.ref_name }}-no-automation-linux-amd64 + + - name: Download clementine-cli with-automation 
linux-amd64 Binary + uses: actions/download-artifact@v4 + with: + name: clementine-cli-${{ github.ref_name }}-with-automation-linux-amd64 + path: release + + - name: rename clementine-cli with-automation file + run: | + mv release/clementine-cli-with-automation release/clementine-cli-${{ github.ref_name }}-with-automation-linux-amd64 + + - name: Release + uses: softprops/action-gh-release@v1 + with: + draft: ${{ contains(github.ref, 'tmp') }} + files: | + release/clementine-core-${{ github.ref_name }}-no-automation-linux-amd64 + release/clementine-core-${{ github.ref_name }}-with-automation-linux-amd64 + release/clementine-cli-${{ github.ref_name }}-no-automation-linux-amd64 + release/clementine-cli-${{ github.ref_name }}-with-automation-linux-amd64 + name: Release ${{ github.ref_name }} + body: | + This is the release for version ${{ github.ref_name }}. + + It includes: + - clementine-core-${{ github.ref_name }}-no-automation-linux-amd64 + - clementine-core-${{ github.ref_name }}-with-automation-linux-amd64 + - clementine-cli-${{ github.ref_name }}-no-automation-linux-amd64 + - clementine-cli-${{ github.ref_name }}-with-automation-linux-amd64 + + - uses: actions/checkout@v4 + - name: Docker Setup Buildx + uses: docker/setup-buildx-action@v3.2.0 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push + uses: docker/build-push-action@v5.3.0 + with: + context: . 
+ file: ./scripts/docker/Dockerfile + push: true + tags: ${{ secrets.DOCKERHUB_USERNAME }}/clementine:${{ github.ref_name }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..22069d5ab --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +bitvm_cache.bin +bitvm_cache_dev.bin + +.DS_Store + +risc0-guests/operator/guest/Cargo.lock +risc0-guests/verifier/guest/Cargo.lock +target/ +.env +/configs +/core/configs +./database +.vscode +.docker/db/data/ +*.log +lcov.info +lcov.json +.docker/ +.idea/ +logs +out.json* + +core/certs/ diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 000000000..8c3847459 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,9297 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "addchain" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" +dependencies = [ + "num-bigint 0.3.3", + "num-integer", + "num-traits", +] + +[[package]] +name = "addr2line" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +dependencies = [ + "cpp_demangle", + "fallible-iterator", + "gimli 0.29.0", + "memmap2", + "object 0.35.0", + "rustc-demangle", + "smallvec", +] + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli 0.31.1", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "alloy" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2cc5aeb8dfa1e451a49fac87bc4b86c5de40ebea153ed88e83eb92b8151e74" +dependencies = [ + "alloy-consensus", + "alloy-contract", + "alloy-core", + "alloy-eips", + "alloy-genesis", + "alloy-network", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-serde", + "alloy-signer", + "alloy-signer-local", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", +] + +[[package]] +name = "alloy-chains" +version = "0.1.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28e2652684758b0d9b389d248b209ed9fd9989ef489a550265fe4bb8454fe7eb" +dependencies = [ + "alloy-primitives", + "num_enum", + "strum 0.27.2", +] + +[[package]] +name = "alloy-consensus" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e32ef5c74bbeb1733c37f4ac7f866f8c8af208b7b4265e21af609dcac5bd5e" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-trie", + 
"auto_impl", + "c-kzg", + "derive_more 1.0.0", + "k256", + "serde", +] + +[[package]] +name = "alloy-consensus-any" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa13b7b1e1e3fedc42f0728103bfa3b4d566d3d42b606db449504d88dbdbdcf" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-contract" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee6180fb232becdea70fad57c63b6967f01f74ab9595671b870f504116dd29de" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-core" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d8bcce99ad10fe02640cfaec1c6bc809b837c783c1d52906aa5af66e2a196f6" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb8e762aefd39a397ff485bc86df673465c4ad3ec8819cc60833a8a3ba5cdc87" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-type-parser", + "alloy-sol-types", + "const-hex", + "itoa", + "serde", + "serde_json", + "winnow", +] + +[[package]] +name = "alloy-eip2124" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "675264c957689f0fd75f5993a73123c2cc3b5c235a38f5b9037fe6c826bfb2c0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "crc", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b15b13d38b366d01e818fe8e710d4d702ef7499eacd44926a06171dd9585d0c" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "k256", + "serde", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eips" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5591581ca2ab0b3e7226a4047f9a1bfcf431da1d0cce3752fda609fea3c27e37" +dependencies = [ + "alloy-eip2124", + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "once_cell", + "serde", + "sha2", +] + +[[package]] +name = "alloy-genesis" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cded3a2d4bd7173f696458c5d4c98c18a628dfcc9f194385e80a486e412e2e0" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "alloy-trie", + "serde", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6beff64ad0aa6ad1019a3db26fef565aefeb011736150ab73ed3366c3cfd1b" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "762414662d793d7aaa36ee3af6928b6be23227df1681ce9c039f6f11daadef64" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror 2.0.12", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "8be03f2ebc00cf88bd06d3c6caf387dceaa9c7e6b268216779fa68a9bf8ab4e6" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-any", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a00ce618ae2f78369918be0c20f620336381502c83b6ed62c2f7b2db27698b0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 2.0.1", + "foldhash", + "getrandom 0.2.16", + "hashbrown 0.15.4", + "indexmap 2.10.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash 2.1.1", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-provider" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbe0a2acff0c4bd1669c71251ce10fc455cbffa1b4d0a817d5ea4ba7e5bb3db7" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types-anvil", + "alloy-rpc-types-debug", + "alloy-rpc-types-eth", + "alloy-rpc-types-trace", + "alloy-rpc-types-txpool", + "alloy-sol-types", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", + "async-stream", + "async-trait", + 
"auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "parking_lot", + "pin-project", + "reqwest", + "serde", + "serde_json", + "thiserror 2.0.12", + "tokio", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-pubsub" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3a68996f193f542f9e29c88dfa8ed1369d6ee04fa764c1bf23dc11b2f9e4a2" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "bimap", + "futures", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tracing", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b37cc3c7883dc41be1b01460127ad7930466d0a4bb6ba15a02ee34d2745e2d7c" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-pubsub", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", + "futures", + "pin-project", + "reqwest", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f18e68a3882f372e045ddc89eb455469347767d17878ca492cfbac81e71a111" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-anvil", + "alloy-rpc-types-engine", + 
"alloy-rpc-types-eth", + "alloy-rpc-types-trace", + "alloy-rpc-types-txpool", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-anvil" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10d06300df4a87d960add35909240fc72da355dd2ac926fa6999f9efafbdc5a7" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-any" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "318ae46dd12456df42527c3b94c1ae9001e1ceb707f7afe2c7807ac4e49ebad9" +dependencies = [ + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", +] + +[[package]] +name = "alloy-rpc-types-debug" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2834b7012054cb2f90ee9893b7cc97702edca340ec1ef386c30c42e55e6cd691" +dependencies = [ + "alloy-primitives", + "serde", +] + +[[package]] +name = "alloy-rpc-types-engine" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83dde9fcf1ccb9b815cc0c89bba26bbbbaae5150a53ae624ed0fc63cb3676c1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "derive_more 1.0.0", + "jsonwebtoken", + "rand 0.8.5", + "serde", + "strum 0.26.3", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b4dbee4d82f8a22dde18c28257bed759afeae7ba73da4a1479a039fd1445d04" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.14.0", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-rpc-types-trace" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7bd951155515fa452a2ca4b5434d4b3ab742bcd3d1d1b9a91704bcef5b8d2604" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-rpc-types-txpool" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21d8dd5bd94993eda3d56a8c4c0d693548183a35462523ffc4385c0b020d3b0c" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-serde" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8732058f5ca28c1d53d241e8504620b997ef670315d7c8afab856b3e3b80d945" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f96b3526fdd779a4bd0f37319cfb4172db52a7ac24cdbb8804b72091c18e1701" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "either", + "elliptic-curve", + "k256", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-signer-local" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe8f78cd6b7501c7e813a1eb4a087b72d23af51f5bb66d4e948dc840bdd207d8" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "async-trait", + "k256", + "rand 0.8.5", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10ae8e9a91d328ae954c22542415303919aabe976fe7a92eb06db1b68fd59f2" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "83ad5da86c127751bc607c174d6c9fe9b85ef0889a9ca0c641735d77d4f98f26" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap 2.10.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3d30f0d3f9ba3b7686f3ff1de9ee312647aac705604417a2f40c604f409a9e" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "macro-string", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.104", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d162f8524adfdfb0e4bd0505c734c985f3e2474eb022af32eef0d52a4f3935c" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d43d5e60466a440230c07761aa67671d4719d46f43be8ea6e7ed334d8db4a9ab" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-transport" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a8d762eadce3e9b65eac09879430c6f4fce3736cac3cac123f9b1bf435ddd13" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror 2.0.12", + "tokio", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-transport-http" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20819c4cb978fb39ce6ac31991ba90f386d595f922f42ef888b4a18be190713e" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest", + "serde_json", + "tower 0.5.2", + "tracing", 
+ "url", +] + +[[package]] +name = "alloy-transport-ipc" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e88304aa8b796204e5e2500dfe235933ed692745e3effd94c3733643db6d218" +dependencies = [ + "alloy-json-rpc", + "alloy-pubsub", + "alloy-transport", + "bytes", + "futures", + "interprocess", + "pin-project", + "serde", + "serde_json", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "alloy-transport-ws" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9653ea9aa06d0e02fcbe2f04f1c47f35a85c378ccefa98e54ae85210bc8bbfa" +dependencies = [ + "alloy-pubsub", + "alloy-transport", + "futures", + "http", + "rustls", + "serde_json", + "tokio", + "tokio-tungstenite", + "tracing", + "ws_stream_wasm", +] + +[[package]] +name = "alloy-trie" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a94854e420f07e962f7807485856cde359ab99ab6413883e15235ad996e8b" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more 1.0.0", + "nybbles", + "serde", + "smallvec", + "tracing", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" 
+version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" + +[[package]] +name = "anstyle-parse" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.59.0", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +dependencies = [ + "backtrace", +] + +[[package]] +name = "arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-r1cs-std", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0c292754729c8a190e50414fd1a37093c786c709899f29c9f7daccecfa855e" +dependencies = [ + "ahash", + "ark-crypto-primitives-macros", + "ark-ec", + "ark-ff 
0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-snark", + "ark-std 0.5.0", + "blake2", + "derivative", + "digest 0.10.7", + "fnv", + "merlin", + "rayon", + "sha2", +] + +[[package]] +name = "ark-crypto-primitives-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e89fe77d1f0f4fe5b96dfc940923d88d17b6a773808124f21e764dfb063c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "rayon", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 
0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rayon", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-groth16" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "88f1d0f3a534bb54188b8dcc104307db6c56cdae574ddc3212aec0625740fc7e" +dependencies = [ + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-poly", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "rayon", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", + "rayon", +] + +[[package]] +name = "ark-r1cs-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "941551ef1df4c7a401de7068758db6503598e6f01850bdb2cfdb614a1f9dbea1" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-relations", + "ark-std 0.5.0", + "educe", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "tracing", +] + +[[package]] +name = "ark-relations" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec46ddc93e7af44bcab5230937635b06fb5744464dd6a7e7b083e80ebd274384" +dependencies = [ + "ark-ff 0.5.0", + "ark-std 0.5.0", + "tracing", + "tracing-subscriber 0.2.25", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "num-bigint 0.4.6", + "rayon", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-snark" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d368e2848c2d4c129ce7679a7d0d2d612b6a274d3ea6a13bad4445d61b381b88" +dependencies = [ + "ark-ff 0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "colored", + "num-traits", + "rand 0.8.5", + "rayon", +] + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = 
"0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "async-trait" +version = "0.1.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.1", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "atomic-polyfill" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" +dependencies = [ + "critical-section", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = 
"auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "auto_ops" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7460f7dd8e100147b82a63afca1a20eb6c231ee36b90ba7272e14951cb58af59" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "aws-lc-rs" +version = "1.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c953fe1ba023e6b7730c0d4b031d06f267f23a46167dcbd40316644b10a17ba" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbfd150b5dbdb988bcc8fb1fe787eb6b7ee6180ca24da683b61ea5405f3d43ff" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + 
"pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +dependencies = [ + "addr2line 0.24.2", + "cfg-if", + "libc", + "miniz_oxide", + "object 0.36.7", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base58ck" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" +dependencies = [ + "bitcoin-internals", + "bitcoin_hashes", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64-compat" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a8d4d2746f89841e49230dd26917df1876050f95abafafbe34f47cb534b88d7" +dependencies = [ + "byteorder", +] + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + +[[package]] +name = "bech32" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" + +[[package]] +name = "bimap" +version = "0.6.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags 2.9.1", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn 2.0.104", + "which", +] + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitcoin" +version = "0.32.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8929a18b8e33ea6b3c09297b687baaa71fb1b97353243a3f1029fad5c59c5b" +dependencies = [ + "base58ck", + "base64 0.21.7", + "bech32", + "bitcoin-internals", + "bitcoin-io", + "bitcoin-units", + "bitcoin_hashes", + "bitcoinconsensus", + "hex-conservative", + "hex_lit", + "secp256k1 0.29.1", + "serde", +] + +[[package]] +name = "bitcoin-internals" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" +dependencies = [ + "serde", +] + +[[package]] +name = "bitcoin-io" +version = "0.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" + +[[package]] +name = "bitcoin-script" +version = "0.4.0" +source = "git+https://github.com/BitVM/rust-bitcoin-script#3c75a6e4cfddb800f57710d8270c0a1dc60eb57e" +dependencies = [ + "bitcoin", + "script-macro", + "stdext", +] + +[[package]] +name = "bitcoin-script-stack" +version = "0.0.1" +source = "git+https://github.com/BitVM/rust-bitcoin-script-stack#643c5f1a44af448274849c01a5ae7fbdd54d8213" +dependencies = [ + "bitcoin", + "bitcoin-script", + "bitcoin-scriptexec", +] + +[[package]] +name = "bitcoin-scriptexec" +version = "0.0.0" +source = "git+https://github.com/BitVM/rust-bitcoin-scriptexec#b24608bff855ea8932ae236c7a04f13f730ab9f8" +dependencies = [ + "bitcoin", + "clap", + "console_error_panic_hook", + "getrandom 0.2.16", + "lazy_static", + "serde", + "serde-wasm-bindgen", + "serde_json", + "wasm-bindgen", +] + +[[package]] +name = "bitcoin-units" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" +dependencies = [ + "bitcoin-internals", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", + "serde", +] + +[[package]] +name = "bitcoinconsensus" +version = "0.105.0+25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f260ac8fb2c621329013fc0ed371c940fcc512552dcbcb9095ed0179098c9e18" +dependencies = [ + "cc", +] + +[[package]] +name = "bitcoincore-rpc" +version = "0.18.0" +source = "git+https://github.com/chainwayxyz/rust-bitcoincore-rpc.git?rev=5da45109a2de352472a6056ef90a517b66bc106f#5da45109a2de352472a6056ef90a517b66bc106f" +dependencies = [ + "async-trait", + 
"bitcoincore-rpc-json", + "jsonrpc-async", + "log", + "reqwest", + "serde", + "serde_json", + "url", +] + +[[package]] +name = "bitcoincore-rpc-json" +version = "0.18.0" +source = "git+https://github.com/chainwayxyz/rust-bitcoincore-rpc.git?rev=5da45109a2de352472a6056ef90a517b66bc106f#5da45109a2de352472a6056ef90a517b66bc106f" +dependencies = [ + "bitcoin", + "serde", + "serde_json", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +dependencies = [ + "serde", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "bitvm" +version = "0.1.0" +source = "git+https://github.com/chainwayxyz/BitVM?rev=a82e5a6bbc1183f98e8f2abd762baf20eb054475#a82e5a6bbc1183f98e8f2abd762baf20eb054475" +dependencies = [ + "ark-bn254", + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-groth16", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "bitcoin", + "bitcoin-script", + "bitcoin-script-stack", + "bitcoin-scriptexec", + "blake3", + "colored", + "itertools 0.13.0", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rand 0.8.5", + "rand_chacha 0.3.1", + "regex", + "serde", + "sha2", + "tqdm", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "blake3" +version = "1.8.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blst" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "bollard" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899ca34eb6924d6ec2a77c6f7f5c7339e60fd68235eaf91edd5a15f12958bb06" +dependencies = [ + "base64 0.22.1", + "bollard-stubs", + "bytes", + "futures-core", + "futures-util", + "hex", + "http", + "http-body-util", + "hyper", + "hyper-named-pipe", + "hyper-util", + "hyperlocal", + "log", + "pin-project-lite", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", + "thiserror 2.0.12", + "tokio", + "tokio-util", + "tower-service", + "url", + "winapi", +] + +[[package]] +name = "bollard-stubs" +version = "1.48.3-rc.28.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ea257e555d16a2c01e5593f40b73865cdf12efbceda33c6d14a2d8d1490368" +dependencies = [ + "serde", + "serde_json", + "serde_repr", + "serde_with", +] + +[[package]] +name = "bonsai-sdk" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0bce8d6acc5286a16e94c29e9c885d1869358885e08a6feeb6bc54e36fe20055" +dependencies = [ + "duplicate", + "maybe-async", + "reqwest", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "bytes", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "bridge-circuit-host" +version = "0.1.0" +dependencies = [ + "alloy-rpc-types", + "ark-bn254", + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-groth16", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "bincode", + "bitcoin", + "blake3", + "borsh", + "circuits-lib", + "eyre", + "hex", + "hex-literal", + "num-bigint 0.4.6", + "num-traits", + "once_cell", + "rand 0.8.5", + "risc0-binfmt", + "risc0-circuit-recursion", + "risc0-groth16", + "risc0-zkp", + "risc0-zkvm", + "serde", + "serde_json", + "sha2", + "sov-rollup-interface", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" + +[[package]] +name = "bytemuck" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies 
= [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +dependencies = [ + "serde", +] + +[[package]] +name = "c-kzg" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "once_cell", + "serde", +] + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.26", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "cc" +version = "1.2.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2" 
+dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + +[[package]] +name = "chrono" +version = "0.4.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + 
"inout", + "zeroize", +] + +[[package]] +name = "circuits-lib" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types", + "alloy-rpc-types-eth", + "ark-bn254", + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-groth16", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "bincode", + "bitcoin", + "blake3", + "borsh", + "crypto-bigint", + "derive_more 1.0.0", + "eyre", + "hex", + "hex-literal", + "itertools 0.14.0", + "jmt 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "k256", + "lazy_static", + "num-bigint 0.4.6", + "num-traits", + "once_cell", + "risc0-groth16", + "risc0-zkvm", + "serde", + "serde_json", + "sha2", + "sov-rollup-interface", + "tracing", +] + +[[package]] +name = "citrea-e2e" +version = "0.1.0" +source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=859cddf#859cddf2f7f6ecc92be0cd5f99d282bff2b632ce" +dependencies = [ + "alloy-primitives", + "anyhow", + "async-trait", + "bitcoin", + "bitcoincore-rpc", + "bollard", + "futures", + "hex", + "jsonrpsee", + "nix", + "rand 0.8.5", + "serde", + "serde_json", + "tempfile", + "tokio", + "toml", + "tracing", + "tracing-subscriber 0.3.19", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = 
"clap_derive" +version = "4.5.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "clap_lex" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" + +[[package]] +name = "clementine-core" +version = "0.4.0" +dependencies = [ + "alloy", + "alloy-sol-types", + "ark-bn254", + "ark-ff 0.5.0", + "ark-groth16", + "ark-serialize 0.5.0", + "async-stream", + "async-trait", + "base64 0.22.1", + "bincode", + "bitcoin", + "bitcoin-script", + "bitcoincore-rpc", + "bitvm", + "borsh", + "bridge-circuit-host", + "chacha20poly1305", + "circuits-lib", + "citrea-e2e", + "clap", + "color-eyre", + "ctor", + "eyre", + "futures", + "futures-core", + "futures-util", + "hex", + "hex-literal", + "hkdf", + "http", + "hyper", + "hyper-util", + "jsonrpc-async", + "jsonrpsee", + "lazy_static", + "log", + "metrics", + "metrics-derive", + "metrics-exporter-prometheus", + "metrics-util", + "once_cell", + "pgmq", + "prost", + "rand 0.8.5", + "rand_chacha 0.9.0", + "reqwest", + "risc0-zkvm", + "rustls", + "rustls-pki-types", + "secp256k1 0.31.0", + "secrecy", + "serde", + "serde_json", + "serde_with", + "serial_test", + "sha2", + "sov-rollup-interface", + "sqlx", + "statig", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tokio-retry", + "tokio-stream", + "toml", + "tonic", + "tonic-build", + "tower 0.4.13", + "tracing", + "tracing-subscriber 0.3.19", + "url", + "vergen-git2", + "x25519-dalek", +] + +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + +[[package]] +name = "cobs" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.12", +] + +[[package]] +name = "color-eyre" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5920befb47832a6d61ee3a3a846565cfa39b331331e68a3b1d1116630f2f26d" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8b88ea9df13354b55bc7234ebcce36e6ef896aca2e42a15de9e10edce01b427" +dependencies = [ + "once_cell", + "owo-colors", + "tracing-core", + "tracing-error", +] + +[[package]] +name = "colorchoice" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + +[[package]] +name = "colored" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" +dependencies = [ + "lazy_static", + "windows-sys 0.59.0", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if", + 
"wasm-bindgen", +] + +[[package]] +name = "const-hex" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = 
"core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation 0.9.4", + "libc", +] + +[[package]] +name = "cpp_demangle" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crossterm" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" +dependencies = [ + "bitflags 1.3.2", + "crossterm_winapi", + "libc", + "mio 0.8.11", + "parking_lot", + "signal-hook", + "signal-hook-mio", + "winapi", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi", +] + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "typenum", +] + +[[package]] +name = "ctor" +version = "0.4.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec09e802f5081de6157da9a75701d6c713d8dc3ba52571fd4bd25f412644e8a6" +dependencies = [ + "ctor-proc-macro", + "dtor", +] + +[[package]] +name = "ctor-proc-macro" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2931af7e13dc045d8e9d26afccc6fa115d64e115c9c84b1166288b46f6782c2" + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "fiat-crypto", + "rustc_version 0.4.1", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.104", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] 
+name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.104", +] + +[[package]] +name = "derive_more" +version = "0.99.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "docker-generate" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf673e0848ef09fa4aeeba78e681cf651c0c7d35f76ee38cec8e55bc32fa111" + +[[package]] +name = "doctest-file" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" + +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = 
"downloader" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ac1e888d6830712d565b2f3a974be3200be9296bc1b03db8251a4cbf18a4a34" +dependencies = [ + "digest 0.10.7", + "futures", + "rand 0.8.5", + "reqwest", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "dtor" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97cbdf2ad6846025e8e25df05171abfb30e3ababa12ee0a0e44b9bbe570633a8" +dependencies = [ + "dtor-proc-macro", +] + +[[package]] +name = "dtor-proc-macro" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7454e41ff9012c00d53cf7f475c5e3afa3b91b7c90568495495e8d9bf47a1055" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "duplicate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de78e66ac9061e030587b2a2e75cc88f22304913c907b11307bca737141230cb" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "serdect", + "signature", + "spki", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] 
+name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +dependencies = [ + "serde", +] + +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pem-rfc7468", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "endian-type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" + +[[package]] +name = "enum-map" +version = "2.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" +dependencies = [ + "enum-map-derive", +] + +[[package]] +name = "enum-map-derive" +version = "0.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "erased-serde" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e004d887f51fcb9fef17317a2f3525c887d8aa3f4f50fed920816a688284a5b7" +dependencies = [ + "serde", + "typeid", +] + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "event-listener" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = 
"eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "faster-hex" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7223ae2d2f179b803433d9c830478527e92b8117eab39460edae7f1614d9fb73" +dependencies = [ + "heapless 0.8.0", + "serde", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "bitvec", + "byteorder", + "ff_derive", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "ff_derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f10d12652036b0e99197587c6ba87a8fc3031986499973c030d8b44fcc151b60" +dependencies = [ + "addchain", + "num-bigint 0.3.3", + "num-integer", + "num-traits", + "proc-macro2", + "quote", 
+ "syn 1.0.109", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "flate2" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + "spin", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared 0.1.1", +] + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared 0.3.1", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + 
"futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +dependencies = [ + "fallible-iterator", + "stable_deref_trait", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "git2" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" +dependencies = [ + "bitflags 2.9.1", + "libc", + "libgit2-sys", + 
"log", + "url", +] + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "h2" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.10.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + +[[package]] +name = "hash32" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" +dependencies = [ + "byteorder", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = 
"hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.4", +] + +[[package]] +name = "heapless" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" +dependencies = [ + "atomic-polyfill", + "hash32 0.2.1", + "rustc_version 0.4.1", + "serde", + "spin", + "stable_deref_trait", +] + +[[package]] +name = "heapless" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" +dependencies = [ + "hash32 0.3.1", + "stable_deref_trait", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hex_lit" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "home" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + 
+[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-named-pipe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" +dependencies = [ + "hex", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "log", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.2", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "hyperlocal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core 0.61.2", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ics23" +version = "0.12.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b17f1a5bd7d12ad30a21445cfa5f52fd7651cb3243ba866f9916b1ec112f12" +dependencies = [ + "anyhow", + "blake2", + "blake3", + "bytes", + "hex", + "informalsystems-pbjson", + "prost", + "ripemd", + "serde", + "sha2", + "sha3", +] + +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "include_bytes_aligned" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" + +[[package]] +name = "indenter" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +dependencies = [ + "equivalent", + "hashbrown 0.15.4", + "serde", +] + +[[package]] +name = "informalsystems-pbjson" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa4a0980c8379295100d70854354e78df2ee1c6ca0f96ffe89afeb3140e3a3d" +dependencies = [ + "base64 0.21.7", + "serde", +] + +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array", +] + +[[package]] +name = "interprocess" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d941b405bd2322993887859a8ee6ac9134945a24ec5ec763a8a962fc64dfec2d" +dependencies = [ + "doctest-file", + "futures-core", + "libc", + "recvmsg", + "tokio", + "widestring", + "windows-sys 0.52.0", +] + +[[package]] +name = "inventory" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab08d7cd2c5897f2c949e5383ea7c7db03fb19130ffcfbf7eda795137ae3cb83" +dependencies = [ + "rustversion", +] + +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] 
+ +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jmt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf2a10370b45cd850e64993ccd81d25ea2d4b5b0d0312546e7489fed82064f2e" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive 0.3.3", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "jmt" +version = "0.11.0" +source = "git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2#550a2f20984a5c31c51715381d3f67390e138ffa" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive 0.3.3", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.3", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "jsonrpc-async" +version = "2.0.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a20e8e4ed08ee58717113cbf277b1ecef5cd9554d3e48c114de338289727d466" +dependencies = [ + "async-trait", + "base64-compat", + "serde", + "serde_derive", + "serde_json", + "tokio", +] + +[[package]] +name = "jsonrpsee" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b26c20e2178756451cfeb0661fb74c47dd5988cb7e3939de7e9241fd604d42" +dependencies = [ + "jsonrpsee-core", + "jsonrpsee-http-client", + "jsonrpsee-proc-macros", + "jsonrpsee-types", + "tracing", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456196007ca3a14db478346f58c7238028d55ee15c1df15115596e411ff27925" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "jsonrpsee-types", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-http-client" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c872b6c9961a4ccc543e321bb5b89f6b2d2c7fe8b61906918273a3333c95400c" +dependencies = [ + "async-trait", + "base64 0.22.1", + "http-body", + "hyper", + "hyper-rustls", + "hyper-util", + "jsonrpsee-core", + "jsonrpsee-types", + "rustls", + "rustls-platform-verifier", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tower 0.4.13", + "tracing", + "url", +] + +[[package]] +name = "jsonrpsee-proc-macros" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e65763c942dfc9358146571911b0cd1c361c2d63e2d2305622d40d36376ca80" +dependencies = [ + "heck 0.5.0", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"08a8e70baf945b6b5752fc8eb38c918a48f1234daf11355e07106d963f860089" +dependencies = [ + "http", + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "jsonwebtoken" +version = "9.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +dependencies = [ + "base64 0.22.1", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "serdect", + "sha2", + "signature", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "lazy-regex" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60c7310b93682b36b98fa7ea4de998d3463ccbebd94d935d6b48ba5b6ffa7126" +dependencies = [ + "lazy-regex-proc_macros", + "once_cell", + "regex", +] + +[[package]] +name = "lazy-regex-proc_macros" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ba01db5ef81e17eb10a5e0f2109d1b3a3e29bac3070fdbd7d156bf7dbd206a1" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.104", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libgit2-sys" +version = "0.18.2+1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" +dependencies = [ + "cc", + "libc", + "libz-sys", + "pkg-config", +] + +[[package]] +name = "libloading" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +dependencies = [ + "cfg-if", + "windows-targets 0.53.3", +] + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" +dependencies = [ + "bitflags 2.9.1", + "libc", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + 
+[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "lock_api" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "lru" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" +dependencies = [ + "hashbrown 0.15.4", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "lzma-sys" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", 
+] + +[[package]] +name = "malachite" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fbdf9cb251732db30a7200ebb6ae5d22fe8e11397364416617d2c2cf0c51cb5" +dependencies = [ + "malachite-base", + "malachite-float", + "malachite-nz", + "malachite-q", +] + +[[package]] +name = "malachite-base" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ea0ed76adf7defc1a92240b5c36d5368cfe9251640dcce5bd2d0b7c1fd87aeb" +dependencies = [ + "hashbrown 0.14.5", + "itertools 0.11.0", + "libm", + "ryu", +] + +[[package]] +name = "malachite-float" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af9d20db1c73759c1377db7b27575df6f2eab7368809dd62c0a715dc1bcc39f7" +dependencies = [ + "itertools 0.11.0", + "malachite-base", + "malachite-nz", + "malachite-q", +] + +[[package]] +name = "malachite-nz" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34a79feebb2bc9aa7762047c8e5495269a367da6b5a90a99882a0aeeac1841f7" +dependencies = [ + "itertools 0.11.0", + "libm", + "malachite-base", +] + +[[package]] +name = "malachite-q" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f235d5747b1256b47620f5640c2a17a88c7569eebdf27cd9cb130e1a619191" +dependencies = [ + "itertools 0.11.0", + "malachite-base", + "malachite-nz", +] + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "matrixmultiply" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06de3016e9fae57a36fd14dba131fccf49f74b40b7fbdb472f96e361ec71a08" +dependencies = [ + "autocfg", + "rawpointer", +] + +[[package]] +name = "maybe-async" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "memmap2" +version = "0.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" +dependencies = [ + "libc", +] + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "metal" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21" +dependencies = [ + "bitflags 2.9.1", + "block", + "core-graphics-types", + "foreign-types 0.5.0", + "log", + "objc", + "paste", +] + +[[package]] +name = "metrics" +version = "0.23.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3045b4193fbdc5b5681f32f11070da9be3609f189a79f3390706d42587f46bb5" +dependencies = [ + "ahash", + "portable-atomic", +] + +[[package]] +name = "metrics-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3dbdd96ed57d565ec744cba02862d707acf373c5772d152abae6ec5c4e24f6c" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.104", +] + +[[package]] +name = "metrics-exporter-prometheus" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" +dependencies = [ + "base64 0.22.1", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "indexmap 2.10.0", + "ipnet", + "metrics", + "metrics-util", + "quanta", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "metrics-util" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" +dependencies = [ + "aho-corasick", + "crossbeam-epoch", + "crossbeam-utils", + "hashbrown 0.14.5", + "indexmap 2.10.0", + "metrics", + "num_cpus", + "ordered-float", + "quanta", + "radix_trie", + "sketches-ddsketch", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" 
+version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "log", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + +[[package]] +name = "mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + +[[package]] +name = "multimap" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "ndarray" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841" +dependencies = [ + "matrixmultiply", + "num-complex", + "num-integer", + "num-traits", + "portable-atomic", + "portable-atomic-util", + "rawpointer", + "rayon", +] + +[[package]] +name = "nibble_vec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" +dependencies = [ + "smallvec", +] + +[[package]] +name = "nix" +version = "0.29.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "no_std_strings" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b0c77c1b780822bc749a33e39aeb2c07584ab93332303babeabb645298a76e" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 
0.8.5", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", 
+] + +[[package]] +name = "num_enum" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + +[[package]] +name = "nvtx" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad2e855e8019f99e4b94ac33670eb4e4f570a2e044f3749a0b2c7f83b841e52c" +dependencies = [ + "cc", +] + +[[package]] +name = "nybbles" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "alloy-rlp", + "const-hex", + "proptest", + "serde", + "smallvec", +] + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "objc2-core-foundation" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "object" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" +dependencies = [ + "flate2", + "memchr", + 
"ruzstd", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + +[[package]] +name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "openssl" +version = "0.10.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "foreign-types 0.3.2", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = "0.9.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "option-ext" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "ordered-float" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" +dependencies = [ + "num-traits", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "owo-colors" +version = "4.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48dd4f4a2c8405440fd0462561f0e5806bd0f77e86f51c761481bdd4018b545e" + +[[package]] +name = "parity-scale-codec" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pem" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" +dependencies = [ + "memchr", + "thiserror 2.0.12", + "ucd-trie", +] + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset", + "indexmap 2.10.0", +] + +[[package]] +name = "pgmq" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b776031ffc4e2291941f2657fad6276408d4f16aa4084ac4ca934ce8d2d886d" +dependencies = [ + "chrono", + "log", + "serde", + "serde_json", + "sqlx", + "thiserror 1.0.69", + "tokio", + "url", +] + +[[package]] +name = "pharos" +version = 
"0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.1", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "postcard" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "heapless 0.7.17", + "serde", +] + +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff24dfcda44452b9816fff4cd4227e1bb73ff5a2f1bc1105aa92fb8565ce44d2" +dependencies = [ + "proc-macro2", + "syn 2.0.104", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" +dependencies = [ + 
"bit-set", + "bit-vec", + "bitflags 2.9.1", + "lazy_static", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax 0.8.5", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +dependencies = [ + "heck 0.5.0", + "itertools 0.14.0", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.104", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + +[[package]] +name = "puffin" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa9dae7b05c02ec1a6bc9bcf20d8bc64a7dcbf57934107902a872014899b741f" +dependencies = [ + "anyhow", + "byteorder", + "cfg-if", + "itertools 0.10.5", + "once_cell", + "parking_lot", +] + +[[package]] +name = "quanta" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi 
0.11.1+wasi-snapshot-preview1", + "web-sys", + "winapi", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quinn" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.1.1", + "rustls", + "socket2 0.5.10", + "thiserror 2.0.12", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +dependencies = [ + "bytes", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash 2.1.1", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.12", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "radix_trie" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" +dependencies = [ + "endian-type", + "nibble_vec", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "serde", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "raw-cpuid" +version = "11.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df7ab838ed27997ba19a4664507e6f82b41fe6e20be42929332156e5e85146" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "recvmsg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" + +[[package]] +name = "redox_syscall" +version = "0.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "ref-cast" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" +dependencies = [ + 
"ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-tls", + "hyper-util", + "js-sys", + "log", + "native-tls", + "percent-encoding", + 
"pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tokio-util", + "tower 0.5.2", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots 1.0.2", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ringbuffer" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3df6368f71f205ff9c33c076d170dd56ebf68e8161c733c0caa07a7a5509ed53" + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "risc0-binfmt" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62eb7025356a233c1bc267c458a2ce56fcfc89b136d813c8a77be14ef1eaf2b1" +dependencies = [ + "anyhow", + "borsh", + "derive_more 2.0.1", + "elf", + "lazy_static", + "postcard", + "risc0-zkp", + "risc0-zkvm-platform", + "semver 1.0.26", + "serde", + "tracing", +] + +[[package]] +name = "risc0-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ffc0f135e6c1e9851e7e19438d03ff41a9d49199ee4f6c17b8bb30b4f83910" +dependencies = [ + "anyhow", + "cargo_metadata", + 
"derive_builder", + "dirs", + "docker-generate", + "hex", + "risc0-binfmt", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rzup", + "semver 1.0.26", + "serde", + "serde_json", + "stability", + "tempfile", +] + +[[package]] +name = "risc0-build-kernel" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cc3029ee7a4103aa176346f85431f1aa5193ea4025844417fcf1591f66299d4" +dependencies = [ + "cc", + "directories", + "glob", + "hex", + "rayon", + "sha2", + "tempfile", +] + +[[package]] +name = "risc0-circuit-keccak" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0094af5a57b020388a03bdd3834959c7d62723f1687be81414ade25104d93263" +dependencies = [ + "anyhow", + "bytemuck", + "cfg-if", + "keccak", + "paste", + "rayon", + "risc0-binfmt", + "risc0-circuit-keccak-sys", + "risc0-circuit-recursion", + "risc0-core", + "risc0-sys", + "risc0-zkp", + "tracing", + "xz2", +] + +[[package]] +name = "risc0-circuit-keccak-sys" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43afb4572af3b812fb0c83bfac5014041af10937288dcb67b7f9cea649483ff8" +dependencies = [ + "cc", + "derive_more 2.0.1", + "glob", + "risc0-build-kernel", + "risc0-core", + "risc0-sys", +] + +[[package]] +name = "risc0-circuit-recursion" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ebded45c902c2b6939924a1cddd1d06b5d1d4ad1531e8798ebfee78f9c038d" +dependencies = [ + "anyhow", + "bytemuck", + "cfg-if", + "downloader", + "hex", + "lazy-regex", + "metal", + "rand 0.8.5", + "rayon", + "risc0-circuit-recursion-sys", + "risc0-core", + "risc0-sys", + "risc0-zkp", + "serde", + "sha2", + "tracing", + "zip", +] + +[[package]] +name = "risc0-circuit-recursion-sys" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3a0eda7272f9e18b914f33b85b58e221056dbef1477ceb13351e442a06a44de9" +dependencies = [ + "glob", + "risc0-build-kernel", + "risc0-core", + "risc0-sys", +] + +[[package]] +name = "risc0-circuit-rv32im" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15030849f8356f01f23c74b37dbfa4283100b594eb634109993e9e005ef45f64" +dependencies = [ + "anyhow", + "auto_ops", + "bit-vec", + "bytemuck", + "byteorder", + "cfg-if", + "derive_more 2.0.1", + "enum-map", + "malachite", + "num-derive 0.4.2", + "num-traits", + "paste", + "postcard", + "rand 0.8.5", + "rayon", + "ringbuffer", + "risc0-binfmt", + "risc0-circuit-rv32im-sys", + "risc0-core", + "risc0-sys", + "risc0-zkp", + "serde", + "smallvec", + "tracing", +] + +[[package]] +name = "risc0-circuit-rv32im-sys" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5e586b310d20fab3f141a318704ded77c20ace155af4db1b6594bd60579b90" +dependencies = [ + "cc", + "derive_more 2.0.1", + "glob", + "risc0-build-kernel", + "risc0-core", + "risc0-sys", +] + +[[package]] +name = "risc0-core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317bbf70a8750b64d4fd7a2bdc9d7d5f30d8bb305cae486962c797ef35c8d08e" +dependencies = [ + "bytemuck", + "bytemuck_derive", + "nvtx", + "puffin", + "rand_core 0.6.4", +] + +[[package]] +name = "risc0-groth16" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cf5d0b673d5fc67a89147c2e9c53134707dcc8137a43d1ef06b4ff68e99b74f" +dependencies = [ + "anyhow", + "ark-bn254", + "ark-ec", + "ark-groth16", + "ark-serialize 0.5.0", + "bytemuck", + "hex", + "num-bigint 0.4.6", + "num-traits", + "risc0-binfmt", + "risc0-core", + "risc0-zkp", + "serde", + "serde_json", + "stability", + "tempfile", + "tracing", +] + +[[package]] +name = "risc0-sys" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"11abd6064c039f24b58676419cd13c92cbf4858e66948dd55b188b03511db44c" +dependencies = [ + "anyhow", + "risc0-build-kernel", +] + +[[package]] +name = "risc0-zkos-v1compat" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76c479b69d1987cb54ac72dcc017197296fdcd6daf78fafc10cbbd3a167a7de" +dependencies = [ + "include_bytes_aligned", + "no_std_strings", +] + +[[package]] +name = "risc0-zkp" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287e9cd6d7b3b38eeb49c62090c46a1935922309fbd997a9143ed8c43c8f3cb" +dependencies = [ + "anyhow", + "blake2", + "borsh", + "bytemuck", + "cfg-if", + "digest 0.10.7", + "ff", + "hex", + "hex-literal", + "metal", + "ndarray", + "parking_lot", + "paste", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "risc0-core", + "risc0-sys", + "risc0-zkvm-platform", + "serde", + "sha2", + "stability", + "tracing", +] + +[[package]] +name = "risc0-zkvm" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9684b333c1c5d83f29ce2a92314ccfafd9d8cdfa6c4e19c07b97015d2f1eb9d0" +dependencies = [ + "addr2line 0.22.0", + "anyhow", + "bincode", + "bonsai-sdk", + "borsh", + "bytemuck", + "bytes", + "derive_more 2.0.1", + "elf", + "enum-map", + "getrandom 0.2.16", + "hex", + "keccak", + "lazy-regex", + "num-bigint 0.4.6", + "num-traits", + "prost", + "rand 0.8.5", + "rayon", + "risc0-binfmt", + "risc0-build", + "risc0-circuit-keccak", + "risc0-circuit-recursion", + "risc0-circuit-rv32im", + "risc0-core", + "risc0-groth16", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rrs-lib", + "rustc-demangle", + "rzup", + "semver 1.0.26", + "serde", + "sha2", + "stability", + "tempfile", + "tracing", + "typetag", +] + +[[package]] +name = "risc0-zkvm-platform" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cae9cb2c2f6cab2dfa395ea6e2576713929040c7fb0c5f4150d13e1119d18686" +dependencies = [ + "bytemuck", + "cfg-if", + "getrandom 0.2.16", + "getrandom 0.3.3", + "libm", + "stability", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "rrs-lib" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4382d3af3a4ebdae7f64ba6edd9114fff92c89808004c4943b393377a25d001" +dependencies = [ + "downcast-rs", + "paste", +] + +[[package]] +name = "rsa" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "signature", + "spki", + "subtle", + "zeroize", +] + +[[package]] +name = "ruint" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rand 0.9.2", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + 
+[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" +dependencies = [ + "rand 0.8.5", +] + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.26", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys 0.9.4", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustls" +version = "0.23.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +dependencies = [ + "aws-lc-rs", + "log", + "once_cell", + 
"ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework 3.2.0", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-platform-verifier" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19787cda76408ec5404443dc8b31795c87cd8fec49762dc75fa727740d34acc1" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework 3.2.0", + "security-framework-sys", + "webpki-root-certs 0.26.11", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + +[[package]] +name = "rustls-webpki" +version = "0.103.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "ruzstd" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5174a470eeb535a721ae9fdd6e291c2411a906b96592182d05217591d5c5cf7b" +dependencies = [ + "byteorder", + "derive_more 0.99.20", + "twox-hash", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "rzup" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400558bf12d4292a7804093b60a437ba8b0219ea7d53716b2c010a0d31e5f4a8" +dependencies = [ + "semver 1.0.26", + "serde", + "strum 0.26.3", + "tempfile", + "thiserror 2.0.12", + "toml", + "yaml-rust2", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scc" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22b2d775fb28f245817589471dd49c5edf64237f4a19d10ce9a92ff4651a27f4" +dependencies = [ + "sdd", +] + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "schemars" +version = "0.8.22" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.104", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "script-macro" +version = "0.4.0" +source = "git+https://github.com/BitVM/rust-bitcoin-script#3c75a6e4cfddb800f57710d8270c0a1dc60eb57e" +dependencies = [ + "bitcoin", + "proc-macro-error", + "proc-macro2", + "quote", +] + +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "serdect", + "subtle", + 
"zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" +dependencies = [ + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys 0.10.1", + "serde", +] + +[[package]] +name = "secp256k1" +version = "0.31.0" +source = "git+https://github.com/rust-bitcoin/rust-secp256k1?rev=4d36fefdddb118425bb9bcf611bb6e4dff306cfc#4d36fefdddb118425bb9bcf611bb6e4dff306cfc" +dependencies = [ + "bitcoin_hashes", + "rand 0.9.2", + "secp256k1-sys 0.11.0", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + +[[package]] +name = "secp256k1-sys" +version = "0.11.0" +source = "git+https://github.com/rust-bitcoin/rust-secp256k1?rev=4d36fefdddb118425bb9bcf611bb6e4dff306cfc#4d36fefdddb118425bb9bcf611bb6e4dff306cfc" +dependencies = [ + "cc", +] + +[[package]] +name = "secrecy" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +dependencies = [ + "serde", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.9.1", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +dependencies = [ + "bitflags 2.9.1", + "core-foundation 0.10.1", + "core-foundation-sys", + "libc", + "security-framework-sys", +] 
+ +[[package]] +name = "security-framework-sys" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-wasm-bindgen" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8302e169f0eddcc139c70f139d19d6467353af16f9fce27e8c30158036a1e16b" +dependencies = [ + "js-sys", + "serde", + "wasm-bindgen", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_derive_internals" 
+version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_json" +version = "1.0.142" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.10.0", + "schemars 0.9.0", + "schemars 1.0.4", + "serde", + "serde_derive", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] 
+name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", + "sha2-asm", +] + +[[package]] +name = "sha2-asm" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +dependencies = [ + "cc", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" +dependencies = [ + "libc", + "mio 0.8.11", + "signal-hook", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +dependencies = [ + 
"num-bigint 0.4.6", + "num-traits", + "thiserror 2.0.12", + "time", +] + +[[package]] +name = "sketches-ddsketch" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" + +[[package]] +name = "slab" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "sov-keys" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "borsh", + "derive_more 1.0.0", + "digest 0.10.7", + "hex", + "k256", + "rand 0.8.5", + "schemars 0.8.22", + "serde", + "sha2", + "thiserror 2.0.12", +] + +[[package]] +name = "sov-rollup-interface" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "alloy-primitives", + "anyhow", + "async-trait", + "borsh", + "bytes", + "digest 0.10.7", + "faster-hex", + "hex", + "jmt 0.11.0 (git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2)", + "risc0-zkp", + "serde", + "sha2", + "sov-keys", + 
"thiserror 2.0.12", + "tokio", + "tracing", + "uuid", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "sqlx" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fefb893899429669dcdd979aff487bd78f4064e5e7907e4269081e0ef7d97dc" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "either", + "event-listener", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashbrown 0.15.4", + "hashlink 0.10.0", + "indexmap 2.10.0", + "log", + "memchr", + "once_cell", + "percent-encoding", + "serde", + "serde_json", + "sha2", + "smallvec", + "thiserror 2.0.12", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "sqlx-macros" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2d452988ccaacfbf5e0bdbc348fb91d7c8af5bee192173ac3636b5fb6e6715d" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 2.0.104", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19a9c1841124ac5a61741f96e1d9e2ec77424bf323962dd894bdb93f37d5219b" +dependencies = [ + 
"dotenvy", + "either", + "heck 0.5.0", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 2.0.104", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags 2.9.1", + "byteorder", + "bytes", + "chrono", + "crc", + "digest 0.10.7", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand 0.8.5", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror 2.0.12", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags 2.9.1", + "byteorder", + "chrono", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand 0.8.5", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror 2.0.12", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea" +dependencies = [ + "atoi", + "chrono", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + 
"serde_urlencoded", + "sqlx-core", + "thiserror 2.0.12", + "tracing", + "url", +] + +[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "statig" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42c467cc59664639bf70b8225b1b4a9c30d926f3e010c29e804bf940d618c663" +dependencies = [ + "serde", + "statig_macro", +] + +[[package]] +name = "statig_macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4c61563b68df6e452ceece3fba1329c8c6a5d348fe17b0778fada28bc95fde" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "stdext" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4af28eeb7c18ac2dbdb255d40bee63f203120e1db6b0024b177746ebec7049c1" + +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" 
+version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros 0.26.4", +] + +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros 0.27.2", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.104", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn-solidity" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4560533fbd6914b94a8fb5cc803ed6801c3455668db3b810702c57612bac9412" +dependencies = [ + "paste", + 
"proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "sysinfo" +version = "0.34.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4b93974b3d3aeaa036504b8eefd4c039dced109171c1ae973f1dc63b2c7e4b2" +dependencies = [ + "libc", + "memchr", + "ntapi", + "objc2-core-foundation", + "windows", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix 1.0.8", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "itoa", + "libc", + "num-conv", + "num_threads", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.47.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +dependencies = [ + "backtrace", + "bytes", + "io-uring", + "libc", + "mio 1.0.4", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "slab", + "socket2 0.6.0", + "tokio-macros", + "windows-sys 0.59.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-retry" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +dependencies = [ + "pin-project", + "rand 0.8.5", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" +dependencies = [ + "futures-util", + "log", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tungstenite", + "webpki-roots 0.26.11", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap 2.10.0", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "rustls-pemfile", + "socket2 0.5.10", + "tokio", + "tokio-rustls", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tqdm" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2d2932240205a99b65f15d9861992c95fbb8c9fb280b3a1f17a92db6dc611f" +dependencies = [ + "anyhow", + "crossterm", + "once_cell", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-error" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" +dependencies = [ + "tracing", + "tracing-subscriber 0.3.19", 
+] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" +dependencies = [ + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.9.2", + "rustls", + "rustls-pki-types", + "sha1", + "thiserror 2.0.12", + "utf-8", +] + +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + 
"cfg-if", + "static_assertions", +] + +[[package]] +name = "typeid" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "typetag" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f22b40dd7bfe8c14230cf9702081366421890435b2d625fa92b4acc4c3de6f" +dependencies = [ + "erased-serde", + "inventory", + "once_cell", + "serde", + "typetag-impl", +] + +[[package]] +name = "typetag-impl" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35f5380909ffc31b4de4f4bdf96b877175a016aa2ca98cee39fcfd8c4d53d952" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-normalization" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-properties" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" +dependencies = [ + "borsh", + "borsh-derive", + "getrandom 0.2.16", + "serde", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "vergen" +version = "9.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" +dependencies = [ + "anyhow", + "cargo_metadata", + "derive_builder", + "regex", + "rustc_version 0.4.1", + "rustversion", + "sysinfo", + "time", + "vergen-lib", +] + +[[package]] +name = "vergen-git2" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1" +dependencies = [ + "anyhow", + "derive_builder", + "git2", + "rustversion", + "time", + "vergen", + "vergen-lib", +] + +[[package]] +name = "vergen-lib" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166" +dependencies = [ + "anyhow", + "derive_builder", + "rustversion", +] + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wasmtimer" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8d49b5d6c64e8558d9b1b065014426f35c18de636895d24893dbbd329743446" +dependencies = [ + "futures", + "js-sys", + "parking_lot", + "pin-utils", + "slab", + "wasm-bindgen", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = 
"1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" +dependencies = [ + "webpki-root-certs 1.0.2", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.2", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.44", +] + +[[package]] +name = "whoami" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7" +dependencies = [ + "redox_syscall", + "wasite", +] + +[[package]] +name = "widestring" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" + +[[package]] +name = "winapi" +version = "0.3.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +dependencies = [ + "windows-core 0.57.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement 0.57.0", + "windows-interface 0.57.0", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement 0.60.0", + "windows-interface 0.59.1", + "windows-link", + "windows-result 0.3.4", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + 
"windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version 
= "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" 
+version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "writeable" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "ws_stream_wasm" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version 0.4.1", + "send_wrapper", + "thiserror 2.0.12", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", + "zeroize", +] + +[[package]] +name = "xz2" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" +dependencies = [ + "lzma-sys", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink 0.9.1", +] + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" 
+version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zip" +version = "2.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fabe6324e908f85a1c52063ce7aa26b68dcb7eb6dbc83a2d148403c9bc3eba50" +dependencies = [ + "arbitrary", + "crc32fast", + "crossbeam-utils", + "displaydoc", + "flate2", + "indexmap 2.10.0", + "memchr", + "thiserror 2.0.12", + "zopfli", +] + +[[package]] +name = "zopfli" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edfc5ee405f504cd4984ecc6f14d02d55cfda60fa4b689434ef4102aae150cd7" +dependencies = [ + "bumpalo", + "crc32fast", + "log", + "simd-adler32", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..fe1219eb4 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,145 @@ +[workspace] +resolver = "2" +members = [ + "core", + "circuits-lib", + # "risc0-circuits/bridge-circuit", + # "risc0-circuits/work-only", + "bridge-circuit-host", +] # Add "risc0-circuits/operator", "risc0-circuits/watchtower" later + +[workspace.dependencies] +color-eyre = "0.6.5" +ctor = "0.4.2" +hex = "0.4.3" +lazy_static = { version = "1.5.0", default-features = false } +serde = { version = "1.0", default-features = false, features = ["derive"] } +serde_json = "1.0.128" +thiserror = "1.0.64" +metrics = { version = "0.23.0" } +metrics-derive = { version = "0.1.0" } +metrics-exporter-prometheus = { version = "0.15.3" } +metrics-util = { version = "0.17.0" } +tracing = { version = "0.1.40", default-features = false } 
+tracing-subscriber = { version = "0.3.18", features = ["json"] } +log = "0.4" +jsonrpsee = { version = "0.24.2", default-features = false } +async-trait = "0.1.83" +clap = "4.5.20" +toml = "0.8.19" +sqlx = { version = "0.8.3", default-features = false } +serial_test = "3.2.0" +tempfile = "3.8" +eyre = { version = "0.6.12" } +tokio-retry = { version = "0.3" } +alloy = { version = "0.11.1", features = ["full"] } +statig = { version = "0.3.0", features = ["async", "serde"] } +pgmq = "0.30.0" +serde_with = "3.12.0" +rand_chacha = "0.9.0" +secrecy = { version = "0.10.0", features = ["serde"] } +reqwest = { version = "0.12.12", features = ["rustls-tls", "json", "http2"], default-features = false } + +# for emergency stop encryption +x25519-dalek = { version = "2", features = ["static_secrets"] } +chacha20poly1305 = { version = "0.10", features = ["stream"] } + + +# Citrea dependencies +citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "859cddf" } + +citrea-sov-rollup-interface = { git = "https://github.com/chainwayxyz/citrea", tag = "v0.7.3-rc.5", package = "sov-rollup-interface" } + +# bitcoin +bitcoin = { version = "0.32.5", features = ["serde", "base64"] } +bitcoincore-rpc = "0.18.0" +secp256k1 = { version = "0.31.0", features = [ + "serde", + "rand", + "std", + "global-context", +] } +bitcoin-script = { git = "https://github.com/BitVM/rust-bitcoin-script" } + +# async + gRPC +tonic = { version = "0.12.3", features = ["tls"] } +prost = "0.13.3" +tokio = { version = "1.40.0", features = ["full"] } +tokio-stream = { version = "0.1.16", features = ["sync"] } +futures = "0.3.31" +async-stream = "0.3.6" +futures-util = "0.3.31" +futures-core = "0.3.31" +http = "^1" +hyper = "^1" +tower = "^0.4" +hyper-util = { version = "0.1" } + +# Circuits +sha2 = { version = "=0.10.8", default-features = false } +hkdf = { version = "0.12.4", default-features = false } +crypto-bigint = { version = "=0.5.5", features = ["rand_core"] } +borsh = { version = "1.5.1", 
features = ["derive"] } +k256 = { version = "=0.13.4" } +risc0-build = "2.3.1" +risc0-zkvm = "2.3.1" +once_cell = "1.10.0" +jmt = "0.11.0" +derive_more = { version = "1.0.0", features = ["display"] } +blake3 = "1.6.1" +itertools = "0.14.0" +bitvm = { git = "https://github.com/chainwayxyz/BitVM", rev = "a82e5a6bbc1183f98e8f2abd762baf20eb054475" } + +ark-groth16 = { version = "0.5.0", default-features = false } +ark-serialize = "0.5.0" +ark-bn254 = { version = "0.5.0", features = [ + "curve", + "scalar_field", +], default-features = false } + +# Bridge Circuit Host +risc0-circuit-recursion = "=3.0.0" +risc0-zkp = "=2.0.2" +risc0-groth16 = "=2.0.2" +risc0-binfmt = { version = "=2.0.2" } + +ark-ff = "0.5.0" +ark-ec = "0.5.0" +ark-std = "0.5.0" +ark-crypto-primitives = "0.5.0" +ark-relations = "0.5.0" + +alloy-rpc-types = "0.11.1" +alloy-primitives = "0.8.25" +alloy-rpc-types-eth = "0.11.1" +alloy-rpc-client = "0.11.1" +alloy-sol-types = { version = "0.8.25", default-features = false, features = ["json"] } + + +rand = "0.8" +num-bigint = "0.4.6" +num-traits = "0.2.19" +bincode = "1.3.3" +hex-literal = "0.4.1" +rustls = "0.23.27" +rustls-pki-types = "1.11.0" + +base64 = "0.22.1" +vergen-git2 = { version = "1.0.0", features = [ + "build", + "cargo", + "rustc", + "si", +] } + +[patch.crates-io] +bitcoincore-rpc = { version = "0.18.0", git = "https://github.com/chainwayxyz/rust-bitcoincore-rpc.git", rev = "5da45109a2de352472a6056ef90a517b66bc106f" } +secp256k1 = { git = "https://github.com/rust-bitcoin/rust-secp256k1", rev = "4d36fefdddb118425bb9bcf611bb6e4dff306cfc" } + +[profile.dev.package.backtrace] +opt-level = 3 + +[profile.release] +lto = true +strip = true +codegen-units = 1 diff --git a/README.md b/README.md new file mode 100644 index 000000000..c4494bc19 --- /dev/null +++ b/README.md @@ -0,0 +1,41 @@ +# Clementine ๐ŸŠ + +Clementine is Citrea's BitVM based trust-minimized two-way peg program. 
You can +check Clementine whitepaper at [citrea.xyz/clementine_whitepaper.pdf](https://citrea.xyz/clementine_whitepaper.pdf). + +The repository includes: + +- A library for bridge operator, verifiers, aggregator and watchtower +- Circuits that will be optimistically verified with BitVM + +> [!WARNING] +> +> Clementine is still a work in progress. It has not been audited and should not +> be used in production under any circumstances. It also requires a full BitVM +> implementation to be run fully on-chain. + +## Documentation + +High level documentations are in [docs/](docs). These documentations explains +the design, architecture and usage of Clementine. + +To start using Clementine, jump [docs/usage.md](docs/usage.md) documentation. + +Code documentation is also present and can be viewed at +[chainwayxyz.github.io/clementine/clementine_core](https://chainwayxyz.github.io/clementine/clementine_core/). + +It can also be generated locally: + +```bash +cargo doc --no-deps +``` + +Documentation will be available at `target/doc/clementine_core/index.html` after +that. + +## License + +**(C) 2025 Chainway Limited** `clementine` was developed by Chainway Limited. +While we plan to adopt an open source license, we have not yet selected one. As +such, all rights are reserved for the time being. Please reach out to us if you +have thoughts on licensing. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..14a70e79c --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,66 @@ +# Clementine Security Information +This document provides links and contact information for Clementine. 
+ +## Email Contact +To contact the Citrea security team by email please use the following email address: + +``` +security@citrea.xyz +``` + +If the issue is sensitive, please encrypt your email using the provided GPG key: + +``` +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGhZQXgBEADs4X61A9OcXJ1vbsFwq/tCm8FVCjHx9KADZ7WRht4Tr/4tlrXP +VT7PeMjf97osh131ofq0i5fmumXSRHRTakn5hUJiIAGPQyaxJd6gXp3U9D8p2Q0S +xb/8sQmkncIqg1a1ZmUxekbBM0PXDePGMUVCFdpIFGiCh1QEES2Or2vI6PPwPFuA +VhAre+HGFnhIk0jtHqOntpviHuWx5Oqp7kmelvdvYqMAOjdWUYG7LUyykJc/xBvD +k9YLYHYa1ItWuQFHsR4GBraTrGOJ7sCgbkT4eneGBQRdwnhRxuWWsDyC5BKT2mq1 +RmhVMd4RmYdmmvlIk9ACOGHRDV4Hi7ymZTgCH1DOhBDHoTHtq4SKUsFHh0+BnZZH +PNGOnw3fRq8mC6tOrBQrf1/iTWPoM3i6hZlBWKdhQf0mwuF5DAo5mvYXc2RUUwY/ +pfYWjiEr+ItanJrzkLPDMedPdMbAW/87CpAKoGkMeg6CmftQgPE30Ksue3b6HExz +wrHIrqqD8JqWxwLIp0eMkh7soMp1avFCUOx57c1hXP3bqGEX9AABur4b3MyIp2QZ +jeyzmXc+3Z4XYZpJz98YRiiQQBkOZ+eZgrTPq7JoQ8gG8GnngEmPY67Ak+dWtRNR +hSJb3mQHwrveoX1aRp7+wtsWpIqplxdEulg7LqHDHyHAYN+h2sO8JPdvhQARAQAB +tCVTZWN1cml0eSBDaXRyZWEgPHNlY3VyaXR5QGNpdHJlYS54eXo+iQJOBBMBCAA4 +FiEEJrjWfVY4H6mv/nsGxnvc0y/0jG4FAmhZQXgCGwMFCwkIBwIGFQoJCAsCBBYC +AwECHgECF4AACgkQxnvc0y/0jG4cdA//V/FR/n1aA58VTOY2Rsf/ou45Dc4SRihJ +PO0fMtsbtKGGEopYob522hUlEqxvq4Kw1g73PXjlhFMD8Qvne/yoKJ9plsKbd121 +DZ9aSGoycAoaFzDX1zUJfSUaEuGu7V6wymdRz48/gC+zQw0gPCE+Muf/Bd7ZNOui +wNoBPSRWG/3Z1pyaAqjAHlRHQ+T9ue7veqVxHQUkQy4nz2kOsHLu4g/LUNjvtqC8 +h4MweQbzyvgjULZkZZYjpWaRtgwCqqhvho46AwVcZ2HOSj+xRc0TQUvx3h74I4vO +vvjZQymIkp0bCQ8muemiZeMpolXh6YH3rWir1Xi72ljgJMI38PD1cd1QlLaFGuoL +yLPTUNUwOmrRI7gDLeyL5LbxvfojGDhpmiGeMQEHYj3fGFEmBjX4yZTsX+ZmQTHC +NH8X8aDFcHMhKxnntUj4O+qWHwSqY72NtKvJhfOZwZUNBH6/zHElyZbHftV2jbKB +QRPrPBmA9qcV91d6IwDrTKUr0xOGJCY9MtATlkSFpfCYxGGgPPw028QlNUlpVG1A +KV79kpYJJPZKF3iFgu00T8PSoAbQvosVCDytb7cYZfdFW51xUZ4T7Lg8Ll/NgX/X +nEcc9Z5NvKUkZZ6R30Pj4rzwIsIPoZaDumkJw9GF7LRArurgyl1e0jAkDk4uwzFE +Jijf9586ypi5Ag0EaFlG3wEQALcC3wL/hBNWvjx51Wl7zRUwr3tJFjKZ4rQrJzII +zsj+R9BZOAvp3Uo/WK4m0oAWLeqhP37m7vn0zoWJzZrOjX5NhuaGNyo3bCBlFvLV 
+p6R+NyamPqVutSNBbh/Do3F15AdTKaQQidesrNzf0UuVk8wI6jCJKcJODCsdxA3Y +QaoF8uVB9Rcx5EktB5o44hJH78/a8tIZFM5VuesbRBxtItyDeIpG+VZZMAFqx740 +B7PoOs/HUtvY683bfidH9EtV7sOMPgUnRMfyFgUTfQPdyOzrUF1h2G92bEupD35U +xiUT43X48UCZl6uOYScKa4/8ftpPmwdWBrBRkLbhI2wCOun/Kthdww03H738AuUa +JuJOBevuquH2ULjdZfHEj9Hqo32x1dbFueLfoR2Rp2nFlAGVyoTCsNxHVuXySSq6 +kWZBFJi3e0R5+LkVV2tztn/9EBqYXKSRADhbR1BNdJbCMIAk9caLDnD7cPkiS3CQ +URQMz/G95InasXgGmBM6rno5dGhJZ3XD+mWPg/eRtvICYaVSo87rR60GEh36IvBe +kHIdxRkIyjZASbJI+f+00Lxk7D8ZmPZjQ+HrsaWOZq6FDRd9dM9OwFqk4MJBOAeI +1I76VDUsVH3rzeWmVs7vUWFMdFczY6xYpP7sJQkP26Res0rxTGSU5C80tiLcykqN +mECZABEBAAGJAjwEGAEIACYWIQQmuNZ9Vjgfqa/+ewbGe9zTL/SMbgUCaFlG3wIb +DAUJB4YfagAKCRDGe9zTL/SMbm33D/4rB3w9iXxRF9T1QUIfUajvZbWVGmbDFiZ1 +XB0JI38EqgUKGm36ykyQbcfSeJKNgWBkOEEO8q9fw8EARkZPbv9fSDtW/3i5jNIW +ygSN36fAIaIUWcZ+Rnkl8C5/9cYIgZuwaWYBearlW75GPqGMNbW92U1GN93QWn3l +Q5/I474MJdHpQpGFOVnhitoLPqBxFOxhRoxSpMQaL8iozmAN7N5K002TcYt95JiO +rH2T3YgOdCnEbDAfpnpvWqMqBtu3ydXnLcHmVuuHJSCaE9tM/WcdUdiLHP9D4qZB +FxY9nFycgH9pYhPAvgSE/HM7HePx9kc22wJiXLzXviGIFYl40su+lzs4fuFMgC6E +c9Hj39reL85QaobPNJI+dcgpsVs0xsKROXPyZYQai8PayJJc1f1V+/tMS14ef7MM +LIYvMmi1uPFaRCFEHnA4onXkB52rYXt5acyrl24a5eDNZD/qRrrDKMqkgXrPrxHJ +23S73LUQFpvDQ+DA/fZor516H3HWHYro1nINLXwip1cX2e2AoaJFP+gWZ2HCypWZ +WkSRLz/cyZC1U7LdMA5Z3whFAwVcI91FHEgbMgdGTWGtNbeKOp5sDqIgmduxXzCu +6Ztsw6ECa11z3k977nN+56AKH/OdbrEtHu9lZLds8yglJpgp9JN7f54CadhxIozi +cXvJNNsO+w== +=0vMP +-----END PGP PUBLIC KEY BLOCK----- +``` \ No newline at end of file diff --git a/audits/Sigma_Prime_Chainway_Labs_Clementine_Security_Assessment_Report_v2_0.pdf b/audits/Sigma_Prime_Chainway_Labs_Clementine_Security_Assessment_Report_v2_0.pdf new file mode 100644 index 000000000..2dbf43165 Binary files /dev/null and b/audits/Sigma_Prime_Chainway_Labs_Clementine_Security_Assessment_Report_v2_0.pdf differ diff --git a/bridge-circuit-host/Cargo.toml b/bridge-circuit-host/Cargo.toml new file mode 100644 index 000000000..2b3363178 --- /dev/null +++ b/bridge-circuit-host/Cargo.toml @@ -0,0 
+1,53 @@ +[package] +name = "bridge-circuit-host" +version = "0.1.0" +edition = "2021" + +[dependencies] +risc0-zkvm = { workspace = true, features = ["metal", "bonsai", "client", "prove"] } +risc0-circuit-recursion = { workspace = true } +risc0-zkp = { workspace = true } +risc0-groth16 = { workspace = true } +risc0-binfmt = { workspace = true } + +ark-bn254 = { workspace = true } +ark-ff = { workspace = true } +ark-ec = { workspace = true } +ark-std = { workspace = true } +ark-crypto-primitives = { workspace = true } +ark-serialize = { workspace = true } +ark-relations = { workspace = true } +ark-groth16 = { workspace = true } + +alloy-rpc-types = { workspace = true } + +serde = { workspace = true } +serde_json = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } +borsh = { workspace = true, features = ["derive"] } +num-bigint = { workspace = true } +num-traits = { workspace = true } +bitcoin = { workspace = true, features = ["serde"] } +tempfile = { workspace = true } +blake3 = { workspace = true } +sha2 = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread"]} +bincode = { workspace = true } +hex = { workspace = true } +hex-literal = { workspace = true } +tracing = { workspace = true, default-features = false } +thiserror = { workspace = true } +eyre = { workspace = true } +once_cell = { workspace = true } + +circuits-lib = { path = "../circuits-lib" } + +citrea-sov-rollup-interface = { workspace = true } + + + + + +[features] +metal = ["risc0-zkvm/metal"] +use-test-vk = ["circuits-lib/use-test-vk"] diff --git a/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_annex.bin b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_annex.bin new file mode 100644 index 000000000..2874a6efa Binary files /dev/null and b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_annex.bin differ diff --git a/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex.bin 
b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex.bin new file mode 100644 index 000000000..9f47cb683 Binary files /dev/null and b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex.bin differ diff --git a/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex_and_output.bin b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex_and_output.bin new file mode 100644 index 000000000..4ecbb38e4 Binary files /dev/null and b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex_and_output.bin differ diff --git a/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_output.bin b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_output.bin new file mode 100644 index 000000000..dae8e8e8f Binary files /dev/null and b/bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_output.bin differ diff --git a/bridge-circuit-host/bin-files/bch_params_varying_total_works.bin b/bridge-circuit-host/bin-files/bch_params_varying_total_works.bin new file mode 100644 index 000000000..157c09d1f Binary files /dev/null and b/bridge-circuit-host/bin-files/bch_params_varying_total_works.bin differ diff --git a/bridge-circuit-host/bin-files/bch_params_varying_total_works_first_two_valid.bin b/bridge-circuit-host/bin-files/bch_params_varying_total_works_first_two_valid.bin new file mode 100644 index 000000000..dbb97408c Binary files /dev/null and b/bridge-circuit-host/bin-files/bch_params_varying_total_works_first_two_valid.bin differ diff --git a/bridge-circuit-host/bin-files/bch_params_varying_total_works_insufficient_total_work.bin b/bridge-circuit-host/bin-files/bch_params_varying_total_works_insufficient_total_work.bin new file mode 100644 index 000000000..88eb3aaaf Binary files /dev/null and b/bridge-circuit-host/bin-files/bch_params_varying_total_works_insufficient_total_work.bin differ diff --git a/bridge-circuit-host/bin-files/testnet4-headers.bin 
b/bridge-circuit-host/bin-files/testnet4-headers.bin new file mode 100644 index 000000000..b7fd9ecec Binary files /dev/null and b/bridge-circuit-host/bin-files/testnet4-headers.bin differ diff --git a/bridge-circuit-host/src/bridge_circuit_host.rs b/bridge-circuit-host/src/bridge_circuit_host.rs new file mode 100644 index 000000000..f19469130 --- /dev/null +++ b/bridge-circuit-host/src/bridge_circuit_host.rs @@ -0,0 +1,767 @@ +use crate::docker::{stark_to_bitvm2_g16, stark_to_bitvm2_g16_dev_mode}; +use crate::structs::{ + BridgeCircuitBitvmInputs, BridgeCircuitHostParams, SuccinctBridgeCircuitPublicInputs, +}; +use crate::utils::{calculate_succinct_output_prefix, is_dev_mode}; +use ark_bn254::Bn254; +use bitcoin::Transaction; +use borsh; +use circuits_lib::bridge_circuit::constants::{ + DEVNET_LC_IMAGE_ID, MAINNET_LC_IMAGE_ID, REGTEST_LC_IMAGE_ID, TESTNET_LC_IMAGE_ID, +}; +use circuits_lib::bridge_circuit::groth16::CircuitGroth16Proof; +use circuits_lib::bridge_circuit::merkle_tree::BitcoinMerkleTree; +use circuits_lib::bridge_circuit::spv::SPV; +use circuits_lib::bridge_circuit::structs::WorkOnlyCircuitInput; +use circuits_lib::bridge_circuit::transaction::CircuitTransaction; +use citrea_sov_rollup_interface::zk::light_client_proof::output::LightClientCircuitOutput; +use eyre::{eyre, Result, WrapErr}; + +use circuits_lib::common::constants::{ + MAINNET_HEADER_CHAIN_METHOD_ID, REGTEST_HEADER_CHAIN_METHOD_ID, SIGNET_HEADER_CHAIN_METHOD_ID, + TESTNET4_HEADER_CHAIN_METHOD_ID, +}; +use circuits_lib::header_chain::mmr_native::MMRNative; +use risc0_zkvm::{compute_image_id, default_prover, ExecutorEnv, ProverOpts, Receipt}; + +pub const REGTEST_BRIDGE_CIRCUIT_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/regtest-bridge-circuit-guest.bin"); + +pub const REGTEST_BRIDGE_CIRCUIT_ELF_TEST: &[u8] = + include_bytes!("../../risc0-circuits/elfs/test-regtest-bridge-circuit-guest.bin"); + +pub const TESTNET4_BRIDGE_CIRCUIT_ELF: &[u8] = + 
include_bytes!("../../risc0-circuits/elfs/testnet4-bridge-circuit-guest.bin"); + +pub const MAINNET_BRIDGE_CIRCUIT_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/mainnet-bridge-circuit-guest.bin"); + +pub const SIGNET_BRIDGE_CIRCUIT_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/signet-bridge-circuit-guest.bin"); + +pub const TESTNET4_HEADER_CHAIN_GUEST_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/testnet4-header-chain-guest.bin"); + +pub const MAINNET_HEADER_CHAIN_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/mainnet-header-chain-guest.bin"); +pub const TESTNET4_HEADER_CHAIN_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/testnet4-header-chain-guest.bin"); +pub const SIGNET_HEADER_CHAIN_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/signet-header-chain-guest.bin"); +pub const REGTEST_HEADER_CHAIN_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/regtest-header-chain-guest.bin"); + +pub const MAINNET_WORK_ONLY_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/mainnet-work-only-guest.bin"); +pub const TESTNET4_WORK_ONLY_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/testnet4-work-only-guest.bin"); +pub const SIGNET_WORK_ONLY_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/signet-work-only-guest.bin"); +pub const REGTEST_WORK_ONLY_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/regtest-work-only-guest.bin"); + +/// Generates a Groth16 proof for the Bridge Circuit after performing sanity checks. +/// +/// This function first validates various conditions such as header chain output, +/// light client proof, and SPV verification. It then constructs a succinct proof +/// for the Bridge Circuit. Finally, it converts the succinct proof +/// into a Groth16 proof using a Circom circuit. +/// +/// # Arguments +/// +/// * `bridge_circuit_host_params` - The host parameters containing circuit-related inputs. 
+/// * `bridge_circuit_elf` - The compiled ELF binary representing the Bridge Circuit. +/// +/// # Returns +/// +/// Returns a Result containing a tuple consisting of: +/// - `ark_groth16::Proof`: The final Groth16 proof. +/// - `[u8; 31]`: The Groth16 output. +/// - `BridgeCircuitBitvmInputs`: The structured inputs for the Bridge Circuit BitVM. +/// +/// # Errors +/// +/// This function will return an error if: +/// - The number of watchtowers does not match expectations. +/// - The header chain proof output differs from the expected value. +/// - Light client proof verification fails. +/// - SPV verification fails. +/// - The journal hash does not match the expected hash. +/// - Any serialization/deserialization operation fails. +/// - The network is unsupported. +/// - The execution environment cannot be built. +/// - Proof generation fails. +/// - Receipt journal conversion fails. +/// - Computing the image ID fails. +/// - Converting succinct receipt fails. +/// - Converting groth16 seal to array fails. 
+/// +pub fn prove_bridge_circuit( + bridge_circuit_host_params: BridgeCircuitHostParams, + bridge_circuit_elf: &[u8], +) -> Result<( + ark_groth16::Proof, + [u8; 31], + BridgeCircuitBitvmInputs, +)> { + tracing::info!("Starting bridge circuit proof generation"); + let bridge_circuit_input = bridge_circuit_host_params + .clone() + .into_bridge_circuit_input(); + + let header_chain_proof_output_serialized = borsh::to_vec(&bridge_circuit_input.hcp) + .wrap_err("Could not serialize header chain output")?; + + if bridge_circuit_input.lcp.lc_journal != bridge_circuit_host_params.lcp_receipt.journal.bytes { + return Err(eyre!("Light client proof output mismatch")); + } + + tracing::debug!(target: "ci", "Watchtower challenges: {:?}", + bridge_circuit_input.watchtower_inputs); + + let lc_image_id = match bridge_circuit_host_params.network.0 { + bitcoin::Network::Bitcoin => MAINNET_LC_IMAGE_ID, + bitcoin::Network::Testnet4 => TESTNET_LC_IMAGE_ID, + bitcoin::Network::Signet => DEVNET_LC_IMAGE_ID, + bitcoin::Network::Regtest => REGTEST_LC_IMAGE_ID, + _ => return Err(eyre!("Unsupported network")), + }; + + // Verify light client proof + if bridge_circuit_host_params + .lcp_receipt + .verify(lc_image_id) + .is_err() + { + return Err(eyre!("Light client proof verification failed")); + } + + // Header chain verification + if header_chain_proof_output_serialized + != bridge_circuit_host_params.headerchain_receipt.journal.bytes + { + return Err(eyre!("Header chain proof output mismatch")); + } + + let header_chain_method_id = match bridge_circuit_host_params.network.0 { + bitcoin::Network::Bitcoin => MAINNET_HEADER_CHAIN_METHOD_ID, + bitcoin::Network::Testnet4 => TESTNET4_HEADER_CHAIN_METHOD_ID, + bitcoin::Network::Signet => SIGNET_HEADER_CHAIN_METHOD_ID, + bitcoin::Network::Regtest => REGTEST_HEADER_CHAIN_METHOD_ID, + _ => return Err(eyre!("Unsupported network")), + }; + + // Check for headerchain receipt + if bridge_circuit_host_params + .headerchain_receipt + 
.verify(header_chain_method_id) + .is_err() + { + return Err(eyre!("Header chain receipt verification failed")); + } + + // SPV verification + if !bridge_circuit_input.payout_spv.verify( + bridge_circuit_input + .hcp + .chain_state + .block_hashes_mmr + .clone(), + ) { + return Err(eyre!("SPV verification failed")); + } + + // Make sure the L1 block hash of the LightClientCircuitOutput matches the payout tx block hash + let lc_output: LightClientCircuitOutput = borsh::from_slice( + bridge_circuit_host_params + .lcp_receipt + .journal + .bytes + .as_slice(), + ) + .wrap_err("Failed to deserialize light client circuit output")?; + + let lc_l1_block_hash = lc_output.latest_da_state.block_hash; + + let spv_l1_block_hash = bridge_circuit_input + .payout_spv + .block_header + .compute_block_hash(); + + if lc_l1_block_hash != spv_l1_block_hash { + return Err(eyre!( + "L1 block hash mismatch: expected {:?}, got {:?}", + lc_l1_block_hash, + spv_l1_block_hash + )); + } + + let public_inputs: SuccinctBridgeCircuitPublicInputs = + SuccinctBridgeCircuitPublicInputs::new(bridge_circuit_input.clone())?; + + let journal_hash = public_inputs.host_journal_hash(); + + let mut binding = ExecutorEnv::builder(); + let env = binding + .write_slice( + &borsh::to_vec(&bridge_circuit_input) + .wrap_err("Failed to serialize bridge circuit input")?, + ) + .add_assumption(bridge_circuit_host_params.headerchain_receipt) + .add_assumption(bridge_circuit_host_params.lcp_receipt) + .build() + .map_err(|e| eyre!("Failed to build execution environment: {}", e))?; + + let prover = default_prover(); + + tracing::info!("Checks complete, proving bridge circuit to generate STARK proof"); + + let succinct_receipt = prover + .prove_with_opts(env, bridge_circuit_elf, &ProverOpts::succinct()) + .map_err(|e| eyre!("Failed to generate bridge circuit proof: {}", e))? 
+ .receipt; + + tracing::info!("Bridge circuit proof (STARK) generated"); + + let succinct_receipt_journal: [u8; 32] = succinct_receipt + .clone() + .journal + .bytes + .try_into() + .map_err(|_| eyre!("Failed to convert journal bytes to array"))?; + + if *journal_hash.as_bytes() != succinct_receipt_journal { + return Err(eyre!("Journal hash mismatch")); + } + + let bridge_circuit_method_id = compute_image_id(bridge_circuit_elf) + .map_err(|e| eyre!("Failed to compute bridge circuit image ID: {}", e))?; + + let combined_method_id_constant = + calculate_succinct_output_prefix(bridge_circuit_method_id.as_bytes()); + + let (g16_proof, g16_output) = if is_dev_mode() { + stark_to_bitvm2_g16_dev_mode(succinct_receipt, &succinct_receipt_journal)? + } else { + stark_to_bitvm2_g16( + succinct_receipt + .inner + .succinct() + .wrap_err("Failed to get succinct receipt")? + .clone(), + &succinct_receipt_journal, + )? + }; + + tracing::info!("Bridge circuit proof (Groth16) generated"); + + let risc0_g16_seal_vec = g16_proof.to_vec(); + let risc0_g16_256 = risc0_g16_seal_vec[0..256] + .try_into() + .wrap_err("Failed to convert groth16 seal to array")?; + let circuit_g16_proof = CircuitGroth16Proof::from_seal(risc0_g16_256); + let ark_groth16_proof: ark_groth16::Proof = circuit_g16_proof.into(); + + tracing::debug!( + target: "ci", + "Circuit debug info:\n\ + - Combined method ID constant: {:?}\n\ + - Payout tx block hash: {:?}\n\ + - Latest block hash: {:?}\n\ + - Challenge sending watchtowers: {:?}\n\ + - Deposit constant: {:?}", + combined_method_id_constant, + public_inputs.payout_tx_block_hash.0, + public_inputs.latest_block_hash.0, + public_inputs.challenge_sending_watchtowers.0, + public_inputs.deposit_constant.0 + ); + + Ok(( + ark_groth16_proof, + g16_output, + BridgeCircuitBitvmInputs { + payout_tx_block_hash: public_inputs.payout_tx_block_hash.0, + latest_block_hash: public_inputs.latest_block_hash.0, + challenge_sending_watchtowers: 
public_inputs.challenge_sending_watchtowers.0, + deposit_constant: public_inputs.deposit_constant.0, + combined_method_id: combined_method_id_constant, + }, + )) +} + +/// Constructs an SPV (Simplified Payment Verification) proof. +/// +/// This function processes block headers, constructs an MMR (Merkle Mountain Range) +/// for block header commitment, and generates a Merkle proof for the payout transaction's +/// inclusion in the block. +/// +/// # Arguments +/// +/// * `payout_tx` - The payout transaction to prove inclusion for. +/// * `block_hash_bytes` - A slice of block hashes, each 32 bytes long. +/// * `payment_block` - The block containing the payout transaction. +/// * `payment_block_height` - The height of the payment block in the blockchain. +/// * `genesis_block_height` - The height of the genesis block. +/// * `payment_tx_index` - The index of the payout transaction in the block's transaction list. +/// +/// # Returns +/// +/// Returns a `Result` containing: +/// - The payout transaction wrapped in a `CircuitTransaction`. +/// - A Merkle proof of the transaction's inclusion in the block. +/// - The block header. +/// - An MMR proof of the block header's inclusion in the MMR. +/// +/// # Errors +/// +/// This function will return an error if: +/// - Input parameters are invalid or out of bounds. +/// - MMR proof generation fails. +/// - Merkle tree construction fails. +/// - Payment block height is less than genesis block height. +/// - Payment transaction index is out of bounds. 
+/// +pub fn create_spv( + payout_tx: Transaction, + block_hash_bytes: &[[u8; 32]], + payment_block: bitcoin::Block, + payment_block_height: u32, + genesis_block_height: u32, + payment_tx_index: u32, +) -> Result { + // Input validation + if payment_block_height < genesis_block_height { + return Err(eyre!( + "Payment block height ({}) cannot be less than genesis block height ({})", + payment_block_height, + genesis_block_height + )); + } + + if payment_tx_index as usize >= payment_block.txdata.len() { + return Err(eyre!( + "Payment transaction index ({}) out of bounds (block has {} transactions)", + payment_tx_index, + payment_block.txdata.len() + )); + } + + let mut mmr_native = MMRNative::new(); + for block_hash in block_hash_bytes { + mmr_native.append(*block_hash); + } + + let block_txids: Vec = payment_block + .txdata + .iter() + .map(|tx| CircuitTransaction(tx.clone())) + .collect(); + + let mmr_inclusion_proof = mmr_native + .generate_proof(payment_block_height - genesis_block_height) + .wrap_err("Failed to generate MMR inclusion proof")?; + + let block_mt = BitcoinMerkleTree::new_mid_state(&block_txids); + + let payout_tx_proof = block_mt.generate_proof(payment_tx_index); + + Ok(SPV { + transaction: CircuitTransaction(payout_tx), + block_inclusion_proof: payout_tx_proof, + block_header: payment_block.header.into(), + mmr_inclusion_proof: mmr_inclusion_proof.1, + }) +} + +/// Generates a Groth16 proof of a bitcoin header chain proof where it only outputs the total work. +/// +/// This function constructs an execution environment, serializes the provided +/// input using Borsh, and utilizes a default prover to generate a proof using +/// the Groth16 proving system. +/// +/// # Arguments +/// +/// * `receipt` - A header-chain `Receipt` that serves as an assumption for the proof. +/// * `input` - A reference to `WorkOnlyCircuitInput` containing the necessary +/// header chain output data. 
+/// +/// # Returns +/// +/// Returns a Result containing a new `Receipt` with the Groth16 proof result. +/// +/// # Errors +/// +/// This function will return an error if: +/// - Input serialization fails. +/// - Execution environment building fails. +/// - Proof generation fails. +/// +pub fn prove_work_only_header_chain_proof( + receipt: Receipt, + input: &WorkOnlyCircuitInput, +) -> Result { + let env = ExecutorEnv::builder() + .add_assumption(receipt) + .write_slice(&borsh::to_vec(&input).wrap_err("Failed to serialize input")?) + .build() + .map_err(|e| eyre!("Failed to build execution environment: {}", e))?; + let prover = default_prover(); + + Ok(prover + .prove_with_opts(env, TESTNET4_WORK_ONLY_ELF, &ProverOpts::groth16()) + .map_err(|e| eyre!("Failed to generate work only header chain proof: {}", e))? + .receipt) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{mock_zkvm::MockZkvmHost, utils::total_work_from_wt_tx}; + + const TESTNET4_HEADER_CHAIN_GUEST_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/testnet4-header-chain-guest.bin"); + const TESTNET4_WORK_ONLY_ELF: &[u8] = + include_bytes!("../../risc0-circuits/elfs/testnet4-work-only-guest.bin"); + + use borsh::BorshDeserialize; + use circuits_lib::{ + bridge_circuit::{ + constants::REGTEST_WORK_ONLY_METHOD_ID, + structs::{ChallengeSendingWatchtowers, TotalWork, WorkOnlyCircuitOutput}, + total_work_and_watchtower_flags, + }, + common::zkvm::ZkvmHost, + header_chain::{ + header_chain_circuit, BlockHeaderCircuitOutput, ChainState, CircuitBlockHeader, + HeaderChainCircuitInput, HeaderChainPrevProofType, + }, + }; + use risc0_zkvm::default_executor; + + const TESTNET4_HEADERS: &[u8] = include_bytes!("../bin-files/testnet4-headers.bin"); + + #[test] + fn test_header_chain_circuit() { + let value = option_env!("BITCOIN_NETWORK"); + println!("BITCOIN_NETWORK: {:?}", value); + let headers = TESTNET4_HEADERS + .chunks(80) + .map(|header| CircuitBlockHeader::try_from_slice(header).unwrap()) + 
.collect::>(); + + let host = MockZkvmHost::new(); + + let input = HeaderChainCircuitInput { + method_id: [0; 8], + prev_proof: HeaderChainPrevProofType::GenesisBlock(ChainState::genesis_state()), + block_headers: headers[..4000].to_vec(), + }; + host.write(&input); + header_chain_circuit(&host); + let proof = host.prove([0; 8].as_ref()); + + let output = BlockHeaderCircuitOutput::try_from_slice(&proof.journal).unwrap(); + let new_host = MockZkvmHost::new(); + + let newinput = HeaderChainCircuitInput { + method_id: [0; 8], + prev_proof: HeaderChainPrevProofType::PrevProof(output), + block_headers: headers[4000..8000].to_vec(), + }; + new_host.write(&newinput); + new_host.add_assumption(proof); + + header_chain_circuit(&new_host); + + let new_proof = new_host.prove([0; 8].as_ref()); + + let new_output = BlockHeaderCircuitOutput::try_from_slice(&new_proof.journal).unwrap(); + + println!("Output: {:?}", new_output); + } + + /// Please use RISC0_DDEV_MODE=1 to run the following tests. + #[test] + #[allow(clippy::print_literal)] + fn test_varying_total_works() { + eprintln!("\x1b[31mPlease update test data if the elf files are changed. 
Run the tests on bridge_circuit_test_data.rs to update the test data.\x1b[0m"); + let bridge_circuit_host_params_serialized = + include_bytes!("../bin-files/bch_params_varying_total_works.bin"); + let bridge_circuit_host_params: BridgeCircuitHostParams = + borsh::BorshDeserialize::try_from_slice(bridge_circuit_host_params_serialized) + .expect("Failed to deserialize BridgeCircuitHostParams"); + + let bridge_circuit_inputs = bridge_circuit_host_params + .clone() + .into_bridge_circuit_input(); + + for watchtower_input in &bridge_circuit_inputs.watchtower_inputs { + println!( + "Watchtower input: {:?}", + watchtower_input.watchtower_challenge_tx.output[2] + ); + } + + let bridge_circuit_elf = REGTEST_BRIDGE_CIRCUIT_ELF_TEST; + + let executor = default_executor(); + + let env = ExecutorEnv::builder() + .write_slice(&borsh::to_vec(&bridge_circuit_inputs).unwrap()) + .add_assumption(bridge_circuit_host_params.headerchain_receipt) + .add_assumption(bridge_circuit_host_params.lcp_receipt) + .build() + .expect("Failed to build execution environment"); + + let session_info = executor.execute(env, bridge_circuit_elf).unwrap(); + + let public_inputs: SuccinctBridgeCircuitPublicInputs = + SuccinctBridgeCircuitPublicInputs::new(bridge_circuit_inputs.clone()).unwrap(); + + let journal_hash = public_inputs.host_journal_hash(); + + assert_eq!( + session_info.journal.bytes, + *journal_hash.as_bytes(), + "Journal hash mismatch" + ); + } + + #[test] + #[allow(clippy::print_literal)] + #[should_panic(expected = "Insufficient total work")] + fn test_insufficient_total_work() { + eprintln!("\x1b[31mPlease update test data if the elf files are changed. 
Run the tests on bridge_circuit_test_data.rs to update the test data.\x1b[0m"); + let bridge_circuit_host_params_serialized = include_bytes!( + "../bin-files/bch_params_varying_total_works_insufficient_total_work.bin" + ); + let bridge_circuit_host_params: BridgeCircuitHostParams = + borsh::BorshDeserialize::try_from_slice(bridge_circuit_host_params_serialized) + .expect("Failed to deserialize BridgeCircuitHostParams"); + + let bridge_circuit_inputs = bridge_circuit_host_params + .clone() + .into_bridge_circuit_input(); + + for watchtower_input in &bridge_circuit_inputs.watchtower_inputs { + println!( + "Watchtower input: {:?}", + watchtower_input.watchtower_challenge_tx.output[2] + ); + } + + let bridge_circuit_elf = REGTEST_BRIDGE_CIRCUIT_ELF_TEST; + + let executor = default_executor(); + + let env = ExecutorEnv::builder() + .write_slice(&borsh::to_vec(&bridge_circuit_inputs).unwrap()) + .add_assumption(bridge_circuit_host_params.headerchain_receipt) + .add_assumption(bridge_circuit_host_params.lcp_receipt) + .build() + .expect("Failed to build execution environment"); + + executor.execute(env, bridge_circuit_elf).unwrap(); + } + #[cfg(feature = "use-test-vk")] + #[test] + #[allow(clippy::print_literal)] + fn test_varying_total_works_first_two_valid() { + eprintln!("{}Please update test data if the elf files are changed. 
Run the tests on bridge_circuit_test_data.rs to update the test data.{}", "\x1b[31m", "\x1b[0m"); + let bridge_circuit_host_params_serialized = + include_bytes!("../bin-files/bch_params_varying_total_works_first_two_valid.bin"); + let bridge_circuit_host_params: BridgeCircuitHostParams = + borsh::BorshDeserialize::try_from_slice(bridge_circuit_host_params_serialized) + .expect("Failed to deserialize BridgeCircuitHostParams"); + + let bridge_circuit_input = bridge_circuit_host_params + .clone() + .into_bridge_circuit_input(); + + let mut total_works: Vec<[u8; 16]> = + Vec::with_capacity(bridge_circuit_input.watchtower_inputs.len()); + + for watchtower_input in &bridge_circuit_input.watchtower_inputs { + println!( + "Watchtower input: {:?}", + watchtower_input.watchtower_challenge_tx.output[2] + ); + + let total_work = total_work_from_wt_tx(&watchtower_input.watchtower_challenge_tx); + total_works.push(total_work); + } + + let (total_work, challenge_sending_wts) = + total_work_and_watchtower_flags(&bridge_circuit_input, ®TEST_WORK_ONLY_METHOD_ID); + + println!( + "Total work: {:?}, Challenge sending watchtowers: {:?}", + total_work, challenge_sending_wts + ); + + total_works.sort(); + + let expected_total_work = TotalWork(total_works[1]); + let expected_challenge_sending_wts = ChallengeSendingWatchtowers([ + 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + + assert_eq!(total_work, expected_total_work, "Total work mismatch"); + assert_eq!( + challenge_sending_wts, expected_challenge_sending_wts, + "Challenge sending watchtowers mismatch" + ); + } + + #[test] + #[ignore = "This test is too slow and only runs in x86_64."] + fn work_only_from_header_chain_test() { + std::env::set_var("RISC0_DEV_MODE", "1"); + let testnet4_header_chain_method_id_from_elf: [u32; 8] = + compute_image_id(TESTNET4_HEADER_CHAIN_GUEST_ELF) + .unwrap() + .as_words() + .try_into() + .unwrap(); + + let headers = TESTNET4_HEADERS + .chunks(80) + .map(|header| 
CircuitBlockHeader::try_from_slice(header).unwrap()) + .collect::>(); + + // Prepare the input for the circuit + let header_chain_input = HeaderChainCircuitInput { + method_id: testnet4_header_chain_method_id_from_elf, + prev_proof: HeaderChainPrevProofType::GenesisBlock(ChainState::genesis_state()), + block_headers: headers[..10].to_vec(), + }; + + let mut binding = ExecutorEnv::builder(); + let env = binding.write_slice(&borsh::to_vec(&header_chain_input).unwrap()); + let env = env.build().unwrap(); + let prover = default_prover(); + + let header_chain_receipt = prover + .prove_with_opts( + env, + TESTNET4_HEADER_CHAIN_GUEST_ELF, + &ProverOpts::succinct(), + ) + .unwrap() + .receipt; + + // Extract journal of receipt + let header_chain_output = + BlockHeaderCircuitOutput::try_from_slice(&header_chain_receipt.journal.bytes).unwrap(); + + println!("Output: {:?}", header_chain_output); + + let work_only_input = circuits_lib::bridge_circuit::structs::WorkOnlyCircuitInput { + header_chain_circuit_output: header_chain_output.clone(), + }; + + let mut binding = ExecutorEnv::builder(); + let env = binding.write_slice(&borsh::to_vec(&work_only_input).unwrap()); + let env = env.add_assumption(header_chain_receipt); + let env = env.build().unwrap(); + let prover = default_prover(); + + let work_only_prove_info = prover + .prove_with_opts(env, TESTNET4_WORK_ONLY_ELF, &ProverOpts::groth16()) + .unwrap(); + + println!( + "Work only prove info . receipt: {:?}", + work_only_prove_info.receipt + ); + println!( + "Work only prove info . 
session stats: {:?}", + work_only_prove_info.stats + ); + + let groth16_seal = &work_only_prove_info.receipt.inner.groth16().unwrap().seal; + let seal: [u8; 256] = groth16_seal[0..256].try_into().unwrap(); + + // Extract journal of receipt + let work_only_output = WorkOnlyCircuitOutput::try_from_slice( + &work_only_prove_info.receipt.journal.bytes.clone(), + ) + .unwrap(); + + println!("Output: {:?}", work_only_output); + + let circuit_g16_proof = CircuitGroth16Proof::from_seal(&seal); + println!("Circuit G16 proof: {:?}", circuit_g16_proof); + } + + #[test] + fn test_bridge_circuit_with_annex() { + eprintln!("\x1b[31mPlease update test data if the elf files are changed. Run the tests on bridge_circuit_test_data.rs to update the test data.\x1b[0m"); + let input_bytes: &[u8] = + include_bytes!("../bin-files/bch_params_challenge_tx_with_annex.bin"); + let bridge_circuit_host_params: BridgeCircuitHostParams = borsh::from_slice(input_bytes) + .expect("Failed to deserialize BridgeCircuitHostParams from file"); + + let (proof, public_output, bitvm_inputs) = + prove_bridge_circuit(bridge_circuit_host_params, REGTEST_BRIDGE_CIRCUIT_ELF_TEST) + .unwrap(); + println!("Proof: {:?}", proof); + println!("Public Output: {:?}", public_output); + println!("BitVM Inputs: {:?}", bitvm_inputs); + } + + #[test] + #[should_panic(expected = "Invalid witness length, expected 1 element")] + fn test_bridge_circuit_with_large_input() { + eprintln!("\x1b[31mPlease update test data if the elf files are changed. 
Run the tests on bridge_circuit_test_data.rs to update the test data.\x1b[0m"); + let input_bytes: &[u8] = + include_bytes!("../bin-files/bch_params_challenge_tx_with_large_annex.bin"); + let mut bridge_circuit_host_params: BridgeCircuitHostParams = + borsh::from_slice(input_bytes) + .expect("Failed to deserialize BridgeCircuitHostParams from file"); + + // Now add the removed witness element back to the watchtower inputs + for watchtower_input in &mut bridge_circuit_host_params.watchtower_inputs { + let large_data: Vec = vec![0x80; 3999000]; + watchtower_input + .watchtower_challenge_witness + .push(large_data); + } + let (_, _, _) = + prove_bridge_circuit(bridge_circuit_host_params, REGTEST_BRIDGE_CIRCUIT_ELF_TEST) + .unwrap(); + } + + #[test] + fn test_bridge_circuit_with_large_output() { + eprintln!("\x1b[31mPlease update test data if the elf files are changed. Run the tests on bridge_circuit_test_data.rs to update the test data.\x1b[0m"); + let input_bytes: &[u8] = + include_bytes!("../bin-files/bch_params_challenge_tx_with_large_output.bin"); + let bridge_circuit_host_params: BridgeCircuitHostParams = borsh::from_slice(input_bytes) + .expect("Failed to deserialize BridgeCircuitHostParams from file"); + + let (proof, public_output, bitvm_inputs) = + prove_bridge_circuit(bridge_circuit_host_params, REGTEST_BRIDGE_CIRCUIT_ELF_TEST) + .unwrap(); + println!("Proof: {:?}", proof); + println!("Public Output: {:?}", public_output); + println!("BitVM Inputs: {:?}", bitvm_inputs); + } + + #[test] + fn test_bridge_circuit_with_large_input_and_output() { + eprintln!("\x1b[31mPlease update test data if the elf files are changed. 
Run the tests on bridge_circuit_test_data.rs to update the test data.\x1b[0m"); + let input_bytes: &[u8] = + include_bytes!("../bin-files/bch_params_challenge_tx_with_large_annex_and_output.bin"); + let bridge_circuit_host_params: BridgeCircuitHostParams = borsh::from_slice(input_bytes) + .expect("Failed to deserialize BridgeCircuitHostParams from file"); + + let (proof, public_output, bitvm_inputs) = + prove_bridge_circuit(bridge_circuit_host_params, REGTEST_BRIDGE_CIRCUIT_ELF_TEST) + .unwrap(); + println!("Proof: {:?}", proof); + println!("Public Output: {:?}", public_output); + println!("BitVM Inputs: {:?}", bitvm_inputs); + } +} diff --git a/bridge-circuit-host/src/docker.rs b/bridge-circuit-host/src/docker.rs new file mode 100644 index 000000000..e1cf429c9 --- /dev/null +++ b/bridge-circuit-host/src/docker.rs @@ -0,0 +1,553 @@ +use hex::ToHex; +use num_bigint::BigUint; +use num_traits::Num; +use risc0_groth16::{to_json, ProofJson, Seal}; +use risc0_zkvm::sha::Digestible; +use risc0_zkvm::{ + sha::Digest, ReceiptClaim, SuccinctReceipt, SuccinctReceiptVerifierParameters, SystemState, +}; +use risc0_zkvm::{Groth16Receipt, Groth16ReceiptVerifierParameters, InnerReceipt, Receipt}; +use serde_json::Value; +use std::{ + env::consts::ARCH, + fs, + path::Path, + process::{Command, Stdio}, +}; + +use eyre::{eyre, ContextCompat, Result, WrapErr}; +use tempfile::tempdir; +use tracing; + +/// Convert a STARK proof to a SNARK proof. Taken from risc0-groth16 and modified slightly. 
+pub fn stark_to_bitvm2_g16( + succinct_receipt: SuccinctReceipt, + journal: &[u8], +) -> Result<(Seal, [u8; 31])> { + let ident_receipt = risc0_zkvm::recursion::identity_p254(&succinct_receipt) + .map_err(|e| eyre!("Failed to create identity receipt: {:?}", e))?; + let identity_p254_seal_bytes = ident_receipt.get_seal_bytes(); + let receipt_claim = succinct_receipt + .claim + .value() + .wrap_err("Failed to get receipt claim value")?; + tracing::debug!("Journal for stark_to_bitvm2_g16: {:?}", journal); + + // This part is from risc0-groth16 + if !is_x86_architecture() { + return Err(eyre!( + "stark_to_snark is only supported on x86 architecture" + )); + } + if !is_docker_installed() { + return Err(eyre!("Please install docker first")); // Maybe check this at startup... + } + + let tmp_dir = tempdir().wrap_err("Failed to create temporary directory")?; + let work_var = std::env::var("RISC0_WORK_DIR").ok(); + let work_dir = work_var.as_ref().map(Path::new).unwrap_or(tmp_dir.path()); + tracing::debug!("work_dir: {:?}", work_dir); + + std::fs::write(work_dir.join("seal.r0"), identity_p254_seal_bytes.clone()) + .wrap_err("Failed to write seal file")?; + let seal_path = work_dir.join("input.json"); + let proof_path = work_dir.join("proof.json"); + let output_path = work_dir.join("public.json"); + let mut seal_json = Vec::new(); + to_json(&*identity_p254_seal_bytes, &mut seal_json) + .map_err(|e| eyre!("Failed to convert seal to JSON: {:?}", e))?; + std::fs::write(seal_path.clone(), seal_json).wrap_err("Failed to write seal JSON")?; + + let pre_state: risc0_zkvm::MaybePruned = receipt_claim.clone().pre; + tracing::debug!("pre_state: {:?}", pre_state); + let pre_state_digest: Digest = pre_state.clone().digest(); + tracing::debug!("pre_state_digest: {:?}", pre_state_digest); + let pre_state_digest_bits: Vec = pre_state_digest + .as_bytes() + .iter() + .flat_map(|&byte| (0..8).rev().map(move |i| ((byte >> i) & 1).to_string())) + .collect(); + 
tracing::debug!("pre_state_digest_bits: {:?}", pre_state_digest_bits); + let post_state: risc0_zkvm::MaybePruned = receipt_claim.clone().post; + tracing::debug!("post_state: {:?}", post_state); + let post_state_digest: Digest = post_state.clone().digest(); + let post_state_digest_bits: Vec = post_state_digest + .as_bytes() + .iter() + .flat_map(|&byte| (0..8).rev().map(move |i| ((byte >> i) & 1).to_string())) + .collect(); + tracing::debug!("post_state_digest_bits: {:?}", post_state_digest_bits); + + let mut journal_bits = Vec::new(); + for byte in journal { + for i in 0..8 { + journal_bits.push((byte >> (7 - i)) & 1); + } + } + tracing::debug!("journal_bits len: {:?}", journal_bits.len()); + + let succinct_verifier_params = SuccinctReceiptVerifierParameters::default(); + tracing::debug!("Succinct verifier params: {:?}", succinct_verifier_params); + let succinct_control_root = succinct_verifier_params.control_root; + tracing::debug!("Succinct control root: {:?}", succinct_control_root); + let mut succinct_control_root_bytes: [u8; 32] = succinct_control_root + .as_bytes() + .try_into() + .wrap_err("Failed to convert succinct control root to 32 bytes")?; + succinct_control_root_bytes.reverse(); + let succinct_control_root_bytes: String = succinct_control_root_bytes.encode_hex(); + let a1_str = succinct_control_root_bytes[0..32].to_string(); + let a0_str = succinct_control_root_bytes[32..64].to_string(); + tracing::debug!("Succinct control root a0: {:?}", a0_str); + tracing::debug!("Succinct control root a1: {:?}", a1_str); + let a0_dec = to_decimal(&a0_str) + .ok_or_else(|| eyre!("Failed to convert succinct control root a0 to decimal"))?; + let a1_dec = to_decimal(&a1_str) + .ok_or_else(|| eyre!("Failed to convert succinct control root a1 to decimal"))?; + tracing::debug!("Succinct control root a0 dec: {:?}", a0_dec); + tracing::debug!("Succinct control root a1 dec: {:?}", a1_dec); + tracing::debug!("CONTROL_ID: {:?}", ident_receipt.control_id); + let mut 
id_bn254_fr_bits: Vec = ident_receipt + .control_id + .as_bytes() + .iter() + .flat_map(|&byte| (0..8).rev().map(move |i| ((byte >> i) & 1).to_string())) + .collect(); + tracing::debug!("id_bn254_fr_bits: {:?}", id_bn254_fr_bits); + + // remove 248th and 249th bits + id_bn254_fr_bits.remove(248); + id_bn254_fr_bits.remove(248); + + tracing::debug!( + "id_bn254_fr_bits after removing 2 extra bits: {:?}", + id_bn254_fr_bits + ); + + let mut seal_json: Value = { + let file_content = fs::read_to_string(&seal_path).wrap_err("Failed to read seal file")?; + serde_json::from_str(&file_content).wrap_err("Failed to parse seal JSON")? + }; + + seal_json["journal_digest_bits"] = journal_bits.into(); + seal_json["pre_state_digest_bits"] = pre_state_digest_bits.into(); + seal_json["post_state_digest_bits"] = post_state_digest_bits.into(); + seal_json["id_bn254_fr_bits"] = id_bn254_fr_bits.into(); + seal_json["control_root"] = vec![a0_dec, a1_dec].into(); + std::fs::write( + seal_path, + serde_json::to_string_pretty(&seal_json).wrap_err("Failed to write updated seal JSON")?, + ) + .wrap_err("Failed to write seal file")?; + + let output = Command::new("docker") + .arg("run") + .arg("--pull=always") + .arg("--rm") + .arg("--platform=linux/amd64") // Force linux/amd64 platform + .arg("-v") + .arg(format!("{}:/mnt", work_dir.to_string_lossy())) + .arg("ozancw/risc0-to-bitvm2-groth16-prover:latest") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .wrap_err("Failed to execute docker command")?; + + if !output.status.success() { + return Err(eyre!( + "STARK to SNARK prover docker image returned failure: {:?}", + output + )); + } + + tracing::debug!("proof_path: {:?}", proof_path); + let proof_content = + std::fs::read_to_string(proof_path).wrap_err("Failed to read proof file")?; + let output_content_dec = + std::fs::read_to_string(output_path).wrap_err("Failed to read output file")?; + let proof_json: ProofJson = + serde_json::from_str(&proof_content).wrap_err("Failed 
to parse proof JSON")?; + + let parsed_json: Value = + serde_json::from_str(&output_content_dec).wrap_err("Failed to parse output JSON")?; + let output_str = parsed_json[0] + .as_str() + .ok_or_else(|| eyre!("Failed to get output string from JSON"))?; + + // Step 2: Convert the decimal string to BigUint and then to hexadecimal + let output_content_hex = BigUint::from_str_radix(output_str, 10) + .wrap_err("Failed to parse decimal string")? + .to_str_radix(16); + + // If the length of the hexadecimal string is odd, add a leading zero + let output_content_hex = if output_content_hex.len() % 2 == 0 { + output_content_hex + } else { + format!("0{}", output_content_hex) + }; + + // Step 3: Decode the hexadecimal string to a byte vector + let output_byte_vec = + hex::decode(&output_content_hex).wrap_err("Failed to decode hex string")?; + let output_bytes: [u8; 31] = output_byte_vec + .as_slice() + .try_into() + .wrap_err("Failed to convert output bytes to array")?; + + Ok(( + proof_json + .try_into() + .map_err(|e| eyre!("Failed to convert proof JSON to Seal: {:?}", e))?, + output_bytes, + )) +} + +const ID_BN254_FR_BITS: [&str; 254] = [ + "1", "1", "0", "0", "0", "0", "0", "0", "0", "1", "1", "1", "1", "0", "1", "0", "0", "1", "1", + "0", "0", "1", "0", "1", "0", "0", "0", "1", "0", "1", "0", "0", "0", "1", "0", "1", "1", "1", + "0", "0", "0", "0", "1", "1", "1", "1", "0", "0", "1", "0", "1", "1", "0", "1", "0", "0", "1", + "0", "0", "0", "1", "0", "1", "1", "0", "1", "1", "0", "0", "0", "0", "1", "0", "0", "0", "0", + "0", "0", "0", "1", "1", "0", "0", "1", "0", "1", "1", "0", "0", "0", "1", "0", "1", "1", "1", + "0", "1", "0", "1", "0", "0", "1", "1", "0", "0", "0", "0", "0", "0", "1", "1", "1", "1", "0", + "1", "0", "0", "1", "0", "0", "1", "1", "0", "1", "1", "1", "0", "1", "1", "0", "0", "1", "0", + "0", "1", "1", "1", "1", "0", "0", "0", "1", "1", "1", "0", "1", "0", "1", "0", "0", "1", "1", + "1", "0", "1", "1", "1", "0", "1", "1", "0", "0", "1", "0", "0", "1", 
"1", "0", "1", "0", "0", + "1", "0", "1", "1", "1", "0", "1", "0", "1", "1", "1", "0", "0", "1", "0", "1", "1", "0", "1", + "0", "0", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "0", "1", "1", "0", + "0", "0", "0", "0", "0", "0", "0", "1", "1", "0", "1", "0", "0", "1", "1", "0", "1", "1", "0", + "0", "1", "1", "0", "0", "1", "1", "0", "1", "1", "1", "0", "0", "1", "0", "0", "0", "1", "0", + "0", "0", "0", "0", "1", "0", "0", +]; + +pub fn dev_stark_to_risc0_g16(receipt: Receipt, journal: &[u8]) -> Result { + let identity_p254_seal_bytes = vec![0u8; 222668]; + let receipt_claim = receipt + .claim() + .wrap_err("Failed to get receipt claim")? + .value() + .wrap_err("Failed to get receipt claim value")?; + + // This part is from risc0-groth16 + if !is_x86_architecture() { + return Err(eyre!( + "stark_to_snark is only supported on x86 architecture" + )); + } + if !is_docker_installed() { + return Err(eyre!("Please install docker first")); + } + + let tmp_dir = tempdir().wrap_err("Failed to create temporary directory")?; + let work_var = std::env::var("RISC0_WORK_DIR").ok(); + let work_dir = work_var.as_ref().map(Path::new).unwrap_or(tmp_dir.path()); + tracing::debug!("work_dir: {:?}", work_dir); + std::fs::write(work_dir.join("seal.r0"), identity_p254_seal_bytes.clone()) + .wrap_err("Failed to write seal file")?; + let seal_path = work_dir.join("input.json"); + let proof_path = work_dir.join("proof.json"); + let _output_path = work_dir.join("public.json"); + + let pre_state: risc0_zkvm::MaybePruned = receipt_claim.clone().pre; + tracing::debug!("pre_state: {:?}", pre_state); + let pre_state_digest: Digest = pre_state.clone().digest(); + tracing::debug!("pre_state_digest: {:?}", pre_state_digest); + let pre_state_digest_bits: Vec = pre_state_digest + .as_bytes() + .iter() + .flat_map(|&byte| (0..8).rev().map(move |i| ((byte >> i) & 1).to_string())) + .collect(); + tracing::debug!("pre_state_digest_bits: {:?}", pre_state_digest_bits); + let 
post_state: risc0_zkvm::MaybePruned = receipt_claim.clone().post; + tracing::debug!("post_state: {:?}", post_state); + let post_state_digest: Digest = post_state.clone().digest(); + let post_state_digest_bits: Vec = post_state_digest + .as_bytes() + .iter() + .flat_map(|&byte| (0..8).rev().map(move |i| ((byte >> i) & 1).to_string())) + .collect(); + tracing::debug!("post_state_digest_bits: {:?}", post_state_digest_bits); + + let journal_digest: Digest = journal.digest(); + + let mut journal_digest_bits = Vec::new(); + for byte in journal_digest.as_bytes() { + for i in 0..8 { + journal_digest_bits.push((byte >> (7 - i)) & 1); + } + } + tracing::debug!("journal_bits len: {:?}", journal_digest_bits.len()); + + let succinct_verifier_params = SuccinctReceiptVerifierParameters::default(); + tracing::debug!("Succinct verifier params: {:?}", succinct_verifier_params); + let succinct_control_root = succinct_verifier_params.control_root; + tracing::debug!("Succinct control root: {:?}", succinct_control_root); + let mut succinct_control_root_bytes: [u8; 32] = succinct_control_root + .as_bytes() + .try_into() + .wrap_err("Failed to convert succinct control root to 32 bytes")?; + succinct_control_root_bytes.reverse(); + let succinct_control_root_bytes: String = succinct_control_root_bytes.encode_hex(); + let a1_str = succinct_control_root_bytes[0..32].to_string(); + let a0_str = succinct_control_root_bytes[32..64].to_string(); + tracing::debug!("Succinct control root a0: {:?}", a0_str); + tracing::debug!("Succinct control root a1: {:?}", a1_str); + let a0_dec = to_decimal(&a0_str) + .ok_or_else(|| eyre!("Failed to convert succinct control root a0 to decimal"))?; + let a1_dec = to_decimal(&a1_str) + .ok_or_else(|| eyre!("Failed to convert succinct control root a1 to decimal"))?; + tracing::debug!("Succinct control root a0 dec: {:?}", a0_dec); + tracing::debug!("Succinct control root a1 dec: {:?}", a1_dec); + + let id_bn254_fr_bits: Vec = ID_BN254_FR_BITS + .iter() + .map(|&bit| 
bit.to_string()) + .collect(); + + let mut seal_json: Value = serde_json::json!({}); + + seal_json["journal_digest_bits"] = journal_digest_bits.into(); + seal_json["pre_state_digest_bits"] = pre_state_digest_bits.into(); + seal_json["post_state_digest_bits"] = post_state_digest_bits.into(); + seal_json["id_bn254_fr_bits"] = id_bn254_fr_bits.into(); + seal_json["control_root"] = vec![a0_dec, a1_dec].into(); + std::fs::write( + seal_path, + serde_json::to_string_pretty(&seal_json) + .wrap_err("Failed to convert seal JSON to string")?, + ) + .wrap_err("Failed to write seal file")?; + + let output = Command::new("docker") + .arg("run") + .arg("--pull=always") + .arg("--rm") + .arg("--platform=linux/amd64") // Force linux/amd64 platform + .arg("-v") + .arg(format!("{}:/mnt", work_dir.to_string_lossy())) + .arg("ozancw/dev-risc0-groth16-prover-const-digest-len") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .wrap_err("Failed to execute docker command")?; + + if !output.status.success() { + return Err(eyre!( + "STARK to SNARK prover docker image returned failure: {:?}", + output + )); + } + + tracing::debug!("proof_path: {:?}", proof_path); + let contents = std::fs::read_to_string(proof_path).wrap_err("Failed to read proof file")?; + let proof_json: ProofJson = + serde_json::from_str(&contents).wrap_err("Failed to parse proof JSON")?; + let seal: Seal = proof_json + .try_into() + .map_err(|e| eyre!("Failed to convert proof JSON to Seal: {:?}", e))?; + let g16_verifier_params = Groth16ReceiptVerifierParameters::default(); // This is incorrect, but should not matter as it is not used. 
+ let g16_receipt = Groth16Receipt::new( + seal.to_vec(), + risc0_zkvm::MaybePruned::Value(receipt_claim), + g16_verifier_params.digest(), + ); + let inner_receipt = InnerReceipt::Groth16(g16_receipt); + Ok(Receipt::new(inner_receipt, journal.to_vec())) +} + +const ID_BN254_FR_BITS_DEV_BRIDGE: [&str; 254] = [ + "1", "1", "0", "0", "0", "0", "0", "0", "0", "1", "1", "1", "1", "0", "1", "0", "0", "1", "1", + "0", "0", "1", "0", "1", "0", "0", "0", "1", "0", "1", "0", "0", "0", "1", "0", "1", "1", "1", + "0", "0", "0", "0", "1", "1", "1", "1", "0", "0", "1", "0", "1", "1", "0", "1", "0", "0", "1", + "0", "0", "0", "1", "0", "1", "1", "0", "1", "1", "0", "0", "0", "0", "1", "0", "0", "0", "0", + "0", "0", "0", "1", "1", "0", "0", "1", "0", "1", "1", "0", "0", "0", "1", "0", "1", "1", "1", + "0", "1", "0", "1", "0", "0", "1", "1", "0", "0", "0", "0", "0", "0", "1", "1", "1", "1", "0", + "1", "0", "0", "1", "0", "0", "1", "1", "0", "1", "1", "1", "0", "1", "1", "0", "0", "1", "0", + "0", "1", "1", "1", "1", "0", "0", "0", "1", "1", "1", "0", "1", "0", "1", "0", "0", "1", "1", + "1", "0", "1", "1", "1", "0", "1", "1", "0", "0", "1", "0", "0", "1", "1", "0", "1", "0", "0", + "1", "0", "1", "1", "1", "0", "1", "0", "1", "1", "1", "0", "0", "1", "0", "1", "1", "0", "1", + "0", "0", "0", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "0", "1", "0", "1", "1", "0", + "0", "0", "0", "0", "0", "0", "0", "1", "1", "0", "1", "0", "0", "1", "1", "0", "1", "1", "0", + "0", "1", "1", "0", "0", "1", "1", "0", "1", "1", "1", "0", "0", "1", "0", "0", "0", "1", "0", + "0", "0", "0", "0", "1", "0", "0", +]; + +pub fn stark_to_bitvm2_g16_dev_mode(receipt: Receipt, journal: &[u8]) -> Result<(Seal, [u8; 31])> { + let identity_p254_seal_bytes = vec![0u8; 222668]; + let receipt_claim = receipt + .claim() + .wrap_err("Failed to get receipt claim")? 
+ .value() + .wrap_err("Failed to get receipt claim value")?; + + // This part is from risc0-groth16 + if !is_x86_architecture() { + return Err(eyre!( + "stark_to_snark is only supported on x86 architecture" + )); + } + if !is_docker_installed() { + return Err(eyre!("Please install docker first")); + } + + let tmp_dir = tempdir().wrap_err("Failed to create temporary directory")?; + let work_var = std::env::var("RISC0_WORK_DIR").ok(); + let work_dir = work_var.as_ref().map(Path::new).unwrap_or(tmp_dir.path()); + tracing::debug!("work_dir: {:?}", work_dir); + std::fs::write(work_dir.join("seal.r0"), identity_p254_seal_bytes.clone()) + .wrap_err("Failed to write seal file")?; + let seal_path = work_dir.join("input.json"); + let proof_path = work_dir.join("proof.json"); + let output_path = work_dir.join("public.json"); + + let pre_state: risc0_zkvm::MaybePruned = receipt_claim.clone().pre; + tracing::debug!("pre_state: {:?}", pre_state); + let pre_state_digest: Digest = pre_state.clone().digest(); + tracing::debug!("pre_state_digest: {:?}", pre_state_digest); + let pre_state_digest_bits: Vec = pre_state_digest + .as_bytes() + .iter() + .flat_map(|&byte| (0..8).rev().map(move |i| ((byte >> i) & 1).to_string())) + .collect(); + tracing::debug!("pre_state_digest_bits: {:?}", pre_state_digest_bits); + let post_state: risc0_zkvm::MaybePruned = receipt_claim.clone().post; + tracing::debug!("post_state: {:?}", post_state); + let post_state_digest: Digest = post_state.clone().digest(); + let post_state_digest_bits: Vec = post_state_digest + .as_bytes() + .iter() + .flat_map(|&byte| (0..8).rev().map(move |i| ((byte >> i) & 1).to_string())) + .collect(); + tracing::debug!("post_state_digest_bits: {:?}", post_state_digest_bits); + + let mut journal_bits = Vec::new(); + for byte in journal { + for i in 0..8 { + journal_bits.push((byte >> (7 - i)) & 1); + } + } + tracing::debug!("journal_bits len: {:?}", journal_bits.len()); + + let succinct_verifier_params = 
SuccinctReceiptVerifierParameters::default(); + tracing::debug!("Succinct verifier params: {:?}", succinct_verifier_params); + let succinct_control_root = succinct_verifier_params.control_root; + tracing::debug!("Succinct control root: {:?}", succinct_control_root); + let mut succinct_control_root_bytes: [u8; 32] = succinct_control_root + .as_bytes() + .try_into() + .wrap_err("Failed to convert succinct control root to 32 bytes")?; + succinct_control_root_bytes.reverse(); + let succinct_control_root_bytes: String = succinct_control_root_bytes.encode_hex(); + let a1_str = succinct_control_root_bytes[0..32].to_string(); + let a0_str = succinct_control_root_bytes[32..64].to_string(); + tracing::debug!("Succinct control root a0: {:?}", a0_str); + tracing::debug!("Succinct control root a1: {:?}", a1_str); + let a0_dec = to_decimal(&a0_str) + .ok_or_else(|| eyre!("Failed to convert succinct control root a0 to decimal"))?; + let a1_dec = to_decimal(&a1_str) + .ok_or_else(|| eyre!("Failed to convert succinct control root a1 to decimal"))?; + tracing::debug!("Succinct control root a0 dec: {:?}", a0_dec); + tracing::debug!("Succinct control root a1 dec: {:?}", a1_dec); + + let id_bn254_fr_bits: Vec = ID_BN254_FR_BITS_DEV_BRIDGE + .iter() + .map(|&bit| bit.to_string()) + .collect(); + + let mut seal_json: Value = serde_json::json!({}); + + seal_json["journal_digest_bits"] = journal_bits.into(); + seal_json["pre_state_digest_bits"] = pre_state_digest_bits.into(); + seal_json["post_state_digest_bits"] = post_state_digest_bits.into(); + seal_json["id_bn254_fr_bits"] = id_bn254_fr_bits.into(); + seal_json["control_root"] = vec![a0_dec, a1_dec].into(); + std::fs::write( + seal_path, + serde_json::to_string_pretty(&seal_json) + .wrap_err("Failed to convert seal JSON to string")?, + ) + .wrap_err("Failed to write seal file")?; + + let output = Command::new("docker") + .arg("run") + .arg("--pull=always") + .arg("--rm") + .arg("--platform=linux/amd64") // Force linux/amd64 platform + 
.arg("-v") + .arg(format!("{}:/mnt", work_dir.to_string_lossy())) + .arg("ozancw/dev-risc0-to-bitvm2-groth16-prover:latest") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .wrap_err("Failed to execute docker command")?; + + if !output.status.success() { + return Err(eyre!( + "STARK to SNARK prover docker image returned failure: {:?}", + output + )); + } + + tracing::debug!("proof_path: {:?}", proof_path); + let proof_content = + std::fs::read_to_string(proof_path).wrap_err("Failed to read proof file")?; + let output_content_dec = + std::fs::read_to_string(output_path).wrap_err("Failed to read output file")?; + tracing::debug!("output content: {:?}", output_content_dec); + let proof_json: ProofJson = + serde_json::from_str(&proof_content).wrap_err("Failed to parse proof JSON")?; + + // Convert output_content_dec from decimal to hex + let parsed_json: Value = + serde_json::from_str(&output_content_dec).wrap_err("Failed to parse output JSON")?; + let output_str = parsed_json[0] + .as_str() + .wrap_err("Failed to get output string from JSON")?; // Extracts the string from the JSON array + + // Step 2: Convert the decimal string to BigUint and then to hexadecimal + let output_content_hex = BigUint::from_str_radix(output_str, 10) + .wrap_err("Failed to parse decimal string")? + .to_str_radix(16); + + // If the length of the hexadecimal string is odd, add a leading zero + let output_content_hex = if output_content_hex.len() % 2 == 0 { + output_content_hex + } else { + format!("0{}", output_content_hex) + }; + + // Step 3: Decode the hexadecimal string to a byte vector + let output_byte_vec = + hex::decode(&output_content_hex).wrap_err("Failed to decode hex string")?; + // Create our target 31-byte array, initialized to all zeros. + let mut output_bytes = [0u8; 31]; + + // Calculate the starting position in the destination array. + // This ensures the bytes are right-aligned, effectively padding with leading zeros. 
+ let start_index = 31 - output_byte_vec.len(); + + // Copy the decoded bytes from the vector into the correct slice of the array. + output_bytes[start_index..].copy_from_slice(&output_byte_vec); + Ok(( + proof_json + .try_into() + .map_err(|e| eyre!("Failed to convert proof JSON to Seal: {:?}", e))?, + output_bytes, + )) +} + +fn is_docker_installed() -> bool { + Command::new("docker") + .arg("--version") + .output() + .map(|output| output.status.success()) + .unwrap_or(false) +} + +fn is_x86_architecture() -> bool { + ARCH == "x86_64" || ARCH == "x86" +} + +pub fn to_decimal(s: &str) -> Option { + let int = BigUint::from_str_radix(s, 16).ok(); + int.map(|n| n.to_str_radix(10)) +} diff --git a/bridge-circuit-host/src/lib.rs b/bridge-circuit-host/src/lib.rs new file mode 100644 index 000000000..b5f29f350 --- /dev/null +++ b/bridge-circuit-host/src/lib.rs @@ -0,0 +1,42 @@ +use eyre::bail; +use risc0_zkvm::{InnerReceipt, Receipt}; + +pub mod bridge_circuit_host; +pub mod docker; +pub mod mock_zkvm; +pub mod structs; +pub mod utils; + +/// Converts an `InnerReceipt` into a `Receipt`, ensuring all required fields are present. +/// +/// # Arguments +/// * `inner` - The `InnerReceipt` to extract data from. +/// +/// # Returns +/// Returns a `Receipt` if all required fields are found, otherwise returns an error. +/// +/// # Errors +/// This function can return an error in the following cases: +/// * If `inner.claim()` is empty. +/// * If `claim.value()` is empty. +/// * If `claim.output.value()` is empty. +/// * If `output` is `None`. +/// * If `output.journal.value()` is empty. 
+pub fn receipt_from_inner(inner: InnerReceipt) -> eyre::Result { + let mb_claim = inner.claim().or_else(|_| bail!("Claim is empty"))?; + let claim = mb_claim + .value() + .or_else(|_| bail!("Claim content is empty"))?; + let output = claim + .output + .value() + .or_else(|_| bail!("Output content is empty"))?; + let Some(output) = output else { + bail!("Output body is empty"); + }; + let journal = output + .journal + .value() + .or_else(|_| bail!("Journal content is empty"))?; + Ok(Receipt::new(inner, journal)) +} diff --git a/bridge-circuit-host/src/mock_zkvm.rs b/bridge-circuit-host/src/mock_zkvm.rs new file mode 100644 index 000000000..3a5286ce1 --- /dev/null +++ b/bridge-circuit-host/src/mock_zkvm.rs @@ -0,0 +1,56 @@ +use std::sync::{Arc, Mutex}; + +use circuits_lib::common::zkvm::{VerificationContext, ZkvmGuest, ZkvmHost}; + +#[derive(Debug, Clone, Default)] +struct ZkvmData { + values: Vec, + journal: Vec, +} + +#[derive(Debug, Clone, Default)] +pub struct MockZkvmHost { + data: Arc>, +} + +impl MockZkvmHost { + pub fn new() -> Self { + Self::default() + } +} + +impl ZkvmGuest for MockZkvmHost { + fn read_from_host(&self) -> T { + let data = self.data.lock().unwrap(); + T::try_from_slice(&data.values).unwrap() + } + fn commit(&self, item: &T) { + let mut data = self.data.lock().unwrap(); + let value = borsh::to_vec(item).unwrap(); + data.journal.extend_from_slice(&value); + } + + fn verify(&self, _method_id: [u32; 8], _journal: &T) { + tracing::warn!("This is a mock zkvm host, no real verification is done."); + } +} + +impl ZkvmHost for MockZkvmHost { + fn write(&self, value: &T) { + let mut data = self.data.lock().unwrap(); + let value = borsh::to_vec(value).unwrap(); + data.values.extend_from_slice(&value); + } + fn prove(&self, _elf: &[u32]) -> VerificationContext { + tracing::warn!("This is a mock zkvm host, no real proof is generated."); + let data = self.data.lock().unwrap(); + VerificationContext { + method_id: [42; 8], + journal: 
data.journal.clone(), + } + } + + fn add_assumption(&self, _proof: VerificationContext) { + tracing::warn!("This is a mock zkvm host, no assumptions are added."); + } +} diff --git a/bridge-circuit-host/src/structs.rs b/bridge-circuit-host/src/structs.rs new file mode 100644 index 000000000..73e854e97 --- /dev/null +++ b/bridge-circuit-host/src/structs.rs @@ -0,0 +1,764 @@ +use alloy_rpc_types::EIP1186StorageProof; +use ark_bn254::Bn254; +use ark_ff::PrimeField; +use bitcoin::{hashes::Hash, Network, Transaction, Txid, XOnlyPublicKey}; +use borsh::{BorshDeserialize, BorshSerialize}; +use circuits_lib::{ + bridge_circuit::{ + deposit_constant, get_first_op_return_output, journal_hash, parse_op_return_data, + spv::SPV, + structs::{ + BridgeCircuitInput, ChallengeSendingWatchtowers, DepositConstant, LatestBlockhash, + LightClientProof, PayoutTxBlockhash, StorageProof, WatchtowerInput, + }, + transaction::CircuitTransaction, + verify_watchtower_challenges, + }, + header_chain::BlockHeaderCircuitOutput, +}; +use eyre::Result; +use risc0_zkvm::Receipt; +use std::ops::{Deref, DerefMut}; + +use crate::utils::get_ark_verifying_key_prod; +use thiserror::Error; + +const OP_RETURN_OUTPUT: usize = 1; +const ANCHOR_OUTPUT: usize = 1; + +/// Parameters required for bridge circuit proof generation. +/// +/// This struct contains all the necessary inputs and proofs required to generate +/// a bridge circuit proof, including transactions, receipts, and cryptographic proofs. 
+#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct BridgeCircuitHostParams { + pub kickoff_tx: CircuitTransaction, + pub spv: SPV, + pub block_header_circuit_output: BlockHeaderCircuitOutput, + pub headerchain_receipt: Receipt, + pub light_client_proof: LightClientProof, + pub lcp_receipt: Receipt, + pub storage_proof: StorageProof, + pub network: CircuitNetwork, + pub watchtower_inputs: Vec, + pub all_tweaked_watchtower_pubkeys: Vec, + pub watchtower_challenge_connector_start_idx: u16, + pub payout_input_index: u16, +} + +/// Errors that can occur when constructing or validating bridge circuit host parameters. +#[derive(Debug, Clone, Error)] +pub enum BridgeCircuitHostParamsError { + #[error("Invalid kickoff transaction")] + InvalidKickoffTx, + #[error("Invalid headerchain receipt")] + InvalidHeaderchainReceipt, + #[error("Invalid light client proof")] + InvalidLightClientProof, + #[error("Invalid LCP receipt")] + InvalidLcpReceipt, + #[error("Invalid storage proof")] + InvalidStorageProof, + #[error("Invalid network")] + InvalidNetwork, + #[error("Invalid watchtower inputs")] + InvalidWatchtowerInputs, + #[error("Invalid public key")] + InvalidPubkey, + #[error("Invalid number of kickoff outputs")] + InvalidNumberOfKickoffOutputs, + #[error("Payout input index not found")] + PayoutInputIndexNotFound, + #[error("Payout input index too large: {0}")] + PayoutInputIndexTooLarge(usize), + #[error("Invalid kickoff transaction vout")] + KickOffTxInvalidVout, + #[error("Failed to deserialize storage proof: {0}")] + StorageProofDeserializationError(String), + #[error("Failed to parse operator public key")] + InvalidOperatorPubkey, + #[error("Kickoff transaction missing outputs")] + MissingKickoffOutputs, + #[error("Invalid deposit storage proof")] + InvalidDepositStorageProof, + #[error("Round transaction ID mismatch")] + RoundTxidMismatch, + #[error("Failed to verify bridge circuit proof")] + ProofVerificationFailed, +} + +impl BridgeCircuitHostParams 
{ + /// Creates a new instance of BridgeCircuitHostParams. + /// + /// # Arguments + /// + /// * `kickoff_tx` - The kickoff transaction + /// * `spv` - Simplified Payment Verification proof for the payout transaction + /// * `block_header_circuit_output` - Output from the header chain circuit + /// * `headerchain_receipt` - Receipt from the header chain proof + /// * `light_client_proof` - Light client proof for validation + /// * `lcp_receipt` - Receipt from the light client proof + /// * `storage_proof` - Storage proof from the blockchain (l2) state + /// * `network` - Bitcoin network (mainnet, testnet, etc.) + /// * `watchtower_inputs` - Inputs including details about watchtower challenge transactions + /// * `all_tweaked_watchtower_pubkeys` - All tweaked watchtower public keys + /// * `watchtower_challenge_connector_start_idx` - Starting index for watchtower challenge connectors on kickoff tx + /// * `payout_input_index` - Index of the payout input in the transaction + #[allow(clippy::too_many_arguments)] + pub fn new( + kickoff_tx: Transaction, + spv: SPV, + block_header_circuit_output: BlockHeaderCircuitOutput, + headerchain_receipt: Receipt, + light_client_proof: LightClientProof, + lcp_receipt: Receipt, + storage_proof: StorageProof, + network: Network, + watchtower_inputs: Vec, + all_tweaked_watchtower_pubkeys: Vec, + watchtower_challenge_connector_start_idx: u16, + payout_input_index: u16, + ) -> Self { + let all_tweaked_watchtower_pubkeys: Vec = + all_tweaked_watchtower_pubkeys + .into_iter() + .map(CircuitXOnlyPublicKey::from) + .collect(); + + BridgeCircuitHostParams { + kickoff_tx: CircuitTransaction(kickoff_tx), + spv, + block_header_circuit_output, + headerchain_receipt, + light_client_proof, + lcp_receipt, + storage_proof, + network: CircuitNetwork(network), + watchtower_inputs, + all_tweaked_watchtower_pubkeys, + watchtower_challenge_connector_start_idx, + payout_input_index, + } + } + + /// Creates a new instance of BridgeCircuitHostParams with 
watchtower transactions. + /// + /// This method automatically derives several parameters from the provided watchtower contexts + /// and validates the inputs before construction. + /// + /// # Arguments + /// + /// * `kickoff_tx` - The kickoff transaction + /// * `spv` - Simplified Payment Verification proof for the payout transaction + /// * `headerchain_receipt` - Receipt from the header chain proof + /// * `light_client_proof` - Light client proof for validation + /// * `lcp_receipt` - Receipt from the light client proof + /// * `storage_proof` - Storage proof from the blockchain (l2) state + /// * `network` - Bitcoin network + /// * `watchtower_contexts` - Contexts containing watchtower transactions and transactions that includes prevouts + /// * `watchtower_challenge_connector_start_idx` - Starting index for watchtower challenge connectors on kickoff tx + /// + /// # Returns + /// + /// Returns a `Result` containing the constructed `BridgeCircuitHostParams` or an error. + /// + /// # Errors + /// + /// This function will return an error if: + /// - Watchtower input generation fails + /// - Header chain receipt journal deserialization fails + /// - Public key extraction from kickoff transaction fails + /// - Storage proof deserialization fails + /// - Payout input index calculation fails + #[allow(clippy::too_many_arguments)] + pub fn new_with_wt_tx( + kickoff_tx: Transaction, + spv: SPV, + headerchain_receipt: Receipt, + light_client_proof: LightClientProof, + lcp_receipt: Receipt, + storage_proof: StorageProof, + network: Network, + watchtower_contexts: &[WatchtowerContext], + watchtower_challenge_connector_start_idx: u16, + ) -> Result { + let watchtower_inputs = get_wt_inputs( + kickoff_tx.compute_txid(), + watchtower_contexts, + watchtower_challenge_connector_start_idx, + )?; + + let block_header_circuit_output: BlockHeaderCircuitOutput = + borsh::from_slice(&headerchain_receipt.journal.bytes) + .map_err(|_| 
BridgeCircuitHostParamsError::InvalidHeaderchainReceipt)?; + + let all_tweaked_watchtower_pubkeys = + get_all_pubkeys(&kickoff_tx, watchtower_challenge_connector_start_idx)?; + + let storage_proof_utxo: EIP1186StorageProof = + serde_json::from_str(&storage_proof.storage_proof_utxo).map_err(|e| { + BridgeCircuitHostParamsError::StorageProofDeserializationError(e.to_string()) + })?; + + let wd_txid_bytes: [u8; 32] = storage_proof_utxo.value.to_be_bytes(); + + let wd_txid: Txid = bitcoin::consensus::deserialize(&wd_txid_bytes) + .map_err(|_| BridgeCircuitHostParamsError::InvalidStorageProof)?; + + let payout_input_index = get_payout_input_index(wd_txid, &spv.transaction.0)?; + + let all_tweaked_watchtower_pubkeys: Vec = + all_tweaked_watchtower_pubkeys + .into_iter() + .map(CircuitXOnlyPublicKey::from) + .collect(); + + Ok(BridgeCircuitHostParams { + kickoff_tx: CircuitTransaction(kickoff_tx), + spv, + block_header_circuit_output, + headerchain_receipt, + light_client_proof, + lcp_receipt, + storage_proof, + network: CircuitNetwork(network), + watchtower_inputs, + all_tweaked_watchtower_pubkeys, + watchtower_challenge_connector_start_idx, + payout_input_index, + }) + } + + /// Converts the host parameters into bridge circuit input format. + /// + /// This method transforms the host parameters into the format required by the bridge circuit, + /// serializing public keys and organizing the data appropriately. + /// + /// # Returns + /// + /// Returns a `BridgeCircuitInput` containing all the necessary data for circuit execution. 
+ pub fn into_bridge_circuit_input(self) -> BridgeCircuitInput { + let BridgeCircuitHostParams { + kickoff_tx, + spv, + block_header_circuit_output, + headerchain_receipt: _, + light_client_proof, + lcp_receipt: _, + storage_proof, + network: _, + watchtower_inputs, + all_tweaked_watchtower_pubkeys, + watchtower_challenge_connector_start_idx, + payout_input_index, + } = self; + + let all_tweaked_watchtower_pubkeys: Vec<[u8; 32]> = all_tweaked_watchtower_pubkeys + .iter() + .map(|pubkey| (**pubkey).serialize()) + .collect(); + + BridgeCircuitInput::new( + kickoff_tx.into(), + watchtower_inputs, + all_tweaked_watchtower_pubkeys, + block_header_circuit_output, + spv, + payout_input_index, + light_client_proof, + storage_proof, + watchtower_challenge_connector_start_idx, + ) + } +} + +/// Finds the index of the payout input in the payout transaction based on the withdrawal transaction ID. +/// +/// # Arguments +/// +/// * `wd_txid` - The withdrawal transaction ID to search for +/// * `payout_tx` - The payout transaction to search within +/// +/// # Returns +/// +/// Returns a `Result` containing the input index as `u16` or an error. +/// +/// # Errors +/// +/// This function will return an error if: +/// - The withdrawal transaction ID is not found in any input +/// - The input index is too large to fit in a `u16` +fn get_payout_input_index( + wd_txid: Txid, + payout_tx: &Transaction, +) -> Result { + for (index, input) in payout_tx.input.iter().enumerate() { + if input.previous_output.txid == wd_txid { + return u16::try_from(index).map_err(|_| { + // This should never happen + BridgeCircuitHostParamsError::PayoutInputIndexTooLarge(index) + }); + } + } + Err(BridgeCircuitHostParamsError::PayoutInputIndexNotFound) +} + +/// Generates watchtower inputs from watchtower contexts. 
+/// +/// # Arguments +/// +/// * `kickoff_tx_id` - The transaction ID of the kickoff transaction +/// * `watchtower_contexts` - Array of watchtower contexts containing transactions +/// * `watchtower_challenge_connector_start_idx` - Starting index for watchtower challenge connectors on kickoff tx +/// +/// # Returns +/// +/// Returns a `Result` containing a vector of `WatchtowerInput` or an error. +/// +/// # Errors +/// +/// This function will return an error if any watchtower input generation fails. +fn get_wt_inputs( + kickoff_tx_id: Txid, + watchtower_contexts: &[WatchtowerContext], + watchtower_challenge_connector_start_idx: u16, +) -> Result, BridgeCircuitHostParamsError> { + watchtower_contexts + .iter() + .map(|context| { + WatchtowerInput::from_txs( + kickoff_tx_id, + context.watchtower_tx.clone(), + &context.prevout_txs, + watchtower_challenge_connector_start_idx, + ) + .map_err(|_| BridgeCircuitHostParamsError::InvalidWatchtowerInputs) + }) + .collect() +} + +/// Extracts all tweaked watchtower public keys from a kickoff transaction. +/// +/// # Arguments +/// +/// * `kickoff_tx` - The kickoff transaction containing watchtower public keys in its outputs +/// * `watchtower_challenge_connector_start_idx` - Starting index for watchtower challenge connectors on kickoff tx +/// +/// # Returns +/// +/// Returns a `Result` containing a vector of `XOnlyPublicKey` or an error. +/// +/// # Errors +/// +/// This function will return an error if: +/// - The kickoff transaction has insufficient outputs +/// - Any public key extraction fails +/// - The transaction structure is invalid +pub fn get_all_pubkeys( + kickoff_tx: &Transaction, + watchtower_challenge_connector_start_idx: u16, +) -> Result, BridgeCircuitHostParamsError> { + let start_index = watchtower_challenge_connector_start_idx as usize; + let end_index = kickoff_tx + .output + .len() + .checked_sub(OP_RETURN_OUTPUT) + .ok_or(BridgeCircuitHostParamsError::InvalidNumberOfKickoffOutputs)? 
+ .checked_sub(ANCHOR_OUTPUT) + .ok_or(BridgeCircuitHostParamsError::InvalidNumberOfKickoffOutputs)?; + + let mut all_tweaked_watchtower_pubkeys = Vec::new(); + + for i in (start_index..end_index).step_by(2) { + let output = &kickoff_tx.output[i]; + + if !output.script_pubkey.is_p2tr() { + return Err(BridgeCircuitHostParamsError::InvalidPubkey); + } + + let xonly_public_key = XOnlyPublicKey::from_slice(&output.script_pubkey.as_bytes()[2..34]) + .map_err(|_| BridgeCircuitHostParamsError::InvalidPubkey)?; + + all_tweaked_watchtower_pubkeys.push(xonly_public_key); + } + Ok(all_tweaked_watchtower_pubkeys) +} + +/// Context containing watchtower transaction and transactions that include prevouts. +pub struct WatchtowerContext { + pub watchtower_tx: Transaction, + pub prevout_txs: Vec, +} + +/// Public inputs for the succinct bridge circuit. +/// +/// This struct contains all the public inputs that are committed after hashing to in the bridge circuit proof, +/// including block hashes, watchtower challenges, and deposit constants. +#[derive(Debug, Clone)] +pub struct SuccinctBridgeCircuitPublicInputs { + pub bridge_circuit_input: BridgeCircuitInput, + pub challenge_sending_watchtowers: ChallengeSendingWatchtowers, + pub deposit_constant: DepositConstant, + pub payout_tx_block_hash: PayoutTxBlockhash, + pub latest_block_hash: LatestBlockhash, +} + +impl SuccinctBridgeCircuitPublicInputs { + /// Creates new succinct bridge circuit public inputs from bridge circuit input. + /// + /// # Arguments + /// + /// * `bridge_circuit_input` - The bridge circuit input containing all necessary data + /// + /// # Returns + /// + /// Returns a new instance of `SuccinctBridgeCircuitPublicInputs`. 
+ /// + /// # Errors + /// + /// This function will return an error if: + /// - Block hash extraction fails + /// - Deposit constant calculation fails + /// - Watchtower challenge verification fails + pub fn new( + bridge_circuit_input: BridgeCircuitInput, + ) -> Result { + let latest_block_hash: LatestBlockhash = + bridge_circuit_input.hcp.chain_state.best_block_hash[12..32] + .try_into() + .map_err(|_| BridgeCircuitHostParamsError::InvalidKickoffTx)?; + + let payout_tx_block_hash: PayoutTxBlockhash = bridge_circuit_input + .payout_spv + .block_header + .compute_block_hash()[12..32] + .try_into() + .map_err(|_| BridgeCircuitHostParamsError::InvalidKickoffTx)?; + + let deposit_constant = host_deposit_constant(&bridge_circuit_input)?; + let watchtower_challenge_set = verify_watchtower_challenges(&bridge_circuit_input); + + Ok(Self { + bridge_circuit_input, + challenge_sending_watchtowers: ChallengeSendingWatchtowers( + watchtower_challenge_set.challenge_senders, + ), + deposit_constant, + payout_tx_block_hash, + latest_block_hash, + }) + } + + /// Calculates the host-side journal hash for the bridge circuit. + /// + /// # Returns + /// + /// Returns a `blake3::Hash` representing the journal hash. + pub fn host_journal_hash(&self) -> blake3::Hash { + journal_hash( + self.payout_tx_block_hash, + self.latest_block_hash, + self.challenge_sending_watchtowers, + self.deposit_constant, + ) + } +} + +/// Calculates the deposit constant from bridge circuit input. +/// +/// # Arguments +/// +/// * `input` - The bridge circuit input containing deposit information +/// +/// # Returns +/// +/// Returns a `Result` containing the `DepositConstant` or an error. 
+/// +/// # Errors +/// +/// This function will return an error if: +/// - Transaction output is missing +/// - Storage proof deserialization fails +/// - Operator public key parsing fails +/// - Round transaction ID validation fails +fn host_deposit_constant( + input: &BridgeCircuitInput, +) -> Result { + let first_op_return_output = get_first_op_return_output(&input.payout_spv.transaction) + .ok_or(BridgeCircuitHostParamsError::InvalidOperatorPubkey)?; + + let deposit_storage_proof: EIP1186StorageProof = + serde_json::from_str(&input.sp.storage_proof_deposit_txid).map_err(|e| { + BridgeCircuitHostParamsError::StorageProofDeserializationError(e.to_string()) + })?; + + let round_txid = input.kickoff_tx.input[0] + .previous_output + .txid + .to_byte_array(); + + if input.kickoff_tx.input[0] + .previous_output + .txid + .to_byte_array() + != round_txid + { + return Err(BridgeCircuitHostParamsError::RoundTxidMismatch); + } + + let kickff_round_vout = input.kickoff_tx.input[0].previous_output.vout; + + let operator_xonlypk: [u8; 32] = parse_op_return_data(&first_op_return_output.script_pubkey) + .ok_or(BridgeCircuitHostParamsError::InvalidOperatorPubkey)? + .try_into() + .map_err(|_| BridgeCircuitHostParamsError::InvalidOperatorPubkey)?; + + let deposit_value_bytes: [u8; 32] = deposit_storage_proof.value.to_be_bytes::<32>(); + + Ok(deposit_constant( + operator_xonlypk, + input.watchtower_challenge_connector_start_idx, + &input.all_tweaked_watchtower_pubkeys, + deposit_value_bytes, + round_txid, + kickff_round_vout, + input.hcp.genesis_state_hash, + )) +} + +/// Inputs required for BitVM2 bridge circuit verification. +/// +/// This struct contains all the inputs needed to verify a bridge circuit proof +/// in the BitVM2, including block hashes, watchtower data, and method IDs. 
+#[derive(Debug, Clone, Copy)] +pub struct BridgeCircuitBitvmInputs { + pub payout_tx_block_hash: [u8; 20], + pub latest_block_hash: [u8; 20], + pub challenge_sending_watchtowers: [u8; 20], + pub deposit_constant: [u8; 32], + pub combined_method_id: [u8; 32], +} + +impl BridgeCircuitBitvmInputs { + /// Creates a new instance of BridgeCircuitBitvmInputs. + /// + /// # Arguments + /// + /// * `payout_tx_block_hash` - Hash of the block containing the payout transaction + /// * `latest_block_hash` - Hash of the latest block in the header chain proof provided by the operator + /// * `challenge_sending_watchtowers` - Hash representing watchtowers that sent challenges + /// * `deposit_constant` - Constant value representing the deposit + /// * `combined_method_id` - Combined method ID for the circuit + /// + /// # Returns + /// + /// Returns a new instance of `BridgeCircuitBitvmInputs`. + pub fn new( + payout_tx_block_hash: [u8; 20], + latest_block_hash: [u8; 20], + challenge_sending_watchtowers: [u8; 20], + deposit_constant: [u8; 32], + combined_method_id: [u8; 32], + ) -> Self { + Self { + payout_tx_block_hash, + latest_block_hash, + challenge_sending_watchtowers, + deposit_constant, + combined_method_id, + } + } + + /// Calculates the Groth16 public input for the bridge circuit. + /// + /// This method computes the public input hash used in Groth16 proof verification + /// by combining all the input data in a specific order. + /// + /// # Returns + /// + /// Returns a `blake3::Hash` representing the public input. 
+ pub fn calculate_groth16_public_input(&self) -> blake3::Hash { + let concatenated_data = [ + self.payout_tx_block_hash, + self.latest_block_hash, + self.challenge_sending_watchtowers, + ] + .concat(); + let x = blake3::hash(&concatenated_data); + let hash_bytes = x.as_bytes(); + + let concat_journal = [self.deposit_constant, *hash_bytes].concat(); + + let journal_hash = blake3::hash(&concat_journal); + + let hash_bytes = journal_hash.as_bytes(); + + let concat_input = [self.combined_method_id, *hash_bytes].concat(); + + blake3::hash(&concat_input) + } + + /// Verifies a bridge circuit Groth16 proof. + /// + /// This method verifies that a given Groth16 proof is valid for this bridge circuit + /// by computing the expected public input and verifying the proof against it. + /// + /// # Arguments + /// + /// * `proof` - The Groth16 proof to verify + /// + /// # Returns + /// + /// Returns a `Result` containing `true` if the proof is valid, or an error if verification fails. + /// + /// # Errors + /// + /// This function will return an error if: + /// - Proof verification fails + /// - Public input calculation fails + /// - Verifying key retrieval fails + pub fn verify_bridge_circuit( + &self, + proof: ark_groth16::Proof, + ) -> Result { + let mut hasher = blake3::Hasher::new(); + hasher.update(&self.payout_tx_block_hash); + hasher.update(&self.latest_block_hash); + hasher.update(&self.challenge_sending_watchtowers); + let x = hasher.finalize(); + let x_bytes: [u8; 32] = x.into(); + + let mut hasher = blake3::Hasher::new(); + hasher.update(&self.deposit_constant); + hasher.update(&x_bytes); + let y = hasher.finalize(); + let y_bytes: [u8; 32] = y.into(); + + let mut hasher = blake3::Hasher::new(); + hasher.update(&self.combined_method_id); + hasher.update(&y_bytes); + let public_output = hasher.finalize(); + + let public_output_bytes: [u8; 32] = public_output.into(); + let public_input_scalar = + ark_bn254::Fr::from_be_bytes_mod_order(&public_output_bytes[0..31]); + 
+ let ark_vk = get_ark_verifying_key_prod(); + let ark_pvk = ark_groth16::prepare_verifying_key(&ark_vk); + + ark_groth16::Groth16::::verify_proof( + &ark_pvk, + &proof, + &[public_input_scalar], + ) + .map_err(|_| BridgeCircuitHostParamsError::ProofVerificationFailed) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, Hash)] +pub struct CircuitNetwork(pub Network); + +impl CircuitNetwork { + pub fn from(network: Network) -> Self { + Self(network) + } + + pub fn inner(&self) -> &Network { + &self.0 + } +} + +impl BorshSerialize for CircuitNetwork { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(&(self.0 as u8), writer) + } +} + +impl BorshDeserialize for CircuitNetwork { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let value = u8::deserialize_reader(reader)?; + let network = match value { + 0 => Network::Bitcoin, + 1 => Network::Testnet, + 2 => Network::Testnet4, + 3 => Network::Signet, + 4 => Network::Regtest, + _ => { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid Network", + )) + } + }; + Ok(Self(network)) + } +} + +impl Deref for CircuitNetwork { + type Target = Network; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for CircuitNetwork { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From for CircuitNetwork { + fn from(network: Network) -> Self { + Self(network) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, Hash)] +pub struct CircuitXOnlyPublicKey(pub XOnlyPublicKey); + +impl CircuitXOnlyPublicKey { + pub fn from(pk: XOnlyPublicKey) -> Self { + Self(pk) + } + + pub fn inner(&self) -> &XOnlyPublicKey { + &self.0 + } +} + +impl BorshSerialize for CircuitXOnlyPublicKey { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(&self.0.serialize(), writer) + } +} + +impl BorshDeserialize for CircuitXOnlyPublicKey { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + 
use bitcoin::secp256k1::XOnlyPublicKey as RawXOnly; + + let bytes: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?; + let raw_key = RawXOnly::from_slice(&bytes).map_err(|_| { + std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid XOnlyPublicKey") + })?; + Ok(Self(raw_key)) + } +} + +impl Deref for CircuitXOnlyPublicKey { + type Target = XOnlyPublicKey; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for CircuitXOnlyPublicKey { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From for CircuitXOnlyPublicKey { + fn from(pk: XOnlyPublicKey) -> Self { + Self(pk) + } +} diff --git a/bridge-circuit-host/src/utils.rs b/bridge-circuit-host/src/utils.rs new file mode 100644 index 000000000..71c993263 --- /dev/null +++ b/bridge-circuit-host/src/utils.rs @@ -0,0 +1,301 @@ +use ark_bn254::{Bn254, Fq, Fq2, G1Affine, G2Affine}; +use ark_groth16::VerifyingKey; +use bitcoin::{opcodes, script::Instruction, Transaction}; +use risc0_circuit_recursion::control_id::BN254_IDENTITY_CONTROL_ID; +use risc0_zkvm::{sha::Digestible, SuccinctReceiptVerifierParameters, SystemState}; +use sha2::{Digest, Sha256}; +use std::str::FromStr; + +/// This is the production Verifying Key of the STARK-to-BitVM2 Groth16 proof Circom circuit. 
+pub fn get_ark_verifying_key_prod() -> ark_groth16::VerifyingKey { + let alpha_g1 = G1Affine::new( + Fq::from_str( + "20491192805390485299153009773594534940189261866228447918068658471970481763042", + ) + .unwrap(), + Fq::from_str( + "9383485363053290200918347156157836566562967994039712273449902621266178545958", + ) + .unwrap(), + ); + + let beta_g2 = G2Affine::new( + Fq2::new( + Fq::from_str( + "6375614351688725206403948262868962793625744043794305715222011528459656738731", + ) + .unwrap(), + Fq::from_str( + "4252822878758300859123897981450591353533073413197771768651442665752259397132", + ) + .unwrap(), + ), + Fq2::new( + Fq::from_str( + "10505242626370262277552901082094356697409835680220590971873171140371331206856", + ) + .unwrap(), + Fq::from_str( + "21847035105528745403288232691147584728191162732299865338377159692350059136679", + ) + .unwrap(), + ), + ); + + let gamma_g2 = G2Affine::new( + Fq2::new( + Fq::from_str( + "10857046999023057135944570762232829481370756359578518086990519993285655852781", + ) + .unwrap(), + Fq::from_str( + "11559732032986387107991004021392285783925812861821192530917403151452391805634", + ) + .unwrap(), + ), + Fq2::new( + Fq::from_str( + "8495653923123431417604973247489272438418190587263600148770280649306958101930", + ) + .unwrap(), + Fq::from_str( + "4082367875863433681332203403145435568316851327593401208105741076214120093531", + ) + .unwrap(), + ), + ); + + let delta_g2 = G2Affine::new( + Fq2::new( + Fq::from_str( + "19928663713463533589216209779412278386769407450988172849262535478593422929698", + ) + .unwrap(), + Fq::from_str( + "19916519943909223643323234301580053157586699704876134064841182937085943926141", + ) + .unwrap(), + ), + Fq2::new( + Fq::from_str( + "4584600978911428195337731119171761277167808711062125916470525050324985708782", + ) + .unwrap(), + Fq::from_str( + "903010326261527050999816348900764705196723158942686053018929539519969664840", + ) + .unwrap(), + ), + ); + + let gamma_abc_g1 = vec![ + G1Affine::new( + 
Fq::from_str( + "6698887085900109660417671413804888867145870700073340970189635830129386206569", + ) + .unwrap(), + Fq::from_str( + "10431087902009508261375793061696708147989126018612269070732549055898651692604", + ) + .unwrap(), + ), + G1Affine::new( + Fq::from_str( + "20225609417084538563062516991929114218412992453664808591983416996515711931386", + ) + .unwrap(), + Fq::from_str( + "3236310410959095762960658876334609343091075204896196791007975095263664214628", + ) + .unwrap(), + ), + ]; + + VerifyingKey:: { + alpha_g1, + beta_g2, + gamma_g2, + delta_g2, + gamma_abc_g1, + } +} + +/// This is the risc0 dev mode Verifying Key of the STARK-to-BitVM2 Groth16 proof Circom circuit. +/// The circuit doesn't verify the succinct proof. +pub fn get_ark_verifying_key_dev_mode_bridge() -> ark_groth16::VerifyingKey { + let alpha_g1 = G1Affine::new( + Fq::from_str( + "16428432848801857252194528405604668803277877773566238944394625302971855135431", + ) + .unwrap(), + Fq::from_str( + "16846502678714586896801519656441059708016666274385668027902869494772365009666", + ) + .unwrap(), + ); + + let beta_g2 = G2Affine::new( + Fq2::new( + Fq::from_str( + "16348171800823588416173124589066524623406261996681292662100840445103873053252", + ) + .unwrap(), + Fq::from_str( + "3182164110458002340215786955198810119980427837186618912744689678939861918171", + ) + .unwrap(), + ), + Fq2::new( + Fq::from_str( + "19687132236965066906216944365591810874384658708175106803089633851114028275753", + ) + .unwrap(), + Fq::from_str( + "4920802715848186258981584729175884379674325733638798907835771393452862684714", + ) + .unwrap(), + ), + ); + + let gamma_g2 = G2Affine::new( + Fq2::new( + Fq::from_str( + "10857046999023057135944570762232829481370756359578518086990519993285655852781", + ) + .unwrap(), + Fq::from_str( + "11559732032986387107991004021392285783925812861821192530917403151452391805634", + ) + .unwrap(), + ), + Fq2::new( + Fq::from_str( + 
"8495653923123431417604973247489272438418190587263600148770280649306958101930", + ) + .unwrap(), + Fq::from_str( + "4082367875863433681332203403145435568316851327593401208105741076214120093531", + ) + .unwrap(), + ), + ); + + let delta_g2 = G2Affine::new( + Fq2::new( + Fq::from_str( + "10344314270577662144722843760227508818741873611994191144741344525392186054338", + ) + .unwrap(), + Fq::from_str( + "8978205513343000086769980417601674188045305036608293363718735995778381961042", + ) + .unwrap(), + ), + Fq2::new( + Fq::from_str( + "6146189823045836375835894813061243921076479945213547666722317462322308723161", + ) + .unwrap(), + Fq::from_str( + "2284851597903171792019116404381013452010819014851726552415237662410982114085", + ) + .unwrap(), + ), + ); + + let gamma_abc_g1 = vec![ + G1Affine::new( + Fq::from_str( + "16750568820360300560824181364652256812515534588114371155103059323541578267", + ) + .unwrap(), + Fq::from_str( + "5696152291317012726307566910263567359492805895110755470946585143294904791489", + ) + .unwrap(), + ), + G1Affine::new( + Fq::from_str( + "21186587675978507462548352788288327905178369542654940794501214693473789853405", + ) + .unwrap(), + Fq::from_str( + "9059307258716845325004258585264983974929512424027765090293033859278411111397", + ) + .unwrap(), + ), + ]; + + VerifyingKey:: { + alpha_g1, + beta_g2, + gamma_g2, + delta_g2, + gamma_abc_g1, + } +} + +// Clementine do not use the runtime option to determine dev mode which is newly added in risc0_zkvm. +// Instead, it uses the environment variable RISC0_DEV_MODE to determine if it is in dev mode. +// However is_dev_mode function from risc0_zkvm is deprecated. +// So we implement our own version of is_dev_mode. 
+pub fn is_dev_mode() -> bool { + std::env::var("RISC0_DEV_MODE") + .ok() + .map(|x| x.to_lowercase()) + .filter(|x| x == "1" || x == "true" || x == "yes") + .is_some() +} + +pub fn get_verifying_key() -> ark_groth16::VerifyingKey { + if is_dev_mode() { + get_ark_verifying_key_dev_mode_bridge() + } else { + get_ark_verifying_key_prod() + } +} + +/// Sha256(control_root, pre_state_digest, post_state_digest, id_bn254_fr) +pub fn calculate_succinct_output_prefix(method_id: &[u8]) -> [u8; 32] { + let succinct_verifier_params = SuccinctReceiptVerifierParameters::default(); + let succinct_control_root = succinct_verifier_params.control_root; + let mut succinct_control_root_bytes: [u8; 32] = + succinct_control_root.as_bytes().try_into().unwrap(); + for byte in succinct_control_root_bytes.iter_mut() { + *byte = byte.reverse_bits(); + } + let pre_state_bytes = method_id.to_vec(); + let control_id_bytes: [u8; 32] = BN254_IDENTITY_CONTROL_ID.into(); + + // Expected post state for an execution that halted successfully + let post_state: SystemState = risc0_binfmt::SystemState { + pc: 0, + merkle_root: risc0_zkp::core::digest::Digest::default(), + }; + let post_state_bytes: [u8; 32] = post_state.digest().into(); + + let mut hasher = Sha256::new(); + hasher.update(succinct_control_root_bytes); + hasher.update(pre_state_bytes); + hasher.update(post_state_bytes); + hasher.update(control_id_bytes); + let result: [u8; 32] = hasher.finalize().into(); + + result +} + +pub fn total_work_from_wt_tx(wt_tx: &Transaction) -> [u8; 16] { + let output = wt_tx.output[2].clone(); + let mut instructions = output.script_pubkey.instructions(); + if let Some(Ok(Instruction::Op(opcodes::all::OP_RETURN))) = instructions.next() { + if let Some(Ok(Instruction::PushBytes(data))) = instructions.next() { + let data_bytes = data.as_bytes(); + let total_work: [u8; 16] = data_bytes[64..] 
+ .try_into() + .expect("Expected total work data to be exactly 16 bytes long after OP_RETURN"); + return total_work; + } + panic!("Expected OP_RETURN followed by data"); + } + panic!("Expected OP_RETURN instruction in the transaction output script"); +} diff --git a/circuits-lib/Cargo.toml b/circuits-lib/Cargo.toml new file mode 100644 index 000000000..1f87e3242 --- /dev/null +++ b/circuits-lib/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "circuits-lib" +version = "0.1.0" +edition = "2021" + +[dependencies] +sha2 = { workspace = true, default-features = false } +serde = { workspace = true, default-features = false, features = ["derive"] } + +risc0-zkvm = { workspace = true, features = ["std"] } +risc0-groth16 = { workspace = true } + +borsh = { workspace = true, features = ["derive"] } +bitcoin = { workspace = true, features = ["rand-std", "serde"] } + +ark-bn254 = { workspace = true } +ark-ff = { workspace = true } +ark-ec = { workspace = true } +ark-std = { workspace = true } +ark-crypto-primitives = { workspace = true } +ark-serialize = { workspace = true } +ark-relations = { workspace = true } +ark-groth16 = { workspace = true, default-features = false } + +num-bigint = { workspace = true } +num-traits = { workspace = true } +hex = { workspace = true } +once_cell = { workspace = true } +hex-literal = { workspace = true } +jmt = { workspace = true } + +eyre = { workspace = true } +tracing = { workspace = true } + +alloy-primitives = { workspace = true, features = ["serde"] } +alloy-rpc-types = { workspace = true } + +serde_json = { workspace = true } +bincode = { workspace = true } +alloy-rpc-types-eth = { workspace = true, features = ["serde"] } +derive_more = { workspace = true, features = ["display"]} +crypto-bigint = { workspace = true } +blake3 = { workspace = true } +itertools = { workspace = true } +k256 = { workspace = true } +lazy_static = { workspace = true } + +citrea-sov-rollup-interface = { workspace = true } + +[features] +default = [] 
+use-test-vk = [] # Use the test verification key - this is used for testing purposes only diff --git a/circuits-lib/src/bridge_circuit/bin/prepared_vk.bin b/circuits-lib/src/bridge_circuit/bin/prepared_vk.bin new file mode 100644 index 000000000..69632f0db Binary files /dev/null and b/circuits-lib/src/bridge_circuit/bin/prepared_vk.bin differ diff --git a/circuits-lib/src/bridge_circuit/bin/test_prepared_vk.bin b/circuits-lib/src/bridge_circuit/bin/test_prepared_vk.bin new file mode 100644 index 000000000..8f32ff206 Binary files /dev/null and b/circuits-lib/src/bridge_circuit/bin/test_prepared_vk.bin differ diff --git a/circuits-lib/src/bridge_circuit/constants.rs b/circuits-lib/src/bridge_circuit/constants.rs new file mode 100644 index 000000000..19212b9be --- /dev/null +++ b/circuits-lib/src/bridge_circuit/constants.rs @@ -0,0 +1,138 @@ +//! # Bridge Circuit Constants +//! +//! This module contains constants used in the bridge circuit, including method IDs for different networks, +//! Groth16 related constants, and prepared verification keys. These constants are essential for the operation +//! of the bridge circuit and are used in various cryptographic operations. +//! ## Work-Only Circuit Method IDs +//! The method IDs for different networks are used to identify the specific work-only circuits. +//! They are used for verifying the total work done on a Bitcoin blockchain for a given Watchtower challenge. +//! ## Groth16 Related Constants +//! These constants are used in the Groth16 proof verification process. +//! They include the post state, input, assumptions, claim tag, and output tag. +//! They are used to recover all five public outputs of the Groth16 proof when Risc0 pipeline is used +//! for generating the proof. +//! ## Verification Keys +//! The prepared verification keys are used to verify the Groth16 proofs. They are included in +//! the binary format. The `get_prepared_vk` function can be used to retrieve the appropriate +//! 
verification key according to the feature flags. + +use ark_bn254::Fr; +use ark_ff::BigInt; +use hex_literal::hex; + +pub const REGTEST_LC_IMAGE_ID: [u8; 32] = + hex!("e02f42a01d0cb5a6dbc2f5d26b28a9881808a3023c8330a5aea2a0bea754c216"); + +pub const DEVNET_LC_IMAGE_ID: [u8; 32] = + hex!("3d8dc93f82c5aca7ed513f30eebf6fc1580fc3ad3e7b1e65912fc2ace1a9a3f6"); + +pub const TESTNET_LC_IMAGE_ID: [u8; 32] = + hex!("19b3bc65347ca0499f2a8f71117fe924df4b9c1c2a93eb4dd70df939490256fe"); + +// MAINNET LC_IMAGE_ID is not yet provided by CITREA, so we use a placeholder. +pub const MAINNET_LC_IMAGE_ID: [u8; 32] = + hex!("0000000000000000000000000000000000000000000000000000000000000000"); + +// Work-only circuit method IDs for different networks. +pub static MAINNET_WORK_ONLY_METHOD_ID: [u8; 32] = + hex!("7a5e19c21ae060be36ddff77c8f07849d8fc8b0b240384e8801a66e328b2ee22"); +pub static TESTNET4_WORK_ONLY_METHOD_ID: [u8; 32] = + hex!("8d6d75f630594a20ec0c55da0a9ba89a7cceffab9cb492eb9b3689d8c1e44c51"); +pub static REGTEST_WORK_ONLY_METHOD_ID: [u8; 32] = + hex!("ee89251fc3344efd8ab2f9ff3dbf7a4d509cb3223b374af70485efd1d3810edd"); +pub static SIGNET_WORK_ONLY_METHOD_ID: [u8; 32] = + hex!("dc4600ac1fba430ff3d86e2776eb415a796c6b210a2c8e434b30a4f49727ac2d"); + +// GROTH16 RELATED CONSTANTS +pub static POST_STATE: [u8; 32] = + hex_literal::hex!("a3acc27117418996340b84e5a90f3ef4c49d22c79e44aad822ec9c313e1eb8e2"); +pub static INPUT: [u8; 32] = + hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000000"); +pub static ASSUMPTIONS: [u8; 32] = + hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000000"); +pub static CLAIM_TAG: [u8; 32] = + hex_literal::hex!("cb1fefcd1f2d9a64975cbbbf6e161e2914434b0cbb9960b84df5d717e86b48af"); // SHA256 hash of "risc0.ReceiptClaim" +pub static OUTPUT_TAG: [u8; 32] = + hex_literal::hex!("77eafeb366a78b47747de0d7bb176284085ff5564887009a5be63da32d3559d4"); // SHA256 hash of "risc0.Output" + +pub const A0_BIGINT: 
BigInt<4> = BigInt::new([162754123530195662, 1949396425256203034, 0, 0]);
+pub const A0_ARK: ark_ff::Fp<ark_ff::MontBackend<ark_bn254::FrConfig, 4>, 4> = Fr::new(A0_BIGINT);
+
+pub const A1_BIGINT: BigInt<4> = BigInt::new([2457364108815709557, 2960371475104660934, 0, 0]);
+pub const A1_ARK: ark_ff::Fp<ark_ff::MontBackend<ark_bn254::FrConfig, 4>, 4> = Fr::new(A1_BIGINT);
+
+pub const BN_254_CONTROL_ID_BIGINT: BigInt<4> = BigInt::new([
+    10066737433256753856,
+    15970898588890169697,
+    12996428817291790227,
+    307492062473808767,
+]);
+pub const BN_254_CONTROL_ID_ARK: ark_ff::Fp<ark_ff::MontBackend<ark_bn254::FrConfig, 4>, 4> =
+    Fr::new(BN_254_CONTROL_ID_BIGINT);
+
+pub const PREPARED_VK: &[u8] = include_bytes!("bin/prepared_vk.bin");
+
+pub const TEST_PREPARED_VK: &[u8] = include_bytes!("bin/test_prepared_vk.bin");
+
+#[cfg(feature = "use-test-vk")]
+pub fn get_prepared_vk() -> &'static [u8] {
+    TEST_PREPARED_VK
+}
+
+#[cfg(not(feature = "use-test-vk"))]
+pub fn get_prepared_vk() -> &'static [u8] {
+    PREPARED_VK
+}
+
+#[cfg(test)]
+mod tests {
+    use ark_bn254::Fr;
+    use ark_ff::PrimeField;
+    use risc0_zkvm::Digest;
+
+    use crate::bridge_circuit::constants::{A0_BIGINT, A1_BIGINT};
+
+    // This test checks that the A0 and A1 constants match the expected values derived from the control root
+    // of the Groth16 verifier parameters. If they do not match, it indicates that the constants need to be updated
+    // in the `constants.rs` file. This is important because the A0 and A1 constants are used in the bridge circuit to verify the Groth16
+    // proof, and any mismatch could lead to incorrect verification results.
+    #[test]
+    fn test_a0_and_a1() {
+        let verifier_context = risc0_zkvm::VerifierContext::default();
+        let params = verifier_context
+            .groth16_verifier_parameters
+            .as_ref()
+            .unwrap();
+        let (a0, a1) = split_digest(params.control_root);
+
+        let a0_bigint = a0.into_bigint();
+        let a1_bigint = a1.into_bigint();
+
+        assert_eq!((a0_bigint, a1_bigint), (A0_BIGINT, A1_BIGINT),
+            "A0 and A1 do not match the expected values, please update the a0 and a1 constants in constants.rs.
a0: {:?}, a1: {:?}",
+            a0_bigint.0, a1_bigint.0);
+    }
+
+    // This is the exact same implementation as in risc0_groth16, but we need to re-implement it here to change
+    // the return type. Please check the original implementation each time risc0 version is updated.
+    fn split_digest(d: Digest) -> (Fr, Fr) {
+        let big_endian: Vec<u8> = d.as_bytes().to_vec().iter().rev().cloned().collect();
+        let middle = big_endian.len() / 2;
+        let (b, a) = big_endian.split_at(middle);
+        (
+            Fr::from_be_bytes_mod_order(&from_u256_hex(&hex::encode(a))),
+            Fr::from_be_bytes_mod_order(&from_u256_hex(&hex::encode(b))),
+        )
+    }
+
+    fn from_u256_hex(value: &str) -> Vec<u8> {
+        to_fixed_array(hex::decode(value).unwrap()).to_vec()
+    }
+
+    fn to_fixed_array(input: Vec<u8>) -> [u8; 32] {
+        let mut fixed_array = [0u8; 32];
+        let start = core::cmp::max(32, input.len()) - core::cmp::min(32, input.len());
+        fixed_array[start..].copy_from_slice(&input[input.len().saturating_sub(32)..]);
+        fixed_array
+    }
+}
diff --git a/circuits-lib/src/bridge_circuit/groth16.rs b/circuits-lib/src/bridge_circuit/groth16.rs
new file mode 100644
index 000000000..8ac48fec1
--- /dev/null
+++ b/circuits-lib/src/bridge_circuit/groth16.rs
@@ -0,0 +1,170 @@
+//! # Groth16 Proof Struct
+//! This module defines the `CircuitGroth16Proof` struct, which represents a Groth16 proof
+//! for the bridge circuit. It includes methods for creating a proof from a given Risc0 seal
+//! and converting it to a compressed format. The proof consists of three components: `a`,
+//! `b`, and `c`, which are points on the elliptic curve used in the Groth16 protocol.
+//! ## Key Components
+//! - **G1 and G2 Points:** The proof consists of points `a` and `c` in G1, and point `b` in G2.
+//! - **Serialization/Deserialization:** The proof can be serialized to a compressed format
+//! and deserialized back, allowing for efficient storage and transmission.
+//! - **Conversion to Groth16 Proof:** The `CircuitGroth16Proof` can be converted to a Groth16 proof
+//! 
for use in verification.
+
+use ark_bn254::Bn254;
+use ark_ff::{Field, PrimeField};
+use ark_groth16::Proof;
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError};
+type G1 = ark_bn254::G1Affine;
+type G2 = ark_bn254::G2Affine;
+
+/// CircuitGroth16Proof represents a Groth16 proof for the circuit.
+#[derive(Copy, Clone, Debug)]
+pub struct CircuitGroth16Proof {
+    a: G1,
+    b: G2,
+    c: G1,
+}
+
+impl CircuitGroth16Proof {
+    pub fn new(a: G1, b: G2, c: G1) -> CircuitGroth16Proof {
+        CircuitGroth16Proof { a, b, c }
+    }
+
+    /// Creates a new CircuitGroth16Proof from the given risc0 seal, which
+    /// itself is a 256-byte array.
+    pub fn from_seal(seal: &[u8; 256]) -> CircuitGroth16Proof {
+        let a = G1::new(
+            ark_bn254::Fq::from_be_bytes_mod_order(&seal[0..32]),
+            ark_bn254::Fq::from_be_bytes_mod_order(&seal[32..64]),
+        );
+
+        let b = G2::new(
+            ark_bn254::Fq2::from_base_prime_field_elems([
+                ark_bn254::Fq::from_be_bytes_mod_order(&seal[96..128]),
+                ark_bn254::Fq::from_be_bytes_mod_order(&seal[64..96]),
+            ])
+            .unwrap(),
+            ark_bn254::Fq2::from_base_prime_field_elems([
+                ark_bn254::Fq::from_be_bytes_mod_order(&seal[160..192]),
+                ark_bn254::Fq::from_be_bytes_mod_order(&seal[128..160]),
+            ])
+            .unwrap(),
+        );
+
+        let c = G1::new(
+            ark_bn254::Fq::from_be_bytes_mod_order(&seal[192..224]),
+            ark_bn254::Fq::from_be_bytes_mod_order(&seal[224..256]),
+        );
+
+        CircuitGroth16Proof::new(a, b, c)
+    }
+
+    pub fn from_compressed(
+        compressed: &[u8; 128],
+    ) -> Result<CircuitGroth16Proof, SerializationError> {
+        let a_compressed = &compressed[0..32];
+        let b_compressed = &compressed[32..96];
+        let c_compressed = &compressed[96..128];
+        let a = ark_bn254::G1Affine::deserialize_compressed(a_compressed)?;
+        let b = ark_bn254::G2Affine::deserialize_compressed(b_compressed)?;
+        let c = ark_bn254::G1Affine::deserialize_compressed(c_compressed)?;
+
+        Ok(CircuitGroth16Proof::new(a, b, c))
+    }
+
+    pub fn to_compressed(&self) -> Result<[u8; 128], SerializationError> {
+        let mut a_compressed =
[0u8; 32];
+        let mut b_compressed = [0u8; 64];
+        let mut c_compressed = [0u8; 32];
+
+        ark_bn254::G1Affine::serialize_with_mode(&self.a, &mut a_compressed[..], Compress::Yes)
+            .expect("Serialization should not fail for valid curve points");
+        ark_bn254::G2Affine::serialize_with_mode(&self.b, &mut b_compressed[..], Compress::Yes)
+            .expect("Serialization should not fail for valid curve points");
+        ark_bn254::G1Affine::serialize_with_mode(&self.c, &mut c_compressed[..], Compress::Yes)
+            .expect("Serialization should not fail for valid curve points");
+
+        let mut compressed = [0u8; 128];
+        compressed[0..32].copy_from_slice(&a_compressed);
+        compressed[32..96].copy_from_slice(&b_compressed);
+        compressed[96..128].copy_from_slice(&c_compressed);
+
+        Ok(compressed)
+    }
+
+    pub fn a(&self) -> &G1 {
+        &self.a
+    }
+
+    pub fn b(&self) -> &G2 {
+        &self.b
+    }
+
+    pub fn c(&self) -> &G1 {
+        &self.c
+    }
+}
+
+impl From<CircuitGroth16Proof> for Proof<Bn254> {
+    fn from(g16_seal: CircuitGroth16Proof) -> Self {
+        Proof::<Bn254> {
+            a: g16_seal.a,
+            b: g16_seal.b,
+            c: g16_seal.c,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use ark_ff::UniformRand;
+    use ark_std::test_rng;
+
+    fn random_g1() -> G1 {
+        let mut rng = test_rng();
+        G1::rand(&mut rng)
+    }
+
+    fn random_g2() -> G2 {
+        let mut rng = test_rng();
+        G2::rand(&mut rng)
+    }
+
+    #[test]
+    fn test_new_and_accessors() {
+        let a = random_g1();
+        let b = random_g2();
+        let c = random_g1();
+
+        let proof = CircuitGroth16Proof::new(a, b, c);
+        assert_eq!(proof.a(), &a);
+        assert_eq!(proof.b(), &b);
+        assert_eq!(proof.c(), &c);
+    }
+
+    #[test]
+    fn test_to_compressed_and_from_compressed() {
+        for _ in 0..16 {
+            let proof = CircuitGroth16Proof::new(random_g1(), random_g2(), random_g1());
+
+            let compressed = proof.to_compressed().expect("Compression failed");
+            let decompressed_proof =
+                CircuitGroth16Proof::from_compressed(&compressed).expect("Decompression failed");
+
+            assert_eq!(proof.a(), decompressed_proof.a());
+            assert_eq!(proof.b(),
decompressed_proof.b());
+            assert_eq!(proof.c(), decompressed_proof.c());
+        }
+    }
+
+    #[test]
+    fn test_conversion_to_proof_bn254() {
+        let proof = CircuitGroth16Proof::new(random_g1(), random_g2(), random_g1());
+        let groth16_proof: Proof<Bn254> = proof.into();
+
+        assert_eq!(proof.a(), &groth16_proof.a);
+        assert_eq!(proof.b(), &groth16_proof.b);
+        assert_eq!(proof.c(), &groth16_proof.c);
+    }
+}
diff --git a/circuits-lib/src/bridge_circuit/groth16_verifier.rs b/circuits-lib/src/bridge_circuit/groth16_verifier.rs
new file mode 100644
index 000000000..ee9fede35
--- /dev/null
+++ b/circuits-lib/src/bridge_circuit/groth16_verifier.rs
@@ -0,0 +1,159 @@
+//! # Bridge Circuit Groth16 Verifier
+//! This module implements the Groth16 verifier for the bridge circuit.
+//! It includes functions to create digests for the work-only circuit output,
+//! verify Groth16 proofs, and handle the conversion of hexadecimal strings to decimal.
+//! The verifier uses the prepared verification keys and constants defined in the `constants.rs` module.
+//!
+//! ## Key Functions
+//! - `create_journal_digest`: Creates a digest for the journal of the work-only circuit output.
+//! - `create_output_digest`: Creates an output digest for the work-only circuit output.
+//! - `create_claim_digest`: Creates a claim digest for the work-only circuit output.
+//! - `CircuitGroth16WithTotalWork`: A struct that encapsulates the Groth16 proof with total work and genesis state hash.
+//! - `verify`: Verifies the Groth16 proof against the prepared verification key and public inputs.
+//! - `to_decimal`: Converts a hexadecimal string to a decimal string representation.
+ +use ark_bn254::{Bn254, Fr}; +use ark_groth16::PreparedVerifyingKey; +use ark_groth16::Proof; +use ark_serialize::CanonicalDeserialize; +use num_bigint::BigUint; +use num_traits::Num; + +use super::constants::{ + get_prepared_vk, A0_ARK, A1_ARK, ASSUMPTIONS, BN_254_CONTROL_ID_ARK, CLAIM_TAG, INPUT, + OUTPUT_TAG, POST_STATE, +}; +use super::groth16::CircuitGroth16Proof; +use super::structs::WorkOnlyCircuitOutput; +use hex::ToHex; +use sha2::{Digest, Sha256}; +use std::str::FromStr; + +/// Creates a digest for the journal of the work-only circuit output. +pub fn create_journal_digest(work_only_circuit_output: &WorkOnlyCircuitOutput) -> [u8; 32] { + let pre_digest = borsh::to_vec(work_only_circuit_output).unwrap(); + Sha256::digest(pre_digest).into() +} + +/// Creates an output digest for the work-only circuit output. +pub fn create_output_digest(work_only_circuit_output: &WorkOnlyCircuitOutput) -> [u8; 32] { + let journal_digest: [u8; 32] = create_journal_digest(work_only_circuit_output); + let len_output: u16 = 2; + + let output_pre_digest: [u8; 98] = [ + &OUTPUT_TAG, + &journal_digest[..], + &ASSUMPTIONS[..], + &len_output.to_le_bytes(), + ] + .concat() + .try_into() + .expect("Slice has correct length"); + + Sha256::digest(output_pre_digest).into() +} + +/// Creates a claim digest for the work-only circuit output. +pub fn create_claim_digest(output_digest: &[u8; 32], pre_state: &[u8; 32]) -> [u8; 32] { + let data: [u8; 8] = [0; 8]; + + let claim_len: u16 = 4; + + let concatenated = [ + &CLAIM_TAG, + &INPUT, + pre_state, + &POST_STATE, + output_digest, + &data[..], + &claim_len.to_le_bytes(), + ] + .concat(); + + let mut claim_digest = Sha256::digest(concatenated); + claim_digest.reverse(); + + claim_digest.into() +} + +/// Groth16 proof with total work and genesis state hash. In Clementine, this is provided by +/// the watchtowers who challenge the operator whom they suspect of malicious behavior. 
Just
+/// by knowing the Groth16 proof and the total work, we can reconstruct the public outputs of
+/// the proof and verify it against the Verifying Key (VK) of the Groth16 proof.
+pub struct CircuitGroth16WithTotalWork {
+    groth16_seal: CircuitGroth16Proof,
+    total_work: [u8; 16],
+    genesis_state_hash: [u8; 32],
+}
+
+impl CircuitGroth16WithTotalWork {
+    /// Creates a new instance of `CircuitGroth16WithTotalWork`.
+    pub fn new(
+        groth16_seal: CircuitGroth16Proof,
+        total_work: [u8; 16],
+        genesis_state_hash: [u8; 32],
+    ) -> CircuitGroth16WithTotalWork {
+        CircuitGroth16WithTotalWork {
+            groth16_seal,
+            total_work,
+            genesis_state_hash,
+        }
+    }
+
+    /// Given the `pre_state` (which is actually the `method ID` of the work-only circuit),
+    /// verifies the Groth16 proof against the prepared Verifying Key (VK) and the public inputs.
+    pub fn verify(&self, pre_state: &[u8; 32]) -> bool {
+        let ark_proof: Proof<Bn254> = self.groth16_seal.into();
+
+        let prepared_vk: &[u8] = get_prepared_vk();
+
+        let prepared_vk: PreparedVerifyingKey<Bn254> =
+            CanonicalDeserialize::deserialize_uncompressed(prepared_vk).unwrap();
+
+        let output_digest = create_output_digest(&WorkOnlyCircuitOutput {
+            work_u128: self.total_work,
+            genesis_state_hash: self.genesis_state_hash,
+        });
+
+        let claim_digest: [u8; 32] = create_claim_digest(&output_digest, pre_state);
+
+        let claim_digest_hex: String = claim_digest.encode_hex();
+        let c0_str = &claim_digest_hex[32..64];
+        let c1_str = &claim_digest_hex[0..32];
+
+        let c0_dec = to_decimal(c0_str).unwrap();
+        let c1_dec = to_decimal(c1_str).unwrap();
+
+        let c0 = Fr::from_str(&c0_dec).unwrap();
+        let c1 = Fr::from_str(&c1_dec).unwrap();
+
+        let public_inputs = vec![A0_ARK, A1_ARK, c0, c1, BN_254_CONTROL_ID_ARK];
+
+        ark_groth16::Groth16::<Bn254>::verify_proof(&prepared_vk, &ark_proof, &public_inputs)
+            .unwrap()
+    }
+}
+
+/// Converts a hexadecimal string to a decimal string representation.
+pub fn to_decimal(s: &str) -> Option<String> {
+    let int = BigUint::from_str_radix(s, 16).ok();
+    int.map(|n| n.to_str_radix(10))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_to_decimal() {
+        assert_eq!(to_decimal("0"), Some("0".to_string()));
+        assert_eq!(to_decimal("1"), Some("1".to_string()));
+        assert_eq!(to_decimal("a"), Some("10".to_string()));
+        assert_eq!(to_decimal("f"), Some("15".to_string()));
+        assert_eq!(to_decimal("10"), Some("16".to_string()));
+        assert_eq!(to_decimal("1f"), Some("31".to_string()));
+        assert_eq!(to_decimal("100"), Some("256".to_string()));
+        assert_eq!(to_decimal("1ff"), Some("511".to_string()));
+        assert_eq!(to_decimal("citrea"), None);
+    }
+}
diff --git a/circuits-lib/src/bridge_circuit/lc_proof.rs b/circuits-lib/src/bridge_circuit/lc_proof.rs
new file mode 100644
index 000000000..fd4dd1b1c
--- /dev/null
+++ b/circuits-lib/src/bridge_circuit/lc_proof.rs
@@ -0,0 +1,113 @@
+//! # Light Client Proof Verifier
+//! This module implements the light client proof verifier for the bridge circuit.
+//! It includes functions to verify light client proofs and extracting the light client circuit output.
+
+use super::{
+    constants::{
+        DEVNET_LC_IMAGE_ID, MAINNET_LC_IMAGE_ID, REGTEST_LC_IMAGE_ID, TESTNET_LC_IMAGE_ID,
+    },
+    structs::LightClientProof,
+};
+use citrea_sov_rollup_interface::zk::light_client_proof::output::LightClientCircuitOutput;
+use risc0_zkvm::guest::env;
+
+pub const LC_IMAGE_ID: [u8; 32] = {
+    match option_env!("BITCOIN_NETWORK") {
+        Some(network) if matches!(network.as_bytes(), b"regtest") => REGTEST_LC_IMAGE_ID,
+        Some(network) if matches!(network.as_bytes(), b"signet") => DEVNET_LC_IMAGE_ID,
+        Some(network) if matches!(network.as_bytes(), b"testnet4") => TESTNET_LC_IMAGE_ID,
+        Some(network) if matches!(network.as_bytes(), b"mainnet") => MAINNET_LC_IMAGE_ID,
+        None => MAINNET_LC_IMAGE_ID,
+        _ => panic!("Unsupported BITCOIN_NETWORK environment variable"),
+    }
+};
+
+/// Verifies the light client proof and returns the light client circuit output.
+pub fn lc_proof_verifier(light_client_proof: LightClientProof) -> LightClientCircuitOutput {
+    env::verify(LC_IMAGE_ID, &light_client_proof.lc_journal).unwrap();
+
+    let light_client_circuit_output: LightClientCircuitOutput =
+        borsh::from_slice(light_client_proof.lc_journal.as_slice())
+            .expect("Failed to deserialize light client circuit output");
+
+    assert!(
+        check_method_id(&light_client_circuit_output, LC_IMAGE_ID),
+        "Light client proof method ID does not match the expected LC image ID"
+    );
+
+    light_client_circuit_output
+}
+
+pub fn check_method_id(
+    light_client_circuit_output: &LightClientCircuitOutput,
+    lc_image_id_circuit: [u8; 32],
+) -> bool {
+    let light_client_method_id_bytes: [u8; 32] = light_client_circuit_output
+        .light_client_proof_method_id
+        .iter()
+        .flat_map(|&x| x.to_le_bytes())
+        .collect::<Vec<u8>>()
+        .try_into()
+        .expect("Conversion from [u32; 8] to [u8; 32] cannot fail");
+
+    light_client_method_id_bytes == lc_image_id_circuit
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+    use risc0_zkvm::Receipt;
+
+    #[test]
+    fn test_lc_proof_verifier() {
+        let lcp_receipt_bytes =
include_bytes!("../../test_data/lcp_receipt.bin"); + let lcp_receipt: Receipt = borsh::from_slice(lcp_receipt_bytes).unwrap(); + + let light_client_proof: LightClientProof = LightClientProof { + l2_height: "0x0".to_string(), + lc_journal: lcp_receipt.journal.bytes.to_vec(), + }; + + let light_client_circuit_output: LightClientCircuitOutput = + borsh::from_slice(light_client_proof.lc_journal.as_slice()) + .expect("Failed to deserialize light client circuit output"); + + assert!( + check_method_id(&light_client_circuit_output, REGTEST_LC_IMAGE_ID), + "Light client proof method ID does not match the expected LC image ID" + ); + + println!("LCP Receipt: {:?}", lcp_receipt.clone()); + + lcp_receipt.verify(REGTEST_LC_IMAGE_ID).unwrap(); + + let light_client_proof: LightClientProof = LightClientProof { + l2_height: "0x0".to_string(), + lc_journal: lcp_receipt.journal.bytes.to_vec(), + }; + + let light_client_circuit_output: LightClientCircuitOutput = + borsh::from_slice(light_client_proof.lc_journal.as_slice()) + .expect("Failed to deserialize light client circuit output"); + + assert!( + check_method_id(&light_client_circuit_output, REGTEST_LC_IMAGE_ID), + "Light client proof method ID does not match the expected LC image ID" + ); + + let expected_state_root = + "8b1e363db80a6c20eb1a31db96d185eb7d5bb4f1e0ef458eb6ae288d58139ca5"; + let expected_last_block_hash = + "6d378db6ada554cb29e67826a320be79bdd3f2138447c24302d6b31dd8951552"; + + assert_eq!( + hex::encode(light_client_circuit_output.l2_state_root), + expected_state_root + ); + assert_eq!( + hex::encode(light_client_circuit_output.latest_da_state.block_hash), + expected_last_block_hash + ); + } +} diff --git a/circuits-lib/src/bridge_circuit/merkle_tree.rs b/circuits-lib/src/bridge_circuit/merkle_tree.rs new file mode 100644 index 000000000..ed2cc4e43 --- /dev/null +++ b/circuits-lib/src/bridge_circuit/merkle_tree.rs @@ -0,0 +1,467 @@ +//! # Bitcoin Merkle Tree Implementation +//! 
This module implements a Bitcoin Merkle tree structure, which is used to verify the integrity of transactions in a block.
+//! It provides functions to construct the tree, calculate the root hash, and verify the inclusion of transactions.
+//! The tree is designed to be secure against certain types of attacks, particularly in the context of Simplified Payment Verification (SPV).
+//! It also includes a "mid-state" tree for generating secure SPV proofs.
+//! **⚠️ Warning:** Use the `new_mid_state` function for secure SPV proofs, as the standard tree is vulnerable to certain attacks.
+
+use borsh::{BorshDeserialize, BorshSerialize};
+use serde::{Deserialize, Serialize};
+
+use crate::common::hashes::{calculate_double_sha256, calculate_sha256};
+
+use super::transaction::CircuitTransaction;
+
+/// Represents a Bitcoin Merkle tree.
+#[derive(Debug, Clone)]
+pub struct BitcoinMerkleTree {
+    nodes: Vec<Vec<[u8; 32]>>,
+}
+
+impl BitcoinMerkleTree {
+    /// Constructs a standard Bitcoin Merkle tree.
+    /// Leaf nodes are transaction IDs (txids), which are double-SHA256 hashes of transaction data.
+    /// Internal nodes are formed by `DSHA256(LeftChildHash || RightChildHash)`.
+    /// WARNING! Do not use this tree to generate SPV proofs, as it is vulnerable to certain attacks. See
+    /// `new_mid_state`.
+ pub fn new(txids: Vec<[u8; 32]>) -> Self { + if txids.len() == 1 { + // root is the coinbase txid + return BitcoinMerkleTree { nodes: vec![txids] }; + } + + let mut tree = BitcoinMerkleTree { nodes: vec![txids] }; + + // Construct the tree + let mut curr_level_offset: usize = 1; + let mut prev_level_size = tree.nodes[0].len(); + let mut prev_level_index_offset = 0; + let mut preimage: [u8; 64] = [0; 64]; + while prev_level_size > 1 { + tree.nodes.push(vec![]); + for i in 0..(prev_level_size / 2) { + if tree.nodes[curr_level_offset - 1][prev_level_index_offset + i * 2] + == tree.nodes[curr_level_offset - 1][prev_level_index_offset + i * 2 + 1] + { + // This check helps prevent certain attacks involving duplicate hashes, + // although the primary defense against CVE-2012-2459 and similar issues + // in SPV often requires more structural changes or careful proof verification, + // which the `new_mid_state` tree aims to provide. For more, please check: + // https://github.com/bitcoin/bitcoin/blob/31d3eebfb92ae0521e18225d69be95e78fb02672/src/consensus/merkle.cpp#L9 + panic!("Duplicate hashes in the Merkle tree, indicating mutation"); + } + preimage[..32].copy_from_slice( + &tree.nodes[curr_level_offset - 1][prev_level_index_offset + i * 2], + ); + preimage[32..].copy_from_slice( + &tree.nodes[curr_level_offset - 1][prev_level_index_offset + i * 2 + 1], + ); + let combined_hash = calculate_double_sha256(&preimage); + tree.nodes[curr_level_offset].push(combined_hash); + } + if prev_level_size % 2 == 1 { + let mut preimage: [u8; 64] = [0; 64]; + preimage[..32].copy_from_slice( + &tree.nodes[curr_level_offset - 1] + [prev_level_index_offset + prev_level_size - 1], + ); + preimage[32..].copy_from_slice( + &tree.nodes[curr_level_offset - 1] + [prev_level_index_offset + prev_level_size - 1], + ); + let combined_hash = calculate_double_sha256(&preimage); + tree.nodes[curr_level_offset].push(combined_hash); + } + curr_level_offset += 1; + prev_level_size = 
prev_level_size.div_ceil(2); + prev_level_index_offset = 0; + } + tree + } + + /// Returns the Merkle root. Use this only for Bitcoin merkle tree, not for mid-state trees. + pub fn root(&self) -> [u8; 32] { + self.nodes[self.nodes.len() - 1][0] + } + + /// Constructs a "mid-state" Merkle tree, designed for generating secure SPV (Simplified Payment Verification) proofs. + /// This structure, when used with the corresponding `calculate_root_with_merkle_proof` (or `BlockInclusionProof::get_root`) method, + /// helps mitigate vulnerabilities associated with standard Bitcoin Merkle trees in SPV contexts, such as certain forms of hash duplication or ambiguity attacks (e.g., CVE-2012-2459). + /// Also please check: + /// with the suggested fix: + /// + /// + /// The leaves of this tree are transaction identifiers (`mid_state_txid()`), not typically standard Bitcoin txids (double-SHA256 of the transaction). + /// The internal nodes of this "mid-state" tree are constructed differently from a standard Bitcoin Merkle tree: + /// `N_parent = SHA256(SHA256(N_child_left) || SHA256(N_child_right))` + /// where `N_child_left` and `N_child_right` are nodes from the level below in this mid-state tree. + /// + /// The root of this mid-state tree (`Root_ms`) is an intermediate hash. The actual Bitcoin block Merkle root + /// is expected to be `SHA256(Root_ms)`, as demonstrated in the test cases. + /// + /// The security enhancement for SPV comes from how proofs generated from this tree are verified: + /// specifically, sibling nodes from this tree's proof path are further hashed with `SHA256` + /// before being combined in the standard `double_SHA256` Merkle path computation during proof verification (see `BlockInclusionProof::get_root`). + /// This acts as a domain separation, ensuring that the internal nodes of this mid-state tree cannot be misinterpreted + /// as leaf txids or other hash types during verification. 
+ pub fn new_mid_state(transactions: &[CircuitTransaction]) -> Self { + if transactions.len() == 1 { + // root is the coinbase mid-state txid + return BitcoinMerkleTree { + nodes: vec![vec![transactions[0].mid_state_txid()]], + }; + } + + let mid_state_txids: Vec<[u8; 32]> = + transactions.iter().map(|tx| tx.mid_state_txid()).collect(); + + let mut tree = BitcoinMerkleTree { + nodes: vec![mid_state_txids], // Level 0: Leaf nodes (mid-state txids) + }; + + // Construct the tree + let mut curr_level_offset: usize = 1; + let mut prev_level_size = tree.nodes[0].len(); + let mut preimage: [u8; 64] = [0; 64]; // Preimage for SHA256(SHA256(LeftChild) || SHA256(RightChild)) + while prev_level_size > 1 { + tree.nodes.push(vec![]); + for i in 0..(prev_level_size / 2) { + let left_child_node = tree.nodes[curr_level_offset - 1][i * 2]; + let right_child_node = tree.nodes[curr_level_offset - 1][i * 2 + 1]; + + if left_child_node == right_child_node { + // This check is also present in the mid-state tree construction. + // While the primary defense is in the proof verification, preventing duplicate + // inputs at this stage is good practice. 
+ panic!("Duplicate hashes in the Merkle tree, indicating mutation"); + } + // Preimage construction: SHA256(LeftChildNode) || SHA256(RightChildNode) + preimage[..32].copy_from_slice(&calculate_sha256(&left_child_node)); + preimage[32..].copy_from_slice(&calculate_sha256(&right_child_node)); + // The new node is SHA256 of this preimage + let combined_mid_state_hash = calculate_sha256(&preimage); + tree.nodes[curr_level_offset].push(combined_mid_state_hash); + } + // Handle odd number of nodes at the previous level by duplicating the last node's hash processing + if prev_level_size % 2 == 1 { + let mut preimage: [u8; 64] = [0; 64]; + let last_node = tree.nodes[curr_level_offset - 1][prev_level_size - 1]; + // Preimage: SHA256(LastNode) || SHA256(LastNode) + preimage[..32].copy_from_slice(&calculate_sha256(&last_node)); + preimage[32..].copy_from_slice(&calculate_sha256(&last_node)); + let combined_mid_state_hash = calculate_sha256(&preimage); + tree.nodes[curr_level_offset].push(combined_mid_state_hash); + } + curr_level_offset += 1; + prev_level_size = prev_level_size.div_ceil(2); + } + tree + } + + /// Given an index, returns the path of sibling nodes from the "mid-state" Merkle tree. 
    /// Returns the sibling nodes on the path from leaf `index` up to (but not
    /// including) the root of the "mid-state" Merkle tree.
    ///
    /// Panics if `index` is out of range for the leaf level.
    fn get_idx_path(&self, index: u32) -> Vec<[u8; 32]> {
        assert!(
            index < self.nodes[0].len() as u32,
            "Index out of bounds when trying to get path from mid-state Merkle tree"
        );
        let mut path = vec![];
        let mut level = 0;
        let mut i = index;

        while level < self.nodes.len() as u32 - 1 {
            if i % 2 == 1 {
                // Current node is a right child, sibling is to the left
                path.push(self.nodes[level as usize][i as usize - 1]);
            } else if (self.nodes[level as usize].len() - 1) as u32 == i {
                // Current node is a left child and the last one on this level (odd one
                // out): per Bitcoin's duplication rule, its sibling is itself.
                path.push(self.nodes[level as usize][i as usize]); // Sibling is itself (implicitly, due to duplication rule)
            } else {
                // Current node is a left child, sibling is to the right
                path.push(self.nodes[level as usize][(i + 1) as usize]);
            }
            level += 1;
            i /= 2;
        }
        path
    }

    /// Generates a Merkle proof for a given index in the "mid-state" Merkle tree.
    ///
    /// The proof is the leaf index together with the sibling path returned by
    /// [`Self::get_idx_path`].
    pub fn generate_proof(&self, idx: u32) -> BlockInclusionProof {
        let path = self.get_idx_path(idx);
        BlockInclusionProof::new(idx, path)
    }

    /// Calculates the Bitcoin Merkle root from a leaf's mid-state transaction ID (`mid_state_txid`) and its inclusion proof
    /// derived from a "mid-state" Merkle tree. This function is central to secure SPV.
    ///
    /// The `inclusion_proof` contains sibling nodes from the "mid-state" Merkle tree.
    /// The security enhancement lies in how these proof elements are processed:
    /// the running hash and each sibling node from the proof path are each hashed
    /// with `SHA256` first, and the two digests are then combined with a single
    /// `calculate_sha256` (see [`BlockInclusionProof::get_root`]):
    ///
    /// `current_hash = calculate_sha256(SHA256(current_hash) || SHA256(sibling_from_mid_state_proof))`
    ///
    /// This transformation of sibling proof elements acts as a domain separator,
    /// robustly distinguishing them from leaf transaction IDs. This prevents vulnerabilities where an
    /// attacker might craft a transaction whose ID could collide with or be misinterpreted as an
    /// internal node of the mid-state tree, or create other ambiguities that could fool an SPV client.
    /// The final `[u8; 32]` returned should match the block's official Merkle root.
    pub fn calculate_root_with_merkle_proof(
        mid_state_txid: [u8; 32], // This is the leaf mid_state_txid (SHA256 of transaction)
        inclusion_proof: BlockInclusionProof,
    ) -> [u8; 32] {
        inclusion_proof.get_root(mid_state_txid)
    }
}

/// An inclusion proof for one leaf of the "mid-state" Merkle tree: the leaf
/// index plus the sibling nodes on the path from that leaf to the root.
#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)]
pub struct BlockInclusionProof {
    idx: u32,
    merkle_proof: Vec<[u8; 32]>, // These are sibling nodes from the "mid-state" Merkle tree
}

impl BlockInclusionProof {
    /// Constructs a proof from a leaf index and its sibling path.
    pub fn new(idx: u32, merkle_proof: Vec<[u8; 32]>) -> Self {
        BlockInclusionProof { idx, merkle_proof }
    }

    /// Calculates the Merkle root given a leaf transaction mid-state transaction ID (`mid_state_txid`)
    /// and the Merkle proof path (sibling nodes from the "mid-state" tree).
    ///
    /// The core of the SPV security enhancement is here:
    /// the running hash and each `merkle_proof` element (a sibling node from the
    /// mid-state tree) are first hashed individually with `calculate_sha256`.
    /// These transformed digests are then combined in the usual left/right order,
    /// but with a single hash (`calculate_sha256`) rather than Bitcoin's raw
    /// double-SHA256 over the untransformed children.
    ///
    /// If `leaf` is the current hash and `P_mid_state` is a sibling from the proof path:
    /// `next_hash = SHA256(SHA256(leaf) || SHA256(P_mid_state))` (or reversed order).
    ///
    /// This ensures that elements from the mid-state tree's structure are treated distinctly
    /// from the leaf transaction IDs, preventing cross-interpretation and related attacks.
    /// The final hash should be the main Bitcoin block Merkle root.
    pub fn get_root(&self, mid_state_txid: [u8; 32]) -> [u8; 32] {
        // mid_state_txid is the leaf but the transaction is hashed with SHA256, not DSHA256.
        let mut preimage: [u8; 64] = [0; 64];
        let mut combined_hash: [u8; 32] = mid_state_txid;
        let mut index = self.idx;
        let mut level: u32 = 0;
        while level < self.merkle_proof.len() as u32 {
            // Get the sibling node from the mid-state tree proof path
            let mid_state_sibling_node = self.merkle_proof[level as usize];
            // Secure SPV step: transform both children by SHA256-ing them
            // before the single-SHA256 combination below.
            let processed_sibling_hash = calculate_sha256(&mid_state_sibling_node);
            let processed_combined_hash = calculate_sha256(&combined_hash);

            if index % 2 == 0 {
                // `combined_hash` is the left child
                preimage[..32].copy_from_slice(&processed_combined_hash);
                preimage[32..].copy_from_slice(&processed_sibling_hash); // Use the SHA256'd mid-state sibling
                combined_hash = calculate_sha256(&preimage);
            } else {
                // `combined_hash` is the right child.
                // A left sibling identical to the current node signals a mutated
                // (duplicated-node) proof; the legitimate duplication rule only ever
                // places the duplicate on the right, so reject this case outright.
                if processed_sibling_hash == processed_combined_hash {
                    panic!("Merkle proof is invalid: left hash matches combined hash");
                }
                preimage[..32].copy_from_slice(&processed_sibling_hash); // Use the SHA256'd mid-state sibling
                preimage[32..].copy_from_slice(&processed_combined_hash);
                combined_hash = calculate_sha256(&preimage);
            }
            level += 1;
            index /= 2;
        }
        calculate_sha256(&combined_hash) // This should be the Bitcoin block's Merkle root
    }
}

#[cfg(test)]
/// Verifies a Merkle proof against a given root using the "mid-state" tree approach.
///
/// - `mid_state_txid`: The transaction ID of the leaf node for which the proof is provided.
/// - `inclusion_proof`: The proof path containing sibling nodes from the "mid-state" Merkle tree.
/// - `root`: The expected Bitcoin Merkle root of the block.
+/// +/// This function recalculates the root using `inclusion_proof.get_root()` (which applies the +/// SPV security measure of SHA256-ing mid-state proof elements) and compares it to the expected `root`. +pub fn verify_merkle_proof( + mid_state_txid: [u8; 32], + inclusion_proof: &BlockInclusionProof, + root: [u8; 32], +) -> bool { + let calculated_root = inclusion_proof.get_root(mid_state_txid); + calculated_root == root +} + +#[cfg(test)] +mod tests { + + use bitcoin::absolute::LockTime; + use bitcoin::hashes::Hash; + use bitcoin::transaction::Version; + use bitcoin::{Block, Transaction}; + + use crate::bridge_circuit::transaction::CircuitTransaction; + + use super::*; + + #[test] + fn test_merkle_tree_0() { + let block: Block = bitcoin::consensus::deserialize(&hex::decode("0100000000000000000000000000000000000000000000000000000000000000000000004e7b2b9128fe0291db0693af2ae418b767e657cd407e80cb1434221eaea7a07a046f3566ffff001dbb0c78170101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff5504ffff001d01044c4c30332f4d61792f323032342030303030303030303030303030303030303030303165626435386332343439373062336161396437383362623030313031316662653865613865393865303065ffffffff0100f2052a010000002321000000000000000000000000000000000000000000000000000000000000000000ac00000000").unwrap()).unwrap(); + let tx_vec: Vec = block + .txdata + .iter() + .map(|tx| CircuitTransaction(tx.clone())) + .collect(); + let txid_vec: Vec<[u8; 32]> = tx_vec.iter().map(|tx| tx.txid()).collect(); + let merkle_tree = BitcoinMerkleTree::new(txid_vec); + let merkle_root = merkle_tree.root(); + assert_eq!( + merkle_root, + block.header.merkle_root.as_raw_hash().to_byte_array() + ); + let mid_state_merkle_tree = BitcoinMerkleTree::new_mid_state(&tx_vec); + let mid_state_txid_0 = tx_vec[0].mid_state_txid(); + let merkle_root_from_mid_state = calculate_sha256(&mid_state_merkle_tree.root()); + assert_eq!( + merkle_root_from_mid_state, + 
block.header.merkle_root.as_raw_hash().to_byte_array() + ); + let merkle_proof_0 = mid_state_merkle_tree.generate_proof(0); + assert!(verify_merkle_proof( + mid_state_txid_0, + &merkle_proof_0, + merkle_root + )); + } + + #[test] + fn test_merkle_tree_1() { + let block: Block = bitcoin::consensus::deserialize(&hex::decode("00802926b62577e229ae0009b80da0d948a7c934b3abf34a05e67b7d227780000000000071ff9f8ea5a251fa28934d6920f4c87724ef9a552f0e00a5020b83dc11a13870c8152b67ffff001d5c024aac29010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff270328d20000049af92a67044d1de50a0c71230c6743fc2800000000000a636b706f6f6c032f672fffffffff02249d0a2a01000000160014536182d440abe6e9895e75066fc9dfff1737497f0000000000000000266a24aa21a9ed6d91cee860b3cdbc07e095a9f552381313e65eb266bdee50aa19d0b2ecb7d0c2012000000000000000000000000000000000000000000000000000000000000000000000000001000000000101d0874b7a7f12e721aa7922a3d9e38db39374ecd3124f2575439429f5c9e2c1f40500000000ffffffff56400d030000000000225120e11877100296215d2455ba8ce1e8a5a0ef6959f2407db793b8d310d29e261c93400d030000000000225120012b07d47940bff758004e8c778581598029e9ab3daf65d2f55eb8b188423096400d030000000000225120d61e3e40410a41428e925c6b50ce66cf0674c6172554820796a7a87358483d35400d03000000000022512038c1d55dca433bb034ecf7bf77bf325388769042ce2383aeb74d6bd72fccb7f2400d030000000000225120fed292b6b5b0cd1532fba6c5a268a953e9be5c84ba2392e7083a1d6d965137ad400d0300000000002251205f5e01e0045180729e409a89249970b00354c96898ccc758243caf6f68d2552b400d030000000000225120de43a63c3e2f299dde78dfcb0e633a52831cfe0e50103a680251feaa6723d1c2400d030000000000225120e53a8d91996b0f4ba6d2fe7b5e5937bde509f90aa5f9513bf08b44874dc3c185400d030000000000225120590ca3a044c769a7b5f38a4b332740bc369fcba3acaa37717643c60eac8ea875400d0300000000002251204a2cf1c710d4c14f945e85c6e27d8997cb38848d0b1e4d288ab5085170d3c161400d0300000000002251201d6f2c25286eb2cc3012819f127a6aafdd0751b20ac29bb8f9c517f22c52cbb5400d0300000000002251206e95cc5cf31b3706747bf2973f26468e1a1df4f
501e267169dea4f9bf930b489400d030000000000225120e3407288dee0350ed5b865f30d32136c4169e1515c2f9453a6db84c549da0dca400d030000000000225120da195ff8d13386fa3164b35f54452bfd261fc8dc850c826daa9d4ae9b86ffcd5400d030000000000225120a47df510748031845665f3a512d600f3c145a07ce203341e4901ac05393054b1400d030000000000225120e663fc488244551097a3ddc07cc9800925ced00d2d4bb535d7b60a30eb479fc5400d030000000000225120d9bf66a5eed503c925b2f960af19732c795cf1fe7e3ea73f8d7ab995ee875948400d030000000000225120c8bdd7c2e696fee1abe8b322ce9ee3b8bf7bff7546fe0ca9377da75376c1d92c400d0300000000002251203c00aaaf8af2197ecb3417669ca2c1dea09dc354b685af21c7194afec58107a0400d0300000000002251207296a76471c44f42e423c2e7c9dc64c3afa991feec488a135ac54f8a56a3b496400d030000000000225120fca30ebf5b1573b2f7dd7542f6e6796c4927d775f43edf274cc2a7fcf9f2ca66400d030000000000225120cd2a8532ddc4d8510ea9d1d5e7305b1328a53d4b0654d439643d11e33a4ccb5b400d03000000000022512093b5732855048193c1d921468cbf01288433fa6ca98397835cd2b50977a4bcf3400d0300000000002251204d9c4f81ff5788a6aaecf3065bbeec32e610e580852385e9ce1a3f761d065b93400d0300000000002251209955a72ea46122230c6fadeadb39f597841ecdd91ab0b2e832d337b9ed868624400d030000000000225120344c772b4987e8058b44cb09938875d6485283e370b03e6b7f4d651fa1494082400d030000000000225120d7a659d7199a5f54b7d1876b62ba0d6140adecf36b2a3b343f6b0000b395d63f400d03000000000022512071ecff91e39bc6df0f8b194d8df03b0952ecd869f73caecccaf39902651acbf2400d030000000000225120fcdcea1f03bffd488c9f12aafc4392e444b7873c489a05e97f433be7c02d806b400d030000000000225120ffff30e51593bbd7c73cef68bcffdecbc72475fe5664c4be5447ec59773c439e400d0300000000002251204a3ec17459bcbb130a2e5b8a346c3784a132656ae354079206043f737aafd007400d030000000000225120f58b369aa9c9c429f9fda9a5ad56141554ce89b404630ba63fa9d91553a73c60400d030000000000225120187c1036952ece7f6b7de3b008f98847f99984ab4f3e67daef3019f32248b9e2400d030000000000225120d9e5a5ebb0d2f973d80886d30c5e3f7ddb9b3b9414ee6e06fd9f6c36133b53f0400d03000000000022512037223e88dc8e3c653b50572d9ab752e37316e7a37f7b44a4fd83e396e9021
c3b400d0300000000002251203954a18199ebddfaeb2b5082835357376cfbb4a4b38ac2fa0ba0261099e5a7b2400d03000000000022512019081384ac60ccd75e5084456d109b1e3007de26a622d9b710c935c2e10dbc1f400d03000000000022512072ec350bec46b2dcf114626d57c13c29bc926f46c12cbd576e4e61f5938366a3400d030000000000225120e8e3351e1a3efa0438587d6f5ad8ccfac3a0c665bc9c17a44ff9fd2e3e7697f4400d030000000000225120cccbc14141bc0091002a6beb74d8b99b1457d06483983ec613ebf003f2175f62400d03000000000022512019a116d71d7bf1bd5db46c917d5e880e965ce4651594e301e625ab2a41295c02400d0300000000002251204c4df805cac8a86e0fa6336df13c708095b26763c2de42154a12a3e16212dac5400d0300000000002251208191e1c629ccf3b79e6b384b775cb76458afbc5103839e4846165d530daf8f47400d0300000000002251201ca3ec0844c8c54a8078fe411fa76b07cfb3aacef40f71cf06765277bef9b8ea400d03000000000022512092b62ac803b726aea1b7f12cacedc19a1e8088c3df73dc5aacb52ee8c9581b1f400d0300000000002251200c0eaa84dc7e7348ba914fbc4a78dc13c8384d23472dd6fa84fca4e435fae4ca400d03000000000022512042ce2f661da192f7aace80066b4f321df3ebf4214afe5ff524d72b42e8bf61a4400d0300000000002251200d411c0396da6f8812ce94e75c9d60545bb1ed5483ae4ff28d9b14a0e97188f0400d0300000000002251200d7538352e5552450cece443a8058637fda3288304126257c7d5288ee790c690400d0300000000002251205393956f43d9470c6ed90f112b41b41140920a1cc67cc84113d3f973378e9302e0fd1c00000000002251200c820a4f62a95168918c3c0fd9b2f74ee6eda157ee83c097e10cf186615cf809400d03000000000022512054e84bf1916f3942dfba58dec5f4c66b0a3ee50f255b9108a6631221c0ce9dfd400d0300000000002251202c7dfd938b5f26a43b9d80fe737a35e120b0cf87b44187466ccc59acd444965e400d03000000000022512035dbbab23efd26df70a6651cdb31c5872e43fdc14e19b30af7ab630ee22e328a400d030000000000225120f16681177c6726e464bf5e345b086aedf2a47e3b89feb30e0e25747c3b851536400d0300000000002251207fb3c0cd398b631233b5672eaef7b47e320d1811967af76b344fde681a2ac457400d030000000000225120f106dfc0e33ea1f69fdc5665946d5542b6ea2445a80c34c49d94418ce11568aa400d030000000000225120d7382ae22ff42d2275f05f646d08e78430ad6605250f8298093021648329d08b400d030000000000225
12096024546315188307eaa97e1f52499cec4b386ba69f55553644762ac2816d4e1400d0300000000002251209ce3090c4496d37205d644accd85f767220e40c19c9a94e2fa9b3c61e20a6bd8400d030000000000225120e03067b7a87746c56bd535976a92db840b0a6a46398e95f0e44e832f7ada01bd400d030000000000225120696b672a1a44611c48f3f3a19ec507319e74151df0d551735950276a2f94d0bb400d030000000000225120300d29d3ba0cf354433b8438c28fb0fe492baf28711f5e9668093e3c1fe73a65400d030000000000225120545f606e43b4579ccdb1b04f8a2ae1af32c20082590eaf77fe17a77dfb30a60f400d0300000000002251202ee4b3f2df4adb7b5384a831daf43fcbdb2ba6ead7a11393e990d9ae4001b9c1400d030000000000225120799f5969c69b76169ac8cc43f403baa0f2a7d91f33bee2f7e78a92b7bab44b47400d0300000000002251202f11c5651b4aa0d8d0cf85dfe79c4e29ed9c16c99a9a7cf469ee015886128cba400d0300000000002251203c561e507924f3b1fb587661ec47f6d26aa0eb7106f47242b964b20cb763ea79400d030000000000225120531b37a969e7152f1ff6ce9b4f6f127047d4e7d6cc4e8be91ed81e85d8048e80400d030000000000225120b54bc9b12593990f7eadfa8b2fa87b590a81f0d0d25aa9b048d2bacae177486d400d03000000000022512028b42afac36727c0cdce9b980220dc519721fdde80c6c19c0eaf0f4e86067ac7400d030000000000225120e09e614c027f50038b5f5b6770f52d92b612b58c9d58c53c9151a71a2f2cc014400d030000000000225120c6eb3b061dbfc876399fad6371ca00712d2ea8bf243f0c1bfae02e30076a1da0400d030000000000225120ee4ada4fd527ad86d9370433554436d6e24d615d95cf1d7819069fd348e0ba6140420f0000000000225120c04b15c90149df98fc4679081ff94131329bc23e4319502b2876d90228e3e37d400d03000000000022512050b618aa1953112c692efaff3ec0811edc9a20ccaa42153ea9e58613912a57b5400d030000000000225120d172584c827287cf4b99b17552f8e4d14588b2417d4be6f61afb78cfd1314b56400d0300000000002251202352f368849d1b3e7af24ad9e63e6d633ff5304d61fdbb87cc2900f6356204d9400d03000000000022512040e87325471610a96d70e1641f2ca00a20352a050624b5d6e35c3d138e59e7c7400d030000000000225120350f90cdc8dd7dba36a2445e1b067b43368cded6100f83d3819109dd964fd14c400d0300000000002251207f3a1309add12d2b6387e9375351c49be2d274697311443a8b0e10ceac087e40400d0300000000002251209db5233f2abcaf2780d
c1955b6b8cd217a54622b3dac7b6510ba2714963030dd400d030000000000225120c4c95664724ce044ff77c77c24c24aa3c0b0ec289cb574bed6b00e5541ebb72e400d0300000000002251201acc8fde62205048f932c2c7a45b48c1456824516b8c604f78eb143e119c95c9400d03000000000022512067159985de51b57095ed4e20398c2b3d37c2093708751324d22e959852440d3e899c287c01000000160014dbd359f23e01f8752cc193fefc04aaa9e3a441400247304402206731bfecbd6c6e67c3212edbd71debb3e1d92197a953a21ffaacf2a6430e7bb20220575b4092e683e33e6a648447a815e5ddbc4b384edbbbe1a3a71c53f7fbdd0171012102836b1dbc3d40d023ec913ce3d04455a05873ea28e08c6b07536c0f08b3d3d17e00000000010000000001011ba7dcd9d08007bfdb31b27ab323b7958f17549b973a98b721d7108bb40534470100000000ffffffff56400d0300000000002251207022fbb7a6ae628fa593e35e402fbc19ab30cc2991e4eecd5f1ba9aeda50b65e400d030000000000225120903226eae1df825893ceeeaa21f9345a2de523be9325f992ea7fec92b94a5d1e400d03000000000022512017df484096cb1f3cf265653d1926be60b29e77213ae331cbf22d37f1c49934de400d030000000000225120d20bcc3265bf8f7da708df2e8d1964aa17139a39f9b58b7315edb04f6143d9c3400d0300000000002251205cfdf9c25bfa9b527a543234b927f083a020feb2967622e173458e59dcbd0003400d030000000000225120d403abbac8683e0455e1e5736daedc980753d8178991771f1b0d51a6ba82614f400d0300000000002251206e7bca480ddd4f22bbf246e972e8bfdaae1352127f7c303ec7e4f6a8c4b21ad160e3160000000000225120786d148fb2d85e10db8969dba73a7351786b4b7ddb9efcc67436f740598e548d400d03000000000022512049d9c2d0eb093398da626969e25362bbd9d50ee6d71481a6f86c706495fd5989400d0300000000002251206663a8a94c49f429ecaeb68c6be3e41d8636b2f6d088f58f3d591a492cf1d503400d0300000000002251201cafa0d92594feba9f0637f201e7568bf5d702af301f2c14b072b87c12388e5e400d0300000000002251200912ffbf4df4625c1a80142f49258a315e2488a2e33c61d044bf7820dc612b97400d03000000000022512022c066bd646fe5171b175c3bc2b25e189683facdf110b430a7cc3d6e0f510f73400d030000000000225120bd9396e4a61acb9444b1c18e32c252bfe7b835f099bf86bd96de5803c6d77aa1400d030000000000225120553dbce7f9ae57d55b7bf3d7c68aba36d2df67bc0d18b87de56d2601d9e8ab3fe0fd1c00000000002251203e7
3ce12f41c2d401f7e09e8f3707a593ea9c8081d66624dc556855013bd6ede400d0300000000002251207ec03c57a8075ec4bdc13083692961105b56ce0504b1c5bb09b9588eb523bce9400d0300000000002251205050433441e745276d208bf4f9f1a0137e7a91c7e80838a0f1a3c6a0b104a3d1400d030000000000225120184cacc7a13ea684c3597e7bcf109bee4e6d0aa5133ae8c57fba8ec7e336d3c1400d0300000000002251208816daf591eb7b4f145933093d493e3fea200d332628281b9f16dd46805436db400d030000000000225120e6fda9104c7c50bf0ba89fc5a4440b4fda7f1d1b76e37f3237d97dbdcb27e7d640771b000000000022512005d2303c9e9587fcf515b4afb0e6c2c252b0481f206f5515be05fb90c1fe422ec091210000000000225120a0955ba3445caccba50e66b087220f13050b76afdf954e0835bc107281da0cc9400d030000000000225120755247883535915e6fe04261c1db20f1a08495e60220d7c82b526c9027497847400d030000000000225120f427311e06daa16f076f701d3771b7743f15a22dd46cf932e61adbc6de9a2eac400d0300000000002251209a4b59f038ce312cc7098dd07c99beaadecf90de67e0c27e8a34950a04cad436400d0300000000002251203d02c8c86760c8dd13a75082ecacf3bbf70e2400dbe8626e28072b6216852856400d030000000000225120af8fc5a1cbd1b5ac02e34892bec4a50d9f815276a6be002521c63b557ee292ec400d030000000000225120c256c25d219c833d56bc2102863700f399ddb7793b6f4f0d2b35decb2a943c33400d030000000000225120af112e22c6a8f5181211e7403f5f9975319853da3c72702f5b8099e93c17c158400d03000000000022512093ca9bc3fd3b90978d4ae9d307725acadb1ff9afba72db1876de728e68da5c5d400d030000000000225120ccd8b5a2a3377b7e3e63e6266a41eb7bd9a834a907ca91858df54c485e909753400d03000000000022512096ba1089e3908cc36057a62e4107b566616a8ca67ddc0a66e18df1aedd50cd3a400d030000000000225120ec12ca07b363661532950fcc9b163b33462ed730a5e5a03605aba84914de56e7400d030000000000225120af9d8501ac444b7d958d96845193c485400855cdb8cdaee31b9798b14e9eac87400d030000000000225120a7a67f34d0bdfef934e2dadbd77da915072edd48560ed3602f33b8225256a2b7400d0300000000002251203338a42ead8ba65ae6e71cc2b781f039053a956cc03acd45c266787684122f93400d03000000000022512036287049ce89faae4087e356c6894b4f7efc102639011c2727744a89a4527850400d0300000000002251206e8207ed0b4fc44b417526b97
685094b04318bafa32c03f8b9843e440c81879b801a0600000000002251206c9d33e2c4a8bd1054afe49bd88f0867b60b254ffb0e58239b5b31059b4255e3400d030000000000225120064f4ff1ebd592358ec04fa1de7050eb8caec7661bffe74f189c51cd708ff1ed400d030000000000225120733985c6b5bd84dd51d0973d3fca2740b328a71dd462ad1a53fdb8ec119607cd400d030000000000225120916be5e4a2a578a3bcc8b9965b8465777ae5ef796665221eadb8efb55d65b556400d030000000000225120c1bf7325d66a2e2dedc1599b11ba4df74bfdfada9d2b9cd738a9c68dcb32c418400d030000000000225120ce6f2bf1ccb6e96a94ed185ca1d604a62631842cdfba59ffcd4aad101e1b9265e0fd1c000000000022512007e5345ceee2afc72bab07c4a233df90e97c5cea7030591d6cca3063b50f5c70400d0300000000002251206251ae0562566be092857da794aef7a6858bc10c2d92e2df2b67b3e7f2151507400d030000000000225120b58fcb7954419017efbb0c17a04d8121fb921a8c5b1184dc8a2d15920d38d58e400d03000000000022512092c4f29a3dbb840ad3cbabdcfb527ff08fc21f454a2018e71776568d731e1ef3400d030000000000225120fa433de4ec37ba7d13ed94fa294e528d53c587a54d01a0f2dc53069fafd5806d400d0300000000002251208c3a2f8534dfda45ab37f6a8acb0a7119002b7ca2d519bcaaf9d4c24ecf45436400d0300000000002251205c0938706fc4d3a72b98f82a5a239bcc8bdd6b3a7d96a016a5ae67210ea82a3a400d030000000000225120667f7b015d4b113cc9c5d068cd56d5d301f4c04898a5dd4b9117a245082dec77400d030000000000225120d9708045075d338a7a7e0e2236a9cb4ce3e107226e370e55b79e37ee3bdf4e9c006a180000000000225120135da47172343a84f365a427965afe757293cff0ef962983457c116034ac90f9400d030000000000225120bdcfa69f9805759d5929ac2514eae0d53a5ab1d27cbb61a69337674217a85884400d030000000000225120dd54f910695c8e366927cd5d5ec8dd60c3945e620f593a551b3f558e3fd56d85400d0300000000002251207919a59fee2cb97245213c9b4da8f907153ad0ff096ff03d0eb999f93bddecbc400d03000000000022512096ad18837720b09663053e541206b3ef2accb0d4012815bb0e8b603410c69d41400d0300000000002251207273dc83d40445a87b0eae651cab7a08242ac4d972e43eafbdaeae15acdcaee2400d030000000000225120f9c45a63059d3250b2ad77a9fa0a037dacbfa10099dea50f2bda5190c38a0604400d030000000000225120fcb99825e04d6b864970fe4d1558182a5a5810c4674a687
f43ee76b3d9738f86400d030000000000225120b4c7f5b5843bffff9166c9e2fb73719bc23019aff8365bd8ed77c1cd1e499f9b400d030000000000225120f5967b69cf197321174329fff226cc716bc36da5b5a5d562a2837c3462f6d594400d0300000000002251207ef2cc1f698411808d202040ca7289e82c95d389a2e8166a584431054b18b22e400d030000000000225120c960a50984fa31471671ce8141b952f34b4d6781b477355880e8c6623bff56dc400d0300000000002251205fadfebdba8ba6d21479aa3cdaf1e4084bcb2e9ad33c1402db35ef855b6e405a400d0300000000002251206ca2bd21fc40541ac9558e1c543215632eecc7c669436f1bdb10a8c2ea97f9a5400d030000000000225120d319e5ecf9b03f11ec30684fded5567b00b2585c271d62130945c02353f8bd51400d0300000000002251202e5f34c00973e3a6ccdeacc28c7994c0f345d773a382b09a331dea877deb4ab4400d03000000000022512002db565cc8f05949eedc666e5516e40233e3a8bb646ae9bb52a8378405b31c3d400d0300000000002251201ee15f85439086366d7822a16656880261cf6e76171d266ae109401f7e2db090400d030000000000225120a48aa4e2a241f11a88228a4d1c390c3076cdeacbd6e32dc850ccddbe9f4c8317400d030000000000225120b8e4dd32fc592c89d5c9cd34f9c724e02c5231b79ddaebce7e31178bc832c7a2400d030000000000225120d28865fb1e2de25889a61f81922dd3e612499262888125b28935096ecf151bdc400d030000000000225120b5b1da7738076ddc62ab5e5d432dd9b69c8b94bcbf69f2a7a2fbcb2e00c168e2400d0300000000002251200128adb161ca60cf2d792eebc01edeb507eef9a26b6291e086620f4d333f2f9c400d03000000000022512031a414653f51873c1bfdbeef40dc141eec43cfabeab7a670c18985d2463986e9400d0300000000002251207f282164ac8d6ef71cb3d15b4f4fca98d93c2a0b45290783fd06b68757101c0e400d0300000000002251208a1fb6cfe49b9a9d6189c1919fe26e29b8ebc3c856ea9d63b99df0b09f488724400d03000000000022512096289a10977378095b7b74e89c79b3911f83c3704caa18e0d356a736a499f41f400d030000000000225120934250d813563e9c4810fdf973a0f3091b900ba4c08337f4842800e046aae8f5400d030000000000225120e0cefb9755a28f5ae69bb103b45fe9dac9bee5d38d97d65293b721c3de95fcda400d0300000000002251201f9fb2d560663936c4c015f9ae69f80ae3de5a1c64eba7c35b16a3b875576458400d030000000000225120bec66229179615c91e0309b4ec9ffeb8a77f4fff5ce7c812b61c91252d3b2f30815cc
07c01000000160014dbd359f23e01f8752cc193fefc04aaa9e3a4414002483045022100f5c71e82ee329df4a5ff97de36a2b2b79f86d949b7d9a3d11d32f4ab8be660b3022072f192dc45e43060bde81a4356f838e2e7110d66267e15b89afed98dab10cf4e012102836b1dbc3d40d023ec913ce3d04455a05873ea28e08c6b07536c0f08b3d3d17e00000000020000000001018c1624b7fd02ba5d018688c4d4b64b708c8a657cc7701db8ac02b970532b29ae0100000000fdffffff02af72000000000000160014e4fd3abbb644375588f09ef4899361dfe239b05fb8d1ab020000000016001498b3992769881aa136620eb1801f625b38f2c15b014072c7353f7e1ccc0916901671bded4ded93a984b1a1faf246acd794499094e9bca9a9cba56fec52dba9857573caeb4207b3b3fdfa494e0df586eb01c143667a5027d2000002000000000102caee0386f72276a521fc3364e47a162c563a5d783b07b1e6e158ec105d08fedc0300000000feffffff52c7b36f469023be085280625c16a7f541844c393d63132d9498d2407329d7b30200000000feffffff07d74d000000000000160014f6fa357adbc3efdfd88cf3dc4520a456c2e55099da4d00000000000016001449de8133adcdf7bb3681435ac08f1c61cf63bb6a45c6000000000000160014ccebf5490d07887998e364d20264ce26f4d8cc06da4d00000000000016001474b2e4c51a530f49e9fe0a153c15b2c5bb2db017da4d000000000000160014cacbbe4dfec42a4bca49102ee70cf7890437a107da4d000000000000160014f5ea3664efb45b99bbbe7a3fe71ef63269139812da4d0000000000001600143bb3a7c824ab1affaeced2997561d2e9c08c7318024730440220165bad5fe52454797aa27782bb209a650d8e275ed09bce5f399ab5f0a175e1c40220076b1a83be7ad29d3cec72cbc2486249d8a22da5f6a77c1b1b9e4225880278a301210351a58277f6cf9e1a7c48b770359a7be383a6309528674bf2a0de830ebd104d5c0247304402204bd6314bcd8926f05a6529dce35c3d9a75b7ba5a52c16dc348e998ce5f87dc5202203c404ff313b3498154f4318fa7246a94c00a197c496df4b6888c9c179b247e0a0121024547f79f1a6628d26723abfaa101accc4cfe369693b3d474771b18a245ed27b226d2000002000000000101cc3db261169c955242298e8dab61c706bc2aed52e745e4be29867cac81e77ab70400000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc1101408cd793eadf49fe2914d7728eddf0a8383a696d26b911487f9bb425e0e127082e6dc0a73cebd8d35230cd53b8c2a85e536d571788f990de4af0fa976
7b9a26d23000000000200000000010a3b12d1df8ef6f6bf437a8cb1a5c44de442a5a24be872f4cc7685d30080f796750100000000feffffffc78bc4036394b2ada2cb10b2d39592069ce2aeb1b3b7b54fdca8b4275849140d0200000000feffffffa180251b2107744778735975322c9e777cf648d2c4eb29f57f1f6922922e3ef90100000000feffffff041cf6d6688987e00137566a0df6c5e579c2023927956806868a56c8908f77060100000000feffffffb60fdc173b95aa0c6557897a98ae189937ec1fb8c876ea1a15604d2ba8c758c80000000000feffffffd778a25494c755ea34f1fdffd4c4524d0e12718c9dab65e35ef1f7bd707f11430000000000feffffff1a6d87659c06107cc6550492761bef3d51cff46cbefd345268d7db8565f7d7cb0000000000feffffff5b91ca42c3191b1130c8796fced1895150f40abfbef937398a6e98afb556983e0300000000feffffffaf1e3bf8114dbf067133dc671eedb38445e6034faa140975efcc8e6166f9348a0100000000feffffffe12c7fb9f57d52b1c72d5d87e945f788d14f508e9b97d9fd7faafb2bff84aa4c0000000000feffffff0242fe3c0000000000160014e4fd3abbb644375588f09ef4899361dfe239b05fc973010000000000160014765028867a1b6b1195ad2d3fb6e2b065116bd1510247304402205e2bb9f8a92bd7932ef142833259840fa8c60660b6f96093e0b69bb5d47dea5e022012eb002d4bd053d96c449182a9a05cd3b3dce0f70196541e4aa69aa2a55ab6ea01210393239407efa6250ce0973f50cb829de921aee32db677817ef96ef8b56c369e8f0247304402202d4bc696752852f41213d3138f60288ec60dd0828bba0aebaa93ee2553e722c502206e43c5b3ff32f6f85f88f4e1d6c182ae4710ac81aefab6ffac984fd8e46b1e4e0121022f448382613678be144e5dcc252819333d16190cb5c3717a214e65a5a3e5672502473044022065820982ad95861e94806b981a439ee682b2cf74daaa2cc157a2bf800ac14b970220467a66443376b658eada1a3dee227ebbdc68e9e3dc824cd9874bccbe544740cd012103098239071fa5d61c8d046db4d823992efd7e9353346977314e81ea89a74a272c024730440220728a81de49e662baf3e437417a5f9979323de29e19abcf9d20bfbe72848ba23e02202b9e30ff85c517fd93f6f20c3f8f2cb8c2dafbd40aabc805e835e5bf92c143ef01210259316cde14df89e7259623b770fcc5d559bf44ab0aa94df1d22ef3eb03fa91640247304402203b74f263d7834293ec576b175a294ca8f3d2e3326f73f7e2295162c43961de930220055c52e70f32b68bf435431962ce1aa977b887fa56748f8289aec9d93d519572012103e73d95f6d4305f919
10eeb22b616cb41b70ee1d2fe15f7be22f8c0f6f1e68b1d0247304402200c390f675cf6f598275745212f0dd19905f7996b0426bc1a54e679de858e59c102207f65baede91dbc069def5cc4dee53e86239e4a0d7de0fa1deb9834bbe80ec00c01210352c76e118039f87d7414b6f43ebea6b2751ec7be97bee5482207af8bbd2588eb0247304402200c1ccaeaf7a27910e35ab2cbd4097aa69a00287149740b635f5910269a252d3e02202daef72974401ae19473e95da17c823ed28d879205edc61ee7be729025fec4be012102d80c616c1fb9dba1b4ae73313d88f03a68ade3a4760ea12ca14a13a7ade2b97702473044022067bae6844b4139f88f7d8ee1182e6366048ccf495c0c59169a7fa3e0a9c7bb560220196ab2d2c740274b4eb68b0d1c7726d7e956206d0b2d41cfeb4ed8aa6dc8ec9e0121020b0318da2b655e849a5a4739a7b11146b1cc5e901f4e25e7febea0f26f83a65102473044022051e01b410b032dcc45d26817ea95acfbb9ff0bacc4cb21a8275af38181824c3b0220719cf12dab0a0a16483a5bb4047055fb1aecd3a822e5ef7e672d518037950f74012102a9f47ae23f06e0232e87183f7d0e779693271034a10042fc8a6d3217dfc9f7750247304402202c139393fc08c979764d9ecfd5634ca782736b389c8aa01042f61e3cf388f9ca022005b76058fd540e3ab659db3404b862c26182168c9299c63c592b8c2960d76e8401210397653767e64932ea2c83c0238926a74c711132cfbd3c4f03a35372fa7426d89ce6d1000002000000000102b381d86b5991500119f1bd95e3fcbcc4a9107c7ea21bda41ff298358f2d1fd180700000000ffffffff01f01c8efa965c8c2cf4f4b18cd297675d09cba71e1473b419963b4faf1d28b90100000000ffffffff0260e316000000000022512080eb3e9db450c51afa4daf8f1b57c770c556991d91f36887d436a3dde47c0a869215000000000000225120786d148fb2d85e10db8969dba73a7351786b4b7ddb9efcc67436f740598e548d0140be9962adedaf1a51f62a18f12e5b53c476bf9b946ced6b4f8445fb7fd32364a49e85a2fb2982a5f307a82f244234c16fb19a467f9a0da173359bae4d4b23ddde014050298fbcec95a0a96848efe3b4ec24b42ad1b08deb2d0c26057b1b0ef1da667c098122416c8b389e391cb365d1fdab4b1c33a975a2cc45ca8cb20315c9f0ac1f00000000020000000001010722303828d572d3fb1e3f646a052ce68451473a86cce02b726baaf7dae3ca570200000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc1101400dd9e3b6563c580940898e86aba16bc3925bf42bc4183c310d5334c7753fd21
f69d65ccad54b8c902dbb0920c32119296a10f1d41e8629bedc6207f85551e5f6000000000200000000010122aee43b30cdd01765a316c5d6837a540af4a9c6690cb0e440ddfc5608eb064f0000000000fdffffff0225c2d1cc1e0000001600147b458433d0c04323426ef88365bd4cfef141ac758813000000000000225120d0f61eac8291b8689461aa5e08e2aaaa627199196c37b9bd2a9959182bcf6d0102473044022015ff9f5c1cb9b0874b54dce2aef396522d2ef20891cd0724a7409c43ce34de5b02203d2b9d5ab8bbec6e7cd5d5e3eb206375fb8b386e911c026114875760099a05ad0121030db9616d96a7b7a8656191b340f77e905ee2885a09a7a1e80b9c8b64ec746fb3000000000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9bff906296fb520e1c860000000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140091ab528a2910bc686b4dac9912da4a59df173e8be7befed11f85c487bb7b68815c3a4c4a457159ed5e1f7999045f8b6dcd5ae63643aefbd1eeb6a749e3ecb260000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0100000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140eef79c5d2ddab489e1fcfa3a82b554dfafa637017225592c97ece6df7e792bd4b8c74c3afc098203cc8578bcaf9bac77d29d3db13be7a8fd8c9d31d7685a80c4000000000200000001d0e9cfbcdcfbd1422632c90586365e85fb4d6a9f3a186c6459bc115ccc5db820010000006a47304402205227e61a70642da96194d94cde92927b242386e98d876667e7a87725b485d2da02200bc948407069f2ddc17cfefbe06ddfd290a6b5cc51a842b23917973d938ad71f012103816c333f0b3de4ccc0c19b0839d6fb9b05f17d6c84af91a48f93f328957d600bfdffffff024ef72506000000002251205ca3400e7f0a03ccb0d1d8591446edf094d2e2b34ad347cbcc7a0333819031cc92560000000000002251206c9d33e2c4a8bd1054afe49bd88f0867b60b254ffb0e58239b5b31059b4255e327d2000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0600000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc1101403728e23db6509b7bf57763959ba151341d4012e206f159af878242360eab3a598a5c1272bb603dd580c0e7771602d0d0d334e227cf24e658e092e49947a86f44000000000
2000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0c00000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc1101405383d27fa066921b4bfaf882088b68cc2001bceeb7455681f17db44fa4c1fb7cb58da79c1335fa116d7acd625bcfcc207fae7405bb3d967036b73cce0a3f87c60000000002000000000101805fd5f2b35a00fd4bd80b6a4e7eea9aef743b416d84d2e2659170fccba0c22a0100000000ffffffff02204e00000000000022512080eb3e9db450c51afa4daf8f1b57c770c556991d91f36887d436a3dde47c0a8682ad6c00000000002251208a482830e94b85d843e95c2448f6d160b056aea668dcdd9b8d7f218094411f180140e61574ffc8cc85dd0de1b0d8cc1b113ef6211b7e165d368a0fafaa578e8056dd35738bbcf047f01923bcc6a130385c600473a78ce3dc91df8460c9d2e340bf060000000002000000000101910f822c831c0a256bc01e50c590f91c554f46b008f0180c5e3131fedaca51630100000000ffffffff02803801000000000022512080eb3e9db450c51afa4daf8f1b57c770c556991d91f36887d436a3dde47c0a8668746b00000000002251208a482830e94b85d843e95c2448f6d160b056aea668dcdd9b8d7f218094411f18014032f909383a2ca58020a9180fdfe210158c7b7397cd168d928533e3be619098e77d8ce6be4688bc9bb14d937ee77c1d54944f0f3a20a1c2e2d9b9b85f4ea557ea0000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0200000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140cd136f7ec1f4beb9f063556f66a446037c8b8cf37f746c1b8748f688ede7eca5367f8cbaa6353681ea515f469275103dbc7e8175056a99094fe56a43b0c4ddbe000000000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9bff906296fb520e1c860900000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140ad023b4f959806fff187247cdbc57fc7159bbdc45afe5492fe78314f3396f0ba1aaf686b8e3c53bb6182bd64cbab0380dfec36677c98891cc3fcdbea6227d3a40000000002000000000101570f9ab70e0ed40f234e0672443e45bab32577dffefb6db22f7c131699223eae0000000000feffffff02894d00000000000016001410488094091f5aa7d04a994ff937a1ffdb974bf718e8000000000000160014fecc9392d0da66d2f61ab86d90b
e2d339d38fc7302473044022076fb7191ce8102b636ae02d581b44618ade858e86f70e495c7826d4e8131de1802204ab87877d842de643417732ba272612c6a4699d1a00fa01f0382c5a7a6b4df02012102d8058c60963858e23ee0cc55c3cb5eca6216c0989060da353774c9bc7289297727d200000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9bff906296fb520e1c860700000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140e289a5ece4b6d1bda4f28c46ce9707f6052924aa801649f469f8d28b906709c1b32dc09e0d86b8326d202dbacadfe5ed9f70c9a6bbd409a5b7072c8e132fb9e8000000000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9bff906296fb520e1c860300000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc1101405c5adcb347f17b947abeebf4146ecf2723ed2257fba39e3ddf02e56ca4907afb5e6871e650eb287d2173abb9d4667f2b14904abde85e8f0e2f82759cee5decdd0000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0400000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140b1e168a3d2ff0723f9c32aeb3c1c2522829a58418db38cfd94422538fb172e81d4a8baeb2f0f4fe32cbd4f4b7983d982e4f9283098acb169e69f7d86f9cf60b800000000020000000001014049c949ab389bd8385b1b4dae60d3027646bca0f6bc0b56f341f43420f698210100000000ffffffff02905f01000000000022512080eb3e9db450c51afa4daf8f1b57c770c556991d91f36887d436a3dde47c0a863e146a00000000002251208a482830e94b85d843e95c2448f6d160b056aea668dcdd9b8d7f218094411f180140ee24fe241950f9ad1356d6817872d97e4a6bc2ebf99a14f83d7805440ec4532a80425d019cc1d65c2397c048709e64c0f53ac376db8a9285558b1f96565a6dea0000000002000000000101cc3db261169c955242298e8dab61c706bc2aed52e745e4be29867cac81e77ab70200000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140fc85ae0bc99d4f03162182e73f1114815c32808dbfb8f1f9604e8d13c3f08a88bc19e17999cbb26dcdace1ad2994da9747c1efac609178bcbab61c0d4f8c532c000000000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9
bff906296fb520e1c860400000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc11014010db562114bba933a0f869b7ca2ca1d1febfb2698e145686f11037fb8205f902ce4b1cd22ecd0815356f8203da5d08b1f07a81eb167196f004453f738af687d4000000000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9bff906296fb520e1c860100000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc1101403158fc9a8ba466d3785ec8ff1f9950cad9b8a2da2a5fcdf54177219ecc5895c98b2ab394402924c4fe630c2c5de7fc3a5db1350a82654984ea586c8c53e463dc000000000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9bff906296fb520e1c860500000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140caf8e0fbf94b44a00334e61b500f1837b3e7808548916cd8e8db14d153ac110d8770716e50d618f223e0d711111a47803b13312e825b47d281c0bfef369fd2980000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb1100000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc1101404016086cd5b0895739606edeaefdf817b93eb09071e95ea23f8c47986997c8de0fbc5a1ce406793bac8458625fff2e51591e02ced594b2345b1c3277a3251bf20000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0a00000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140ab917683a9a9f39cedc8395f11b7bc2445c2402eecc2e8ad67c4fd804328d17b2e4b44e083f3874385a2a5c44aeaa382cc6f5178a72538c491e22bf82487462200000000020000000001010722303828d572d3fb1e3f646a052ce68451473a86cce02b726baaf7dae3ca570600000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140f10df53597f6ee50a44ccf6e643ccb723f224dc96c00e14c86aef5116b4c82142953b4db3503ee1d6cfed2ae0cb7acd94523516ef110b3042249602765c9bc6f0000000002000000000103c4b8ab4e4a2162195dca57d23a2f0be7115a16f7ee96eea843af779843fda6170500000000fffffffff353ae7c11bc07651d3
8a7d72cb50ae9375648a3497e505b16b4cd9f48613cca0100000000ffffffff2ec1cee2a9c91e040bc3632e6877a983d85e4c117b496077932ab09a8566d7880100000000ffffffff02c0c62d000000000022512080eb3e9db450c51afa4daf8f1b57c770c556991d91f36887d436a3dde47c0a86b73e0f000000000022512020cb02edeb4b69afcd08a6901514629b5531e2c6a30ce112eebe5a49eb94bf9c0140e444abb9bc1cbc406071d6318c3fb059e5ed6b538391f47ba5e5889a06f43156254d4ee107f2549b2a0627d19591c857d4376d8ff5801d0267ae31337ba8d8d60140031ed566b1f79ef05134474910635803fd050277a96c8f40f29614d7cf5fcfb5024ea7f799c3944ba9c4e29d36c6a4156818257c2bf7263382a0742b67139859014078e54fd05719f77b3123691b03bca895ab8d3d0038b0d1354a83dc16ad818f0d79ad2f7492a01fa8ed5d9d81360947c401cac4d75911068de3a19e49db1b9ad6000000000200000000010108eac5eab9998b46ccdb0fca2aee70062cf48d2351ee9bff906296fb520e1c860600000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140be76626a3f207e60628c09d2158510f27f030fe2a7c12617403ee1fe07efcff7bd77de49e47b04b709a4a721454524320b4f0b24f7ef675a3e34e43c017078ea0000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0500000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140de11962fa987dae8d84326bb9ea7c3593d59f6e736b1eafc752483172f9437a6b75fbc54c01dd2e2e49eb7016ec7ea9e51d0bedd54c4bb416fc198422cb525db0000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb0f00000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc11014087013a16f5728d3d51ac12699d3000614dc0a8db5bf7b08485e0ab6b0d0d070cb4e27e8ddb5466f763eb66eec504281e749f3c5340af4121eb10ac2c3cfa915e00000000020000000001010e77b7bccc47c756b0e6327627940a650d411608e0cd0784176610d7b4c768250000000000ffffffff018a821e00000000002251209321b660eaf1d8487ecc799c9f3bbb74b7bb1793de9df41d04a479669b2617300140705be8f3732c372a836804334dfc6f16d511b18ad448123cd877c548e1b0d6ce0b43764376c19d401ca9cb98079c6432be682594ba29096a96982
38cfc822294000000000200000000010180ce346d13519be8de8c9221e776b877694628c88dcb570850e628032e81b3520000000000fdffffff0217e7d7e6160000001600147b458433d0c04323426ef88365bd4cfef141ac758813000000000000225120d0f61eac8291b8689461aa5e08e2aaaa627199196c37b9bd2a9959182bcf6d010247304402202cd798c91fb2974277c969a920bf20797395ed0d1824321f57b9d5f83a59121602202a392615fede776c6348d608775fc6077914efc2b9b982c5bcdccf76cc1b35f40121030db9616d96a7b7a8656191b340f77e905ee2885a09a7a1e80b9c8b64ec746fb30000000002000000000101d8773a94e31a4b5adcea618a6c8532f4141ac88a354fffd501958d6a6b7e98020000000000fdffffff020d5f73416f000000225120aac35fe91f20d48816b3c83011d117efa35acd2414d36c1e02b0f29fc3106d900ff200000000000016001400f56bc22372ce3648abc4af21baea9a72b3475e01400051d2c9b67cc536921a56bce9a6b50498f4e72137bdae6027e4e8d9adb7d9d9cf181129ed5fd2f4eac0bbc4cefcff468fba485c667b2ee93242fa185010721200000000020000000001010722303828d572d3fb1e3f646a052ce68451473a86cce02b726baaf7dae3ca570800000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140f94e3f51bb785ffdb773759b48048a00cec748c7cd880483c29ead5be682c632f08a24ae9961bfa3f084996ffd957c15a8e26b1cea6476822bdf6064c67040de0000000002000000000101771928fe801bc5b368b7899491efba72c5a761bb09be761ce7f80f14851bf8cb1000000000ffffffff01d10c0300000000002251206a0cc6c8cc7caae4bbb6aadf779ccbd5689cd18c3ed90a5229870fcf9e4bdc110140c9fe47399fc6cc2ed357a46967a006e8ac39241c200d4757cbf43501c5e9c015ff17c01c6a4ec8b99ea34ab2d813c8bcad02654543bc7d69bd2624b8aea84ec700000000020000000001012d71f720be469ccdd6dc41b6a7d09c82e895f6ed78fa7df7af350ac9e78933ee0000000000fdffffff02ea0f83c5000000001600147b458433d0c04323426ef88365bd4cfef141ac758813000000000000225120d0f61eac8291b8689461aa5e08e2aaaa627199196c37b9bd2a9959182bcf6d010247304402202e354e8f135603dd752b999dbeeced525d35b14a40e356ad7c91149dc99d496f022076bdb2bec3317f3889bb9fba5bc6e9b52064f99c4c223186fbfd32e7f54eb33f0121030db9616d96a7b7a8656191b340f77e905ee2885a09a7a1e80b9c8b64ec746fb300000000").unwrap()
).unwrap(); + let tx_vec: Vec = block + .txdata + .iter() + .map(|tx| CircuitTransaction(tx.clone())) + .collect(); + let txid_vec: Vec<[u8; 32]> = tx_vec.iter().map(|tx| tx.txid()).collect(); + let merkle_tree = BitcoinMerkleTree::new(txid_vec); + let merkle_root = merkle_tree.root(); + assert_eq!( + merkle_root, + block.header.merkle_root.as_raw_hash().to_byte_array() + ); + let mid_state_merkle_tree: BitcoinMerkleTree = BitcoinMerkleTree::new_mid_state(&tx_vec); + let mid_state_merkle_root = calculate_sha256(&mid_state_merkle_tree.root()); + assert_eq!( + mid_state_merkle_root, + block.header.merkle_root.as_raw_hash().to_byte_array() + ); + for (i, tx) in tx_vec.into_iter().enumerate() { + let mid_state_txid = tx.mid_state_txid(); + let merkle_proof_i = mid_state_merkle_tree.generate_proof(i as u32); + assert!(verify_merkle_proof( + mid_state_txid, + &merkle_proof_i, + merkle_root + )); + } + } + + // Should panic + #[test] + #[should_panic(expected = "Duplicate hashes in the Merkle tree, indicating mutation")] + fn test_malicious_merkle_tree_1() { + let txid_vec = vec![[1u8; 32], [2u8; 32], [3u8; 32]]; + let _merkle_tree = BitcoinMerkleTree::new(txid_vec); + let malicious_tx_vec = vec![[1u8; 32], [2u8; 32], [3u8; 32], [3u8; 32]]; + let _malicious_merkle_tree = BitcoinMerkleTree::new(malicious_tx_vec); + } + + // Should panic + #[test] + #[should_panic(expected = "Duplicate hashes in the Merkle tree, indicating mutation")] + fn test_malicious_merkle_tree_2() { + let txid_vec = vec![ + [1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32], [6u8; 32], + ]; + let _merkle_tree = BitcoinMerkleTree::new(txid_vec); + let malicious_tx_vec = vec![ + [1u8; 32], [2u8; 32], [3u8; 32], [3u8; 32], [4u8; 32], [5u8; 32], [6u8; 32], [5u8; 32], + [6u8; 32], + ]; + let _malicious_merkle_tree = BitcoinMerkleTree::new(malicious_tx_vec); + } + + #[test] + /// a b c + /// but try to cheat and say c is index 3 + #[should_panic(expected = "Merkle proof is invalid: left hash matches 
combined hash")] + fn test_merkle_root_with_proof_wrong_idx_a() { + let mut transactions: Vec = vec![]; + for i in 0u8..3u8 { + let tx = Transaction { + version: Version::non_standard(i as i32), + lock_time: LockTime::ZERO, + input: vec![], + output: vec![], + }; + transactions.push(CircuitTransaction(tx)); + } + let mut tx_hashes: Vec<[u8; 32]> = vec![]; + for tx in transactions.iter() { + tx_hashes.push(tx.txid()); + } + let tree = BitcoinMerkleTree::new(tx_hashes.clone()); + let mid_state_tree = BitcoinMerkleTree::new_mid_state(&transactions); + let tree_root = tree.root(); + let mid_state_root = mid_state_tree.root(); + assert_eq!(tree_root, calculate_sha256(&mid_state_root)); + let proof = mid_state_tree.generate_proof(2); + assert!(verify_merkle_proof( + transactions[2].mid_state_txid(), + &proof, + tree_root + )); + + // Now try to cheat and say c is at index 3 + let idx_path = mid_state_tree.get_idx_path(2); // Get from real index 2 + let false_proof = BlockInclusionProof::new(3, idx_path); + verify_merkle_proof(transactions[2].mid_state_txid(), &false_proof, tree_root); + } + + #[test] + /// a b c d e f + /// but try to cheat and say e is index 6 + #[should_panic(expected = "Merkle proof is invalid: left hash matches combined hash")] + fn test_merkle_root_with_proof_wrong_idx_b() { + let mut transactions: Vec = vec![]; + for i in 0u8..6u8 { + let tx = Transaction { + version: Version::non_standard(i as i32), + lock_time: LockTime::ZERO, + input: vec![], + output: vec![], + }; + transactions.push(CircuitTransaction(tx)); + } + let mut tx_hashes: Vec<[u8; 32]> = vec![]; + for tx in transactions.iter() { + tx_hashes.push(tx.txid()); + } + let tree = BitcoinMerkleTree::new(tx_hashes.clone()); + let mid_state_tree = BitcoinMerkleTree::new_mid_state(&transactions); + let tree_root = tree.root(); + let mid_state_root = mid_state_tree.root(); + assert_eq!(tree_root, calculate_sha256(&mid_state_root)); + let proof = mid_state_tree.generate_proof(4); + 
assert!(verify_merkle_proof( + transactions[4].mid_state_txid(), + &proof, + tree_root + )); + + // Now try to cheat and say e is at index 6 + let idx_path = mid_state_tree.get_idx_path(4); // Get from real index 4 + let false_proof = BlockInclusionProof::new(6, idx_path); + verify_merkle_proof(transactions[4].mid_state_txid(), &false_proof, tree_root); + } +} diff --git a/circuits-lib/src/bridge_circuit/mod.rs b/circuits-lib/src/bridge_circuit/mod.rs new file mode 100644 index 000000000..e87968c35 --- /dev/null +++ b/circuits-lib/src/bridge_circuit/mod.rs @@ -0,0 +1,1408 @@ +//! # Bridge Circuit Module +//! +//! This module implements the Bridge Circuit for Clementine protocol. +//! It defines the main entry point, `bridge_circuit`, which executes a comprehensive sequence +//! of cryptographic verifications to securely validate a payout (fronting) transaction +//! for a valid peg-out request for an existing peg-in transaction. The circuit +//! ensures that an operator's claimed Bitcoin chain state is valid and that it has more +//! cumulative proof-of-work than any challenging watchtower. +//! +//! ## Core Workflow +//! The `bridge_circuit` function orchestrates the entire verification process: +//! 1. **Input Reading:** Reads the `BridgeCircuitInput` from the host environment. +//! 2. **Header Chain Proof (HCP) Verification:** Validates the operator's proof of the +//! Bitcoin header chain. +//! 3. **Watchtower Challenge Processing:** Iterates through all submitted watchtower challenges, +//! verifying their transaction signatures (`verify_watchtower_challenges`) and their +//! accompanying Groth16 proofs of work. It identifies the valid challenger with the +//! highest total work (`total_work_and_watchtower_flags`). +//! 4. **Work Comparison:** Asserts that the operator's claimed work is greater than the +//! maximum work submitted by any valid watchtower. +//! 5. **SPV Proof Verification:** Confirms the inclusion of the payout transaction in the +//! 
operator's claimed Bitcoin chain using a Simple Payment Verification (SPV) proof. +//! 6. **Light Client & Storage Verification:** Validates the Citrea rollup's state via a light +//! client proof (`lc_proof_verifier`) and verifies EVM storage proofs to confirm deposit +//! and withdrawal details (`verify_storage_proofs`). +//! 7. **Final Output Generation:** Computes a unique `deposit_constant` and a final +//! `journal_hash` from critical data points across the proofs. This hash is committed +//! to the zkVM journal, serving as the circuit's public, verifiable output. +//! +//! ## Key Components and Sub-modules +//! This module relies on several specialized sub-modules for handling specific cryptographic tasks: +//! - `groth16` & `groth16_verifier`: For handling Groth16 proof deserialization and verification. +//! - `spv`: Implements SPV proof logic. +//! - `lc_proof`: Verifies light client proofs of the rollup state. +//! - `storage_proof`: Verifies EVM storage proofs. +//! - `transaction` & `sighash`: Provides utilities for handling Bitcoin transactions and +//! computing Taproot sighashes for signature verification. +//! - `structs`: Defines the data structures used for circuit inputs and outputs. +//! - `constants`: Contains network-specific constants like method IDs. 
+ +pub mod constants; +pub mod groth16; +pub mod groth16_verifier; +pub mod lc_proof; +pub mod merkle_tree; +pub mod spv; +pub mod storage_proof; +pub mod structs; +pub mod transaction; + +use crate::{ + bridge_circuit::transaction::CircuitTransaction, + common::{ + constants::{ + MAINNET_HEADER_CHAIN_METHOD_ID, MAX_NUMBER_OF_WATCHTOWERS, + REGTEST_HEADER_CHAIN_METHOD_ID, SIGNET_HEADER_CHAIN_METHOD_ID, + TESTNET4_HEADER_CHAIN_METHOD_ID, + }, + zkvm::ZkvmGuest, + }, +}; +use bitcoin::{ + consensus::Encodable, + hashes::{sha256, Hash}, + io::{self}, + opcodes, + script::Instruction, + sighash::{Prevouts, PrevoutsIndexError, SighashCache}, + Script, TapLeafHash, TapSighash, TapSighashType, Transaction, TxOut, +}; + +use core::panic; +use groth16::CircuitGroth16Proof; +use groth16_verifier::CircuitGroth16WithTotalWork; +use k256::{ + ecdsa::signature, + schnorr::{Signature, VerifyingKey}, +}; +use lc_proof::lc_proof_verifier; +use sha2::{Digest, Sha256}; +use signature::hazmat::PrehashVerifier; +use std::borrow::{Borrow, BorrowMut}; +use storage_proof::verify_storage_proofs; +use structs::{ + BridgeCircuitInput, ChallengeSendingWatchtowers, DepositConstant, LatestBlockhash, + PayoutTxBlockhash, TotalWork, WatchTowerChallengeTxCommitment, WatchtowerChallengeSet, +}; + +/// The method ID for the header chain circuit. 
+pub const HEADER_CHAIN_METHOD_ID: [u32; 8] = { + match option_env!("BITCOIN_NETWORK") { + Some(network) if matches!(network.as_bytes(), b"mainnet") => MAINNET_HEADER_CHAIN_METHOD_ID, + Some(network) if matches!(network.as_bytes(), b"testnet4") => { + TESTNET4_HEADER_CHAIN_METHOD_ID + } + Some(network) if matches!(network.as_bytes(), b"signet") => SIGNET_HEADER_CHAIN_METHOD_ID, + Some(network) if matches!(network.as_bytes(), b"regtest") => REGTEST_HEADER_CHAIN_METHOD_ID, + None => MAINNET_HEADER_CHAIN_METHOD_ID, + _ => panic!("Invalid network type"), + } +}; + +/// Executes the bridge circuit in a zkVM environment, verifying multiple cryptographic proofs +/// related to watchtowers' Bitcoin work, SPV, and storage proofs. +/// +/// # Parameters +/// +/// - `guest`: A reference to a zkVM guest implementing `ZkvmGuest`. +/// - `work_only_image_id`: A 32-byte array representing the work-only image ID used in verification. +/// +/// # Functionality +/// +/// 1. Reads the `BridgeCircuitInput` from the host. +/// 2. Ensures the method ID in `hcp` (header chain proof) matches `HEADER_CHAIN_METHOD_ID`. +/// 3. Verifies the header chain proof (`hcp`). +/// 4. Computes maximum total work and watchtower challenge flags using `total_work_and_watchtower_flags`. +/// 5. Validates that the computed `max_total_work` does not exceed the `total_work` in `hcp.chain_state`. +/// 6. Fetches the MMR (Merkle Mountain Range) for block hashes from `hcp.chain_state`. +/// 7. Verifies the SPV proof (`payout_spv`) using the fetched MMR. +/// 8. Verifies the light client proof using `lc_proof_verifier`. +/// 9. Ensures the L1 block hash from the light client proof matches the payout transaction's block hash. +/// 10. Checks storage proofs for deposit and withdrawal transaction indices using `verify_storage_proofs`. +/// 11. Converts the verified withdrawal outpoint into a Bitcoin transaction ID. +/// 12. 
Ensures the withdrawal transaction ID matches the input reference in `payout_spv.transaction`. +/// 13. Computes the `deposit_constant` using the first OP_RETURN output of the payout transaction. +/// 14. Extracts and truncates the latest block hash and the payout transactionโ€™s block hash. +/// 15. Computes a Blake3 hash over concatenated block hash and watchtower flags. +/// 16. Generates a final journal hash using Blake3 over concatenated data and commits it. +/// +/// # Panics +/// +/// - If the method ID in `hcp` does not match `HEADER_CHAIN_METHOD_ID`. +/// - If `max_total_work` given by watchtowers is greater than `hcp.chain_state.total_work`. +/// - If the SPV proof is invalid. +/// - If the storage proof verification fails. +/// - If the block hash of the light client proof does not match the payout transaction's block hash. +/// - If the withdrawal transaction ID does not match the referenced input in `payout_spv`. +pub fn bridge_circuit(guest: &impl ZkvmGuest, work_only_image_id: [u8; 32]) { + let input: BridgeCircuitInput = guest.read_from_host(); + assert_eq!( + HEADER_CHAIN_METHOD_ID, input.hcp.method_id, + "Invalid method ID for header chain circuit: expected {:?}, got {:?}", + HEADER_CHAIN_METHOD_ID, input.hcp.method_id + ); + + // Verify the HCP + guest.verify(input.hcp.method_id, &input.hcp); + + let (max_total_work, challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &work_only_image_id); + + let total_work: TotalWork = input.hcp.chain_state.total_work[16..32] + .try_into() + .expect("Cannot fail"); + + // If total work is less than the max total work of watchtowers, panic + if total_work < max_total_work { + panic!( + "Insufficient total work: Total Work {:?} - Max Total Work: {:?}", + total_work, max_total_work + ); + } + + let mmr = input.hcp.chain_state.block_hashes_mmr.clone(); + + if !input.payout_spv.verify(mmr) { + panic!( + "Invalid SPV proof for txid: {}", + input.payout_spv.transaction.compute_txid() + ); + } + 
+ // Light client proof verification + let light_client_circuit_output = lc_proof_verifier(input.lcp.clone()); + + // Make sure the L1 block hash of the LightClientCircuitOutput matches the payout tx block hash + let lc_l1_block_hash = light_client_circuit_output.latest_da_state.block_hash; + let spv_l1_block_hash = input.payout_spv.block_header.compute_block_hash(); + + if lc_l1_block_hash != spv_l1_block_hash { + panic!( + "L1 block hash mismatch: expected {:?}, got {:?}", + lc_l1_block_hash, spv_l1_block_hash + ); + } + + // Storage proof verification for deposit tx index and withdrawal outpoint + let (user_wd_outpoint, vout, move_txid) = + verify_storage_proofs(&input.sp, light_client_circuit_output.l2_state_root); + + let user_wd_txid = bitcoin::Txid::from_byte_array(*user_wd_outpoint); + + let payout_input_index: usize = input.payout_input_index as usize; + + assert_eq!( + user_wd_txid, + input.payout_spv.transaction.input[payout_input_index] + .previous_output + .txid, + "Invalid withdrawal transaction ID" + ); + + assert_eq!( + vout, + input.payout_spv.transaction.input[payout_input_index] + .previous_output + .vout, + "Invalid withdrawal transaction output index" + ); + + let first_op_return_output = get_first_op_return_output(&input.payout_spv.transaction) + .expect("Payout transaction must have an OP_RETURN output"); + + let round_txid = input.kickoff_tx.input[0] + .previous_output + .txid + .to_byte_array(); + let kickoff_round_vout = input.kickoff_tx.input[0].previous_output.vout; + + let operator_xonlypk: [u8; 32] = parse_op_return_data(&first_op_return_output.script_pubkey) + .expect("Invalid operator xonlypk") + .try_into() + .expect("Invalid xonlypk"); + + let deposit_constant = deposit_constant( + operator_xonlypk, + input.watchtower_challenge_connector_start_idx, + &input.all_tweaked_watchtower_pubkeys, + *move_txid, + round_txid, + kickoff_round_vout, + input.hcp.genesis_state_hash, + ); + + // In the future this will be fetched from the LC 
proof + let latest_blockhash: LatestBlockhash = input.hcp.chain_state.best_block_hash[12..32] + .try_into() + .unwrap(); + + let payout_tx_blockhash: PayoutTxBlockhash = spv_l1_block_hash[12..32].try_into().unwrap(); + + let journal_hash = journal_hash( + payout_tx_blockhash, + latest_blockhash, + challenge_sending_watchtowers, + deposit_constant, + ); + + guest.commit(journal_hash.as_bytes()); +} + +/// Converts a compressed Groth16 proof into a proof structure and verifies it against a given image ID. +/// +/// # Parameters +/// +/// - `compressed_proof`: A reference to a 128-byte array containing the compressed Groth16 proof. +/// - `total_work`: A 16-byte array representing the total accumulated work associated with the proof. +/// - `image_id`: A reference to a 32-byte array representing the image ID used for verification. +/// - `genesis_state_hash`: A 32-byte array representing the genesis state hash. +/// +/// # Returns +/// +/// - `true` if the Groth16 proof is successfully deserialized and verified. +/// - `false` if any step in the process fails (e.g., failed deserialization or proof verification). +/// +/// # Failure Cases +/// +/// - If deserialization of the compressed proof fails, it returns `false`. +/// - If Groth16 proof verification fails, it returns `false`. +fn convert_to_groth16_and_verify( + compressed_proof: &[u8; 128], + total_work: [u8; 16], + image_id: &[u8; 32], + genesis_state_hash: [u8; 32], +) -> bool { + let seal = match CircuitGroth16Proof::from_compressed(compressed_proof) { + Ok(seal) => seal, + Err(_) => return false, + }; + + let groth16_proof = CircuitGroth16WithTotalWork::new(seal, total_work, genesis_state_hash); + + groth16_proof.verify(image_id) +} + +/// Verifies watchtower challenge transactions and collects their outputs. +/// +/// This function performs validation on a set of watchtower challenge transactions +/// and their associated inputs, witnesses, and public keys. 
It checks that: +/// - Each challenge input corresponds to the correct `kickoff_tx` output (P2TR), +/// - The signature is valid under the Taproot sighash rules, +/// - The public key matches the one registered for the watchtower, +/// - And, if all checks pass, it marks the corresponding bit in a 20-byte bitmap +/// (`challenge_sending_watchtowers`) and collects the first 3 outputs of the +/// watchtower transaction into `watchtower_challenges_outputs`. +/// +/// Note: This function only verifies keypath spends. +/// +/// # Parameters +/// - `circuit_input`: Data structure holding serialized watchtower transactions, UTXOs, input indices, and pubkeys. +/// +/// # Returns +/// A `WatchtowerChallengeSet` containing: +/// - `challenge_senders`: A 20-byte bitmap indicating which watchtower challenges were valid, +/// - `challenge_outputs`: A vector of vectors containing the outputs of valid watchtower challenge transactions. +/// These outputs should conform to the expected structure of either a single OP_RETURN output +/// or a combination of two P2TR outputs and one OP_RETURN output for the challenge to be +/// considered when calculating the maximum work). However, it is enough to have a valid signature +/// to mark the watchtower as a challenge sender. +/// +/// # Notes +/// Invalid or malformed challenge data (e.g., decoding errors, invalid signatures) +/// will be skipped gracefully without causing the function to panic. 
+pub fn verify_watchtower_challenges(circuit_input: &BridgeCircuitInput) -> WatchtowerChallengeSet { + let mut challenge_sending_watchtowers: [u8; 20] = [0u8; 20]; + let mut watchtower_challenges_outputs: Vec> = vec![]; + + let kickoff_txid = circuit_input.kickoff_tx.compute_txid(); + + if circuit_input.watchtower_inputs.len() > MAX_NUMBER_OF_WATCHTOWERS { + panic!( + "Invalid number of watchtower challenge transactions: {}", + circuit_input.watchtower_inputs.len() + ); + } + + for watchtower_input in circuit_input.watchtower_inputs.iter() { + let inner_txouts: Vec = watchtower_input + .watchtower_challenge_utxos + .iter() + .map(|utxo| utxo.0.clone()) + .collect::>(); + + let prevouts = Prevouts::All(&inner_txouts); + + let watchtower_input_idx = watchtower_input.watchtower_challenge_input_idx as usize; + + if watchtower_input_idx >= watchtower_input.watchtower_challenge_tx.input.len() { + panic!( + "Invalid watchtower challenge input index, watchtower index: {}", + watchtower_input.watchtower_idx + ); + } + + let input = watchtower_input.watchtower_challenge_tx.input[watchtower_input_idx].clone(); + + let (sighash_type, sig_bytes): (TapSighashType, [u8; 64]) = { + // Enforce the witness to be only 1 element, which is the signature + if watchtower_input.watchtower_challenge_witness.0.len() != 1 { + panic!( + "Invalid witness length, expected 1 element, watchtower index: {}", + watchtower_input.watchtower_idx + ); + } + let signature = watchtower_input.watchtower_challenge_witness.0.to_vec()[0].clone(); + + if signature.len() == 64 { + ( + TapSighashType::Default, + signature[0..64].try_into().expect("Cannot fail"), + ) + } else if signature.len() == 65 { + match TapSighashType::from_consensus_u8(signature[64]) { + Ok(sighash_type) => ( + sighash_type, + signature[0..64].try_into().expect("Cannot fail"), + ), + Err(_) => ( + TapSighashType::Default, + signature[0..64].try_into().expect("Cannot fail"), + ), + } + } else { + panic!( + "Invalid witness length, 
expected 64 or 65 bytes, watchtower index: {}", + watchtower_input.watchtower_idx + ); + } + }; + + let sighash = sighash( + &watchtower_input.watchtower_challenge_tx, + &prevouts, + watchtower_input_idx, + sighash_type, + watchtower_input.annex_digest, + ); + + if input.previous_output.txid != kickoff_txid { + panic!( + "Invalid input: expected input to reference an output from the kickoff transaction (txid: {}), but got txid: {}, vout: {}, watchtower index: {}", + kickoff_txid, + input.previous_output.txid, + input.previous_output.vout, + watchtower_input.watchtower_idx + ); + }; + + if watchtower_input_idx >= inner_txouts.len() { + panic!( + "Invalid watchtower challenge input index, watchtower index: {}", + watchtower_input.watchtower_idx + ); + } + + let output = inner_txouts[watchtower_input_idx].clone(); + + let script_pubkey = output.script_pubkey.clone(); + + if !script_pubkey.is_p2tr() { + panic!( + "Invalid output script type - kickoff, watchtower index: {}", + watchtower_input.watchtower_idx + ); + }; + + if watchtower_input.watchtower_idx as usize + >= circuit_input.all_tweaked_watchtower_pubkeys.len() + { + panic!( + "Invalid watchtower index, watchtower index: {}, number of watchtowers: {}", + watchtower_input.watchtower_idx, + circuit_input.all_tweaked_watchtower_pubkeys.len() + ); + } + + let pubkey: [u8; 32] = script_pubkey.as_bytes()[2..34] + .try_into() + .expect("Cannot fail"); + + if circuit_input.all_tweaked_watchtower_pubkeys[watchtower_input.watchtower_idx as usize] + != pubkey + { + panic!( + "Invalid watchtower public key, watchtower index: {}", + watchtower_input.watchtower_idx + ); + } + + let vout = watchtower_input + .watchtower_idx + .checked_mul(2) + .and_then(|x| x.checked_add(circuit_input.watchtower_challenge_connector_start_idx)) + .map(u32::from) + .expect("Overflow occurred while calculating vout"); + + if vout != input.previous_output.vout { + panic!( + "Invalid output index, watchtower index: {}", + 
watchtower_input.watchtower_idx + ); + } + + let Ok(verifying_key) = VerifyingKey::from_bytes(&pubkey) else { + panic!( + "Invalid verifying key, watchtower index: {}", + watchtower_input.watchtower_idx + ); + }; + + let Ok(signature) = Signature::try_from(sig_bytes.as_slice()) else { + panic!( + "Invalid signature, watchtower index: {}", + watchtower_input.watchtower_idx + ); + }; + + if verifying_key + .verify_prehash(sighash.as_byte_array(), &signature) + .is_ok() + { + challenge_sending_watchtowers[(watchtower_input.watchtower_idx as usize) / 8] |= + 1 << (watchtower_input.watchtower_idx % 8); + watchtower_challenges_outputs + .push(watchtower_input.watchtower_challenge_tx.output.clone()); + } + } + + WatchtowerChallengeSet { + challenge_senders: challenge_sending_watchtowers, + challenge_outputs: watchtower_challenges_outputs, + } +} + +/// Computes the maximum verified total work and watchtower challenge flags from challenge transactions. +/// +/// # Parameters +/// +/// - `circuit_input`: The `BridgeCircuitInput` containing all watchtower inputs and related data. +/// - `work_only_image_id`: A 32-byte identifier used for Groth16 verification against the work-only circuit. +/// +/// # Returns +/// +/// A tuple containing: +/// - `TotalWork`: The total work from the highest valid watchtower challenge (after successful Groth16 verification). +/// - `ChallengeSendingWatchtowers`: Bitflags representing which watchtowers sent valid challenges (1 bit per watchtower). +/// +/// # Notes +/// +/// - The function robustly skips over any challenges that are malformed, have invalid signatures, +/// or do not adhere to the expected transaction output structure. +/// - Each watchtower challenge transaction is expected to contain one of two distinct output structures: +/// - **Single Output Format:** A single `OP_RETURN` script containing a total of 144 bytes. +/// This includes the entire 128-byte compressed Groth16 proof followed by the 16-byte `total_work` value. 
+/// - **Three Outputs Format:** +/// - The first two outputs **must** be P2TR (Pay-to-Taproot) outputs. These two outputs +/// collectively contain the first 64 bytes of the compressed Groth16 proof parts +/// (32 bytes from each P2TR output). +/// - The third output **must** be an `OP_RETURN` script, containing the remaining 64 bytes +/// of the compressed Groth16 proof and the 16-byte `total_work` value. +/// - Valid commitments are sorted in descending order by their `total_work` value. The Groth16 +/// verifier is then applied sequentially to these sorted commitments, and the first successfully +/// verified `total_work` is selected as the maximum verified work. +pub fn total_work_and_watchtower_flags( + circuit_input: &BridgeCircuitInput, + work_only_image_id: &[u8; 32], +) -> (TotalWork, ChallengeSendingWatchtowers) { + let watchtower_challenge_set = verify_watchtower_challenges(circuit_input); + + let mut valid_watchtower_challenge_commitments: Vec = vec![]; + + for outputs in watchtower_challenge_set.challenge_outputs { + let compressed_g16_proof: [u8; 128]; + let total_work: [u8; 16]; + + match outputs.as_slice() { + // Single OP_RETURN output with 144 bytes + [op_return_output, ..] if op_return_output.script_pubkey.is_op_return() => { + // If the first output is OP_RETURN, we expect a single output with 144 bytes + let Some(Ok(whole_output)) = parse_op_return_data(&op_return_output.script_pubkey) + .map(TryInto::<[u8; 144]>::try_into) + else { + continue; + }; + compressed_g16_proof = whole_output[0..128] + .try_into() + .expect("Cannot fail: slicing 128 bytes from 144-byte array"); + total_work = whole_output[128..144] + .try_into() + .expect("Cannot fail: slicing 16 bytes from 144-byte array"); + } + // Otherwise, we expect three outputs: + // 1. [out1, out2, out3] where out1 and out2 are P2TR outputs + // and out3 is an OP_RETURN output with 80 bytes + [out1, out2, out3, ..] 
+ if out1.script_pubkey.is_p2tr() + && out2.script_pubkey.is_p2tr() + && out3.script_pubkey.is_op_return() => + { + let first_output: [u8; 32] = out1.script_pubkey.to_bytes()[2..] + .try_into() + .expect("Cannot fail: slicing 32 bytes from P2TR output"); + let second_output: [u8; 32] = out2.script_pubkey.to_bytes()[2..] + .try_into() + .expect("Cannot fail: slicing 32 bytes from P2TR output"); + + let Some(Ok(third_output)) = + parse_op_return_data(&out3.script_pubkey).map(TryInto::<[u8; 80]>::try_into) + else { + continue; + }; + + compressed_g16_proof = + [&first_output[..], &second_output[..], &third_output[0..64]] + .concat() + .try_into() + .expect("Cannot fail: concatenating and converting to 128-byte array"); + + // Borsh deserialization of the final 16 bytes is functionally redundant in this context, + // as it does not alter the byte content. It is retained here for consistency and defensive safety. + total_work = borsh::from_slice(&third_output[64..]) + .expect("Cannot fail: deserializing 16 bytes from 16-byte slice"); + } + _ => continue, + } + + let commitment = WatchTowerChallengeTxCommitment { + compressed_g16_proof, + total_work, + }; + + valid_watchtower_challenge_commitments.push(commitment); + } + + valid_watchtower_challenge_commitments.sort_by(|a, b| b.total_work.cmp(&a.total_work)); + + let mut total_work_result = [0u8; 16]; + + for commitment in valid_watchtower_challenge_commitments { + if convert_to_groth16_and_verify( + &commitment.compressed_g16_proof, + commitment.total_work, + work_only_image_id, + circuit_input.hcp.genesis_state_hash, + ) { + total_work_result = commitment.total_work; + break; + } + } + + ( + TotalWork(total_work_result), + ChallengeSendingWatchtowers(watchtower_challenge_set.challenge_senders), + ) +} + +/// Parses the OP_RETURN data from a Bitcoin script. It retrieves the first data push after an OP_RETURN. 
+pub fn parse_op_return_data(script: &Script) -> Option<&[u8]> { + let mut instructions = script.instructions(); + if let Some(Ok(Instruction::Op(opcodes::all::OP_RETURN))) = instructions.next() { + if let Some(Ok(Instruction::PushBytes(data))) = instructions.next() { + return Some(data.as_bytes()); + } + } + None +} + +/// Computes a deposit constant hash using various transaction and cryptographic components. +/// +/// # Parameters +/// +/// - `operator_xonlypk`: A 32-byte array representing the operator's X-only public key. +/// - `watchtower_challenge_connector_start_idx`: A 16-bit unsigned integer marking the start index of the watchtower challenge connector. +/// - `watchtower_pubkeys`: A slice of 32-byte arrays representing tweaked watchtower public keys. +/// - `move_txid`: A 32-byte array representing the transaction ID of the move transaction. +/// - `round_txid`: A 32-byte array representing the transaction ID of the round transaction. +/// - `kickoff_round_vout`: A 32-bit unsigned integer indicating the vout of the kickoff round transaction. +/// - `genesis_state_hash`: A 32-byte array representing the genesis state hash. +/// +/// # Returns +/// +/// A `DepositConstant` containing a 32-byte SHA-256 hash of the concatenated input components. 
+pub fn deposit_constant( + operator_xonlypk: [u8; 32], + watchtower_challenge_connector_start_idx: u16, + watchtower_pubkeys: &[[u8; 32]], + move_txid: [u8; 32], + round_txid: [u8; 32], + kickoff_round_vout: u32, + genesis_state_hash: [u8; 32], +) -> DepositConstant { + // pubkeys are 32 bytes long + let pubkey_concat = watchtower_pubkeys + .iter() + .flat_map(|pubkey| pubkey.to_vec()) + .collect::>(); + + let watchtower_pubkeys_digest: [u8; 32] = Sha256::digest(&pubkey_concat).into(); + + let pre_deposit_constant = [ + &move_txid, + &watchtower_pubkeys_digest, + &operator_xonlypk, + &watchtower_challenge_connector_start_idx.to_be_bytes()[..], + &round_txid, + &kickoff_round_vout.to_be_bytes()[..], + &genesis_state_hash, + ] + .concat(); + + DepositConstant(Sha256::digest(&pre_deposit_constant).into()) +} + +pub fn journal_hash( + payout_tx_blockhash: PayoutTxBlockhash, + latest_blockhash: LatestBlockhash, + challenge_sending_watchtowers: ChallengeSendingWatchtowers, + deposit_constant: DepositConstant, +) -> blake3::Hash { + let concatenated_data = [ + payout_tx_blockhash.0, + latest_blockhash.0, + challenge_sending_watchtowers.0, + ] + .concat(); + + let binding = blake3::hash(&concatenated_data); + let hash_bytes = binding.as_bytes(); + + let concat_journal = [deposit_constant.0, *hash_bytes].concat(); + + blake3::hash(&concat_journal) +} + +/// Retrieves the first output of a transaction that is an OP_RETURN script. Used in various +/// contexts to extract metadata or constants from transactions. +pub fn get_first_op_return_output(tx: &CircuitTransaction) -> Option<&TxOut> { + tx.output + .iter() + .find(|out| out.script_pubkey.is_op_return()) +} + +/// Computes the Taproot sighash for a given transaction input. 
+fn sighash( + wt_tx: &Transaction, + prevouts: &Prevouts, + input_index: usize, + sighash_type: TapSighashType, + annex_hash: Option<[u8; 32]>, +) -> bitcoin::sighash::TapSighash { + let mut enc = TapSighash::engine(); + let mut sighash_cache = SighashCache::new(wt_tx); + taproot_encode_signing_data_to_with_annex_digest::<_, TxOut, &Transaction>( + sighash_cache.borrow_mut(), + enc.borrow_mut(), + input_index, + prevouts, + annex_hash, + None, + sighash_type, + ); + TapSighash::from_engine(enc) +} + +/// Encodes the BIP341 signing data for any flag type into a given object implementing the +/// [`io::Write`] trait. This version takes a pre-computed annex hash and panics on error. +/// Code mostly taken from: https://github.com/rust-bitcoin/rust-bitcoin/blob/9782fa8412e1c767998d018f6c915e51553a83d6/bitcoin/src/crypto/sighash.rs#L619 +pub fn taproot_encode_signing_data_to_with_annex_digest< + W: io::Write + ?Sized, + T: Borrow, + R: Borrow, +>( + sighash_cache: &mut SighashCache, + writer: &mut W, + input_index: usize, + prevouts: &Prevouts, + annex_hash: Option<[u8; 32]>, + leaf_hash_code_separator: Option<(TapLeafHash, u32)>, + sighash_type: TapSighashType, +) { + let tx = sighash_cache.transaction(); + check_all_prevouts(prevouts, tx); + + let (sighash, anyone_can_pay) = split_anyonecanpay_flag(sighash_type); + let expect_msg = "writer should not fail"; + + // Epoch + 0u8.consensus_encode(writer).expect(expect_msg); + + // Control: hash_type (1). 
+ (sighash_type as u8) + .consensus_encode(writer) + .expect(expect_msg); + + // Transaction Data: + tx.version.consensus_encode(writer).expect(expect_msg); + tx.lock_time.consensus_encode(writer).expect(expect_msg); + + if !anyone_can_pay { + // Manually compute sha_prevouts + let mut enc_prevouts = sha256::Hash::engine(); + for txin in tx.input.iter() { + txin.previous_output + .consensus_encode(&mut enc_prevouts) + .expect(expect_msg); + } + sha256::Hash::from_engine(enc_prevouts) + .consensus_encode(writer) + .expect(expect_msg); + + // Manually compute sha_amounts + let all_prevouts = unwrap_all_prevouts(prevouts); + let mut enc_amounts = sha256::Hash::engine(); + for prevout in all_prevouts.iter() { + prevout + .borrow() + .value + .consensus_encode(&mut enc_amounts) + .expect(expect_msg); + } + sha256::Hash::from_engine(enc_amounts) + .consensus_encode(writer) + .expect(expect_msg); + + // Manually compute sha_scriptpubkeys + let mut enc_script_pubkeys = sha256::Hash::engine(); + for prevout in all_prevouts.iter() { + prevout + .borrow() + .script_pubkey + .consensus_encode(&mut enc_script_pubkeys) + .expect(expect_msg); + } + sha256::Hash::from_engine(enc_script_pubkeys) + .consensus_encode(writer) + .expect(expect_msg); + + // Manually compute sha_sequences + let mut enc_sequences = sha256::Hash::engine(); + for txin in tx.input.iter() { + txin.sequence + .consensus_encode(&mut enc_sequences) + .expect(expect_msg); + } + sha256::Hash::from_engine(enc_sequences) + .consensus_encode(writer) + .expect(expect_msg); + } + + if sighash != TapSighashType::None && sighash != TapSighashType::Single { + // Manually compute sha_outputs + let mut enc_outputs = sha256::Hash::engine(); + for txout in tx.output.iter() { + txout.consensus_encode(&mut enc_outputs).expect(expect_msg); + } + sha256::Hash::from_engine(enc_outputs) + .consensus_encode(writer) + .expect(expect_msg); + } + + // Data about this input: + let mut spend_type = 0u8; + if annex_hash.is_some() { + 
spend_type |= 1u8; + } + if leaf_hash_code_separator.is_some() { + spend_type |= 2u8; + } + spend_type.consensus_encode(writer).expect(expect_msg); + + if anyone_can_pay { + let txin = tx.tx_in(input_index).expect("invalid input index"); + let previous_output = + get_for_prevouts(prevouts, input_index).expect("invalid prevout for input index"); + txin.previous_output + .consensus_encode(writer) + .expect(expect_msg); + previous_output + .borrow() + .value + .consensus_encode(writer) + .expect(expect_msg); + previous_output + .borrow() + .script_pubkey + .consensus_encode(writer) + .expect(expect_msg); + txin.sequence.consensus_encode(writer).expect(expect_msg); + } else { + (input_index as u32) + .consensus_encode(writer) + .expect(expect_msg); + } + + if let Some(hash) = annex_hash { + hash.consensus_encode(writer).expect(expect_msg); + } + + // Data about this output: + if sighash == TapSighashType::Single { + let mut enc_single_output = sha256::Hash::engine(); + let output = tx + .output + .get(input_index) + .expect("SIGHASH_SINGLE requires a corresponding output"); + output + .consensus_encode(&mut enc_single_output) + .expect(expect_msg); + let hash = sha256::Hash::from_engine(enc_single_output); + hash.consensus_encode(writer).expect(expect_msg); + } + + const KEY_VERSION_0: u8 = 0; + + if let Some((hash, code_separator_pos)) = leaf_hash_code_separator { + hash.as_byte_array() + .consensus_encode(writer) + .expect(expect_msg); + KEY_VERSION_0.consensus_encode(writer).expect(expect_msg); + code_separator_pos + .consensus_encode(writer) + .expect(expect_msg); + } +} + +// Helper functions for getting prevouts +fn get_for_prevouts<'a, T: Borrow>( + prevouts: &'a Prevouts<'a, T>, + input_index: usize, +) -> Result<&'a T, PrevoutsIndexError> { + match prevouts { + Prevouts::One(index, prevout) => { + if input_index == *index { + Ok(prevout) + } else { + Err(PrevoutsIndexError::InvalidOneIndex) + } + } + Prevouts::All(prevouts) => prevouts + .get(input_index) + 
.ok_or(PrevoutsIndexError::InvalidAllIndex), + } +} + +fn unwrap_all_prevouts<'a, T: Borrow>(prevouts: &'a Prevouts<'a, T>) -> &'a [T] { + match prevouts { + Prevouts::All(prevouts) => prevouts, + _ => panic!("cannot get all prevouts from a single prevout"), + } +} + +fn check_all_prevouts>(prevouts: &Prevouts<'_, T>, tx: &Transaction) { + if let Prevouts::All(prevouts) = prevouts { + if prevouts.len() != tx.input.len() { + panic!( + "Invalid number of prevouts: expected {}, got {}", + tx.input.len(), + prevouts.len() + ); + } + } +} + +fn split_anyonecanpay_flag(sighash: TapSighashType) -> (TapSighashType, bool) { + match sighash { + TapSighashType::Default => (TapSighashType::Default, false), + TapSighashType::All => (TapSighashType::All, false), + TapSighashType::None => (TapSighashType::None, false), + TapSighashType::Single => (TapSighashType::Single, false), + TapSighashType::AllPlusAnyoneCanPay => (TapSighashType::All, true), + TapSighashType::NonePlusAnyoneCanPay => (TapSighashType::None, true), + TapSighashType::SinglePlusAnyoneCanPay => (TapSighashType::Single, true), + } +} + +#[cfg(test)] +mod tests { + use super::{ + merkle_tree::BlockInclusionProof, + spv::SPV, + structs::{CircuitTxOut, CircuitWitness, WatchtowerInput}, + transaction::CircuitTransaction, + *, + }; + use crate::{ + bridge_circuit::structs::{LightClientProof, StorageProof}, + common::constants::{FIRST_FIVE_OUTPUTS, NUMBER_OF_ASSERT_TXS}, + header_chain::{ + mmr_native::MMRInclusionProof, BlockHeaderCircuitOutput, ChainState, CircuitBlockHeader, + }, + }; + use bitcoin::{ + absolute::Height, + consensus::{Decodable, Encodable}, + sighash::Annex, + taproot::TAPROOT_ANNEX_PREFIX, + transaction::Version, + Amount, ScriptBuf, Transaction, TxIn, Txid, Witness, + }; + use lazy_static::lazy_static; + use risc0_zkvm::compute_image_id; + use std::io::Cursor; + + const TESTNET4_WORK_ONLY_ELF: &[u8] = + include_bytes!("../../../risc0-circuits/elfs/testnet4-work-only-guest.bin"); + + lazy_static! 
{ + static ref TESTNET4_WORK_ONLY_IMAGE_ID: [u8; 32] = compute_image_id(TESTNET4_WORK_ONLY_ELF) + .expect("Elf must be valid") + .as_bytes() + .try_into() + .expect("Elf must be valid"); + } + + fn total_work_and_watchtower_flags_setup() -> (BridgeCircuitInput, Txid) { + let wt_tx_bytes = include_bytes!("../../test_data/wt_raw_tx.bin"); + let kickoff_raw_tx_bytes = include_bytes!("../../test_data/kickoff_raw_tx.bin"); + let pubkey_hex = "412c00124e48ab8b082a5fa3ee742eb763387ef67adb9f0d5405656ff12ffd50"; + + let mut wt_tx: Transaction = + Decodable::consensus_decode(&mut Cursor::new(&wt_tx_bytes)).unwrap(); + + let witness = wt_tx.input[0].witness.clone(); + + wt_tx.input[0].witness.clear(); + + let kickoff_tx: Transaction = + Decodable::consensus_decode(&mut Cursor::new(&kickoff_raw_tx_bytes)) + .expect("Failed to decode kickoff tx"); + + let kickoff_txid = kickoff_tx.compute_txid(); + + let output = kickoff_tx.output[wt_tx.input[0].previous_output.vout as usize].clone(); + + // READ FROM THE FILE TO PREVENT THE ISSUE WITH ELF - IMAGE ID UPDATE CYCLE + let mut encoded_tx_out = vec![]; + let _ = Encodable::consensus_encode(&output, &mut encoded_tx_out); + + let tx_out = Decodable::consensus_decode(&mut Cursor::new(&encoded_tx_out)) + .expect("Failed to decode kickoff tx"); + + let mut watchtower_pubkeys = vec![[0u8; 32]; 160]; + + let operator_idx: u16 = 6; + + let pubkey = hex::decode(pubkey_hex).unwrap(); + + watchtower_pubkeys[operator_idx as usize] = + pubkey.try_into().expect("Pubkey must be 32 bytes"); + + let watchtower_challenge_connector_start_idx: u16 = + (FIRST_FIVE_OUTPUTS + NUMBER_OF_ASSERT_TXS) as u16; + + let input = BridgeCircuitInput { + kickoff_tx: CircuitTransaction(kickoff_tx), + watchtower_inputs: vec![WatchtowerInput { + watchtower_idx: operator_idx, + watchtower_challenge_witness: CircuitWitness(witness), + watchtower_challenge_input_idx: 0, + watchtower_challenge_utxos: vec![CircuitTxOut(tx_out)], + watchtower_challenge_tx: 
CircuitTransaction(wt_tx.clone()), + annex_digest: None, + }], + hcp: BlockHeaderCircuitOutput { + method_id: [0; 8], + genesis_state_hash: [0u8; 32], + chain_state: ChainState::new(), + }, + payout_spv: SPV { + transaction: CircuitTransaction(wt_tx), + block_inclusion_proof: BlockInclusionProof::new(0, vec![]), + block_header: CircuitBlockHeader { + version: 0, + prev_block_hash: [0u8; 32], + merkle_root: [0u8; 32], + time: 0, + bits: 0, + nonce: 0, + }, + mmr_inclusion_proof: MMRInclusionProof { + subroot_idx: 0, + internal_idx: 0, + inclusion_proof: vec![], + }, + }, + lcp: LightClientProof::default(), + sp: StorageProof::default(), + all_tweaked_watchtower_pubkeys: watchtower_pubkeys, + watchtower_challenge_connector_start_idx, + payout_input_index: 0, + }; + + (input, kickoff_txid) + } + + #[test] + fn test_total_work_and_watchtower_flags() { + let (input, _) = total_work_and_watchtower_flags_setup(); + + let (total_work, challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + + let expected_challenge_sending_watchtowers = + [64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + + assert_eq!(*total_work, [0u8; 16], "Total work is not correct"); + assert_eq!( + *challenge_sending_watchtowers, expected_challenge_sending_watchtowers, + "Challenge sending watchtowers is not correct" + ); + } + + #[test] + fn test_total_work_and_watchtower_flags_incorrect_witness() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + let mut old_witness = input.watchtower_inputs[0] + .watchtower_challenge_witness + .0 + .to_vec()[0] + .clone(); + old_witness[0] = 0x00; + + let mut new_witness = Witness::new(); + new_witness.push(old_witness); + + input.watchtower_inputs[0].watchtower_challenge_witness = CircuitWitness(new_witness); + + let (total_work, challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + + assert_eq!(*total_work, [0u8; 16], "Total 
work is not correct"); + assert_eq!( + *challenge_sending_watchtowers, [0u8; 20], + "Challenge sending watchtowers is not correct" + ); + } + + #[test] + fn test_total_work_and_watchtower_flags_incorrect_tx() { + let (mut input, kickoff_txid) = total_work_and_watchtower_flags_setup(); + + input.watchtower_inputs[0].watchtower_challenge_tx = CircuitTransaction(Transaction { + version: Version(2), + lock_time: bitcoin::absolute::LockTime::Blocks(Height::from_consensus(0).unwrap()), + input: vec![TxIn { + previous_output: bitcoin::OutPoint::new( + kickoff_txid, + input.watchtower_inputs[0].watchtower_challenge_tx.input[0] + .previous_output + .vout, + ), + script_sig: ScriptBuf::new(), + sequence: bitcoin::Sequence(0), + witness: Witness::new(), + }], + output: vec![], + }); + + let (total_work, challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + + assert_eq!(*total_work, [0u8; 16], "Total work is not correct"); + assert_eq!( + *challenge_sending_watchtowers, [0u8; 20], + "Challenge sending watchtowers is not correct" + ); + } + + #[test] + #[should_panic(expected = "Invalid watchtower challenge input index")] + fn test_total_work_and_watchtower_flags_tx_in_incorrect_format() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Create invalid transaction with no inputs + input.watchtower_inputs[0].watchtower_challenge_tx = CircuitTransaction(Transaction { + version: Version(2), + lock_time: bitcoin::absolute::LockTime::Blocks(Height::from_consensus(0).unwrap()), + input: vec![], + output: vec![], + }); + + // Keep the input index at 0, which would now be invalid + input.watchtower_inputs[0].watchtower_challenge_input_idx = 0; + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + #[should_panic(expected = "Invalid witness length")] + fn test_total_work_and_watchtower_flags_utxo_in_invalid_format() { + 
let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Create a witness with more than one item, which would be invalid + let mut invalid_witness = Witness::new(); + invalid_witness.push([0x00]); + invalid_witness.push([0x01]); + input.watchtower_inputs[0].watchtower_challenge_witness = CircuitWitness(invalid_witness); + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + #[should_panic(expected = "Invalid watchtower public key")] + fn test_total_work_and_watchtower_flags_invalid_pubkey() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Modify the all_tweaked_watchtower_pubkeys (the array that's actually used in the new code) + let watch_tower_idx = input.watchtower_inputs[0].watchtower_idx as usize; + input.all_tweaked_watchtower_pubkeys[watch_tower_idx] = [0u8; 32]; + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + #[should_panic(expected = "Invalid watchtower challenge input index")] + fn test_total_work_and_watchtower_flags_invalid_wt_index() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Set an invalid index that's out of bounds + input.watchtower_inputs[0].watchtower_challenge_input_idx = 160; + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + #[should_panic(expected = "Invalid watchtower challenge input index")] + fn test_total_work_and_watchtower_flags_invalid_wt_input_index() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Set an input index that's beyond the transaction's inputs + input.watchtower_inputs[0].watchtower_challenge_input_idx = 10; + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + 
#[should_panic(expected = "Invalid witness length, expected 64 or 65 bytes")] + fn test_total_work_and_watchtower_flags_invalid_witness() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Create an invalid witness with 65 bytes but all zeros (signature validation will fail) + let mut invalid_witness = Witness::new(); + invalid_witness.push([0u8; 63]); // 63 bytes instead of 64/65 + input.watchtower_inputs[0].watchtower_challenge_witness = CircuitWitness(invalid_witness); + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + #[should_panic(expected = "Invalid signature")] + fn test_total_work_and_watchtower_flags_invalid_witness_2() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Create an invalid witness with 64 bytes but all zeros (signature validation will fail) + let mut invalid_witness = Witness::new(); + invalid_witness.push([0u8; 64]); + input.watchtower_inputs[0].watchtower_challenge_witness = CircuitWitness(invalid_witness); + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + #[should_panic(expected = "Invalid witness length, expected 64 or 65 bytes")] + fn test_total_work_and_watchtower_flags_invalid_witness_length() { + let (mut input, _) = total_work_and_watchtower_flags_setup(); + + // Create an invalid witness with incorrect length + let mut invalid_witness = Witness::new(); + invalid_witness.push([0u8; 60]); // Not 64 or 65 bytes + input.watchtower_inputs[0].watchtower_challenge_witness = CircuitWitness(invalid_witness); + + let (_total_work, _challenge_sending_watchtowers) = + total_work_and_watchtower_flags(&input, &TESTNET4_WORK_ONLY_IMAGE_ID); + } + + #[test] + fn test_parse_op_return_data() { + let op_return_data = 
"6a4c500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + let script = ScriptBuf::from(hex::decode(op_return_data).unwrap()); + assert!(script.is_op_return(), "Script is not OP_RETURN"); + let parsed_data = parse_op_return_data(&script).expect("Failed to parse OP_RETURN data"); + assert_eq!(parsed_data, [0u8; 80], "Parsed data is not correct"); + } + + #[test] + fn test_parse_op_return_data_short() { + let op_return_data = "6a09000000000000000000"; + let script = ScriptBuf::from(hex::decode(op_return_data).unwrap()); + assert!(script.is_op_return(), "Script is not OP_RETURN"); + let parsed_data = parse_op_return_data(&script).expect("Failed to parse OP_RETURN data"); + assert_eq!(parsed_data, [0u8; 9], "Parsed data is not correct"); + } + + #[test] + fn test_parse_op_return_data_fail() { + let op_return_data = "6a4c4f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + let script = ScriptBuf::from(hex::decode(op_return_data).unwrap()); + assert!(script.is_op_return(), "Script is not OP_RETURN"); + let parsed_data = parse_op_return_data(&script).expect("Failed to parse OP_RETURN data"); + assert_ne!(parsed_data, [0u8; 80], "Parsed data should not be correct"); + } + + #[test] + fn test_operator_xonlypk_from_op_return() { + let payout_tx = include_bytes!("../../test_data/payout_tx.bin"); + let mut payout_tx: Transaction = + Decodable::consensus_decode(&mut Cursor::new(&payout_tx)).unwrap(); + + // since this is old payout tx I'll manually change the output. 
Later replace it with the new one + let last_output_idx = payout_tx.output.len() - 1; + payout_tx.output[last_output_idx].script_pubkey = ScriptBuf::from( + hex::decode("6a204f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa") + .unwrap(), + ); + let last_output = payout_tx.output.last().unwrap(); + let operator_pk: [u8; 32] = parse_op_return_data(&last_output.script_pubkey) + .expect("Invalid operator xonlypk") + .try_into() + .expect("Invalid xonlypk"); + + let expected_pk = "4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"; + assert_eq!( + hex::encode(operator_pk), + expected_pk, + "Operator xonlypk is not correct" + ); + } + + // Helper function to extract and hash the annex + fn get_annex_hash(witness: &Witness) -> Option<[u8; 32]> { + // Using a constant for the annex prefix as defined in bitcoin library + + let watchtower_challenge_annex: Option = { + if let Some(last_witness_element) = witness.last() { + // Check if the first byte is 0x50 before attempting to create an Annex + if last_witness_element.first() == Some(&TAPROOT_ANNEX_PREFIX) { + Annex::new(last_witness_element).ok() // Convert Result to Option + } else { + None + } + } else { + None + } + }; + + watchtower_challenge_annex.and_then(|annex| { + let mut enc = sha256::Hash::engine(); + match annex.consensus_encode(&mut enc) { + Ok(_) => { + let hash = sha256::Hash::from_engine(enc); + Some(hash.to_byte_array()) // Use to_byte_array() for owned array + } + Err(_) => None, + } + }) + } + + #[test] + fn test_annex_signature() { + let bitcoin_tx: Transaction = 
bitcoin::consensus::deserialize(&hex::decode("020000000001017a48f6958d00c4ab052b0a09589cb0c71df95ec6593fa39aabf3bd130d96da2f8800000000fdffffff010c8602000000000022512065b9b1db7b1d648097913234091a8a7703ca330178efa12437ea97fbc3e14bf2024036f4cd2cf3cf433c9dac8b3205f44ffa4cd8d63f9a6f8191e4fea443ad74c7c50bc8962c2689185f8bb6062ac9ff62b1a8221df45aa377cf3c34566088bff4edfd300250005249464626020000574542505650384c190200002f36800d0097c026008034c8e7fe93810ac4a680d5b004b0ed5a0d3601002699bb3bf659229a10893684426d6cdb50177f0fed1d08d52053a158131a2265fee3ff7f3f059855f202d15da33a5f98ac7a7c07f6342e6d693f826c36ecc857946d392fe8cf906f658025db569cca0b5eec4814e73f5b9a405ef3bd4f44ff27a0fc7cfb215cec477061b71fc00576ec477cfe07d4e7032713ed64e07594ec4c90bd8e27c3bbd13813decd56844e83777335ff183e782a1cc82dce96a553ec4364390801025bb1731df3f135d70f3dfc68b4157c98954f43f45ca92111652bae0361e23476ae1f76ac4844490e1a9e26763e60454098093047ec37ac802f34e6dc5e37f0c57608c44bc28dca0e8e60077889b604b01d6470c653358dedf85d5514e528d0a0390dca4cb841a92795d96c82d203a800aa24a6bd1fbcad272e6ada45d59bcd666d3a4087ea8de6bd1f2b1e5ab0e96165e0f87b9ae0d6a3a2dde02d2b2b680836716fb30910653e3722ad04f73e244159f1b4285aba4b824a49a22ce4f594c50045a2fa26d1b2b294173138d9a0fa264954acc12d5664810d91acb92a03d8b725ee249913c0981515e8db3749772581cc0900d9ce90746fa8ca0d3026882807acf660e92e29ddd3d73cf7c0e664a0d4308043951bc18e09501fb77bba2b09af83e1400e51766c1ccf96b827bc6945388428198bc048f880f01025dbc402bc361c724f8428d824166500a19e88a2b049ac69670604ea010000000000").unwrap()).unwrap(); + let prevout: TxOut = bitcoin::consensus::deserialize(&hex::decode("949902000000000022512065b9b1db7b1d648097913234091a8a7703ca330178efa12437ea97fbc3e14bf2").unwrap()).unwrap(); + + let annex_hash = get_annex_hash(&bitcoin_tx.input[0].witness); + + let sighash = sighash( + &bitcoin_tx, + &Prevouts::All(&[prevout]), + 0, + TapSighashType::Default, + annex_hash, // Pass the computed annex hash + ); + + let xonly_pk_bytes = + 
hex::decode("65b9b1db7b1d648097913234091a8a7703ca330178efa12437ea97fbc3e14bf2") + .unwrap(); + let xonly_pk: VerifyingKey = + VerifyingKey::from_bytes(&xonly_pk_bytes).expect("Invalid xonly pk"); + + // The actual signature is the first element in the witness stack + let signature_bytes = bitcoin_tx.input[0] + .witness + .nth(0) + .expect("Signature not found in witness") + .to_vec(); + + let signature: Signature = + Signature::try_from(signature_bytes.as_slice()).expect("Invalid signature"); + + xonly_pk + .verify_prehash(sighash.as_byte_array(), &signature) + .expect("Signature verification failed"); + } + + #[test] + #[should_panic(expected = "Signature verification failed")] // This panic is expected if the original signature was created with an annex + fn test_annex_removed_signature() { + let mut bitcoin_tx: Transaction = bitcoin::consensus::deserialize(&hex::decode("020000000001017a48f6958d00c4ab052b0a09589cb0c71df95ec6593fa39aabf3bd130d96da2f8800000000fdffffff010c8602000000000022512065b9b1db7b1d648097913234091a8a7703ca330178efa12437ea97fbc3e14bf2024036f4cd2cf3cf433c9dac8b3205f44ffa4cd8d63f9a6f8191e4fea443ad74c7c50bc8962c2689185f8bb6062ac9ff62b1a8221df45aa377cf3c34566088bff4edfd300250005249464626020000574542505650384c190200002f36800d0097c026008034c8e7fe93810ac4a680d5b004b0ed5a0d3601002699bb3bf659229a10893684426d6cdb50177f0fed1d08d52053a158131a2265fee3ff7f3f059855f202d15da33a5f98ac7a7c07f6342e6d693f826c36ecc857946d392fe8cf906f658025db569cca0b5eec4814e73f5b9a405ef3bd4f44ff27a0fc7cfb215cec477061b71fc00576ec477cfe07d4e7032713ed64e07594ec4c90bd8e27c3bbd13813decd56844e83777335ff183e782a1cc82dce96a553ec4364390801025bb1731df3f135d70f3dfc68b4157c98954f43f45ca92111652bae0361e23476ae1f76ac4844490e1a9e26763e60454098093047ec37ac802f34e6dc5e37f0c57608c44bc28dca0e8e60077889b604b01d6470c653358dedf85d5514e528d0a0390dca4cb841a92795d96c82d203a800aa24a6bd1fbcad272e6ada45d59bcd666d3a4087ea8de6bd1f2b1e5ab0e96165e0f87b9ae0d6a3a2dde02d2b2b680836716fb30910653e3722ad04f73e244159f1b428
5aba4b824a49a22ce4f594c50045a2fa26d1b2b294173138d9a0fa264954acc12d5664810d91acb92a03d8b725ee249913c0981515e8db3749772581cc0900d9ce90746fa8ca0d3026882807acf660e92e29ddd3d73cf7c0e664a0d4308043951bc18e09501fb77bba2b09af83e1400e51766c1ccf96b827bc6945388428198bc048f880f01025dbc402bc361c724f8428d824166500a19e88a2b049ac69670604ea010000000000").unwrap()).unwrap(); + let prevout: TxOut = bitcoin::consensus::deserialize(&hex::decode("949902000000000022512065b9b1db7b1d648097913234091a8a7703ca330178efa12437ea97fbc3e14bf2").unwrap()).unwrap(); + + // Remove the annex from the witness stack + let signature_bytes = bitcoin_tx.input[0] + .witness + .nth(0) + .expect("Signature not found in witness") + .to_vec(); + bitcoin_tx.input[0].witness.clear(); + bitcoin_tx.input[0].witness.push(signature_bytes.clone()); // Only push the signature + + // Now, call sighash without providing the annex_hash + let sighash = sighash( + &bitcoin_tx, + &Prevouts::All(&[prevout]), + 0, + TapSighashType::Default, + None, // Explicitly pass None for annex_hash + ); + + let xonly_pk_bytes = + hex::decode("65b9b1db7b1d648097913234091a8a7703ca330178efa12437ea97fbc3e14bf2") + .unwrap(); + let xonly_pk: VerifyingKey = + VerifyingKey::from_bytes(&xonly_pk_bytes).expect("Invalid xonly pk"); + + let signature: Signature = + Signature::try_from(signature_bytes.as_slice()).expect("Invalid signature"); + + // This verification should fail because the sighash was computed WITHOUT an annex, + // but the original signature was likely created WITH an annex. 
+ xonly_pk + .verify_prehash(sighash.as_byte_array(), &signature) + .expect("Signature verification failed"); + } + + #[test] + fn test_parsing_op_return_data_144_bytes() { + let op_return_data = "6a4c90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + let txout = TxOut { + value: Amount::from_sat(0), + script_pubkey: ScriptBuf::from(hex::decode(op_return_data).unwrap()), + }; + assert!( + txout.script_pubkey.is_op_return(), + "Script is not OP_RETURN" + ); + let parsed_data = + parse_op_return_data(&txout.script_pubkey).expect("Failed to parse OP_RETURN data"); + assert_eq!(parsed_data.len(), 144, "Parsed data length is not correct"); + assert_eq!(parsed_data, [0u8; 144], "Parsed data is not correct"); + } +} diff --git a/circuits-lib/src/bridge_circuit/spv.rs b/circuits-lib/src/bridge_circuit/spv.rs new file mode 100644 index 000000000..de415e650 --- /dev/null +++ b/circuits-lib/src/bridge_circuit/spv.rs @@ -0,0 +1,141 @@ +//! SPV (Simplified Payment Verification) +//! This module provides the SPV structure and verification logic for the bridge circuit. +//! It includes the transaction, block inclusion proof, block header, and MMR inclusion proof. + +use super::{merkle_tree::BlockInclusionProof, transaction::CircuitTransaction}; +use crate::header_chain::{mmr_guest::MMRGuest, mmr_native::MMRInclusionProof, CircuitBlockHeader}; +use borsh::{BorshDeserialize, BorshSerialize}; + +/// SPV (Simplified Payment Verification) structure that contains +/// the transaction, block inclusion proof, block header, and MMR inclusion proof. 
+#[derive(Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct SPV { + pub transaction: CircuitTransaction, + pub block_inclusion_proof: BlockInclusionProof, + pub block_header: CircuitBlockHeader, + pub mmr_inclusion_proof: MMRInclusionProof, +} + +impl SPV { + pub fn new( + transaction: CircuitTransaction, + block_inclusion_proof: BlockInclusionProof, + block_header: CircuitBlockHeader, + mmr_inclusion_proof: MMRInclusionProof, + ) -> Self { + SPV { + transaction, + block_inclusion_proof, + block_header, + mmr_inclusion_proof, + } + } + + /// Verifies the SPV proof using the provided MMRGuest. + pub fn verify(&self, mmr_guest: MMRGuest) -> bool { + let mid_state_txid: [u8; 32] = self.transaction.mid_state_txid(); + let block_merkle_root = self.block_inclusion_proof.get_root(mid_state_txid); + assert_eq!(block_merkle_root, self.block_header.merkle_root, + "Calculated block Merkle root from the block inclusion proof does not match the one in the block header" + ); + let block_hash = self.block_header.compute_block_hash(); + mmr_guest.verify_proof(block_hash, &self.mmr_inclusion_proof) + } +} + +#[cfg(test)] +mod tests { + use borsh::BorshDeserialize; + use hex_literal::hex; + + use crate::{ + bridge_circuit::{ + merkle_tree::{verify_merkle_proof, BitcoinMerkleTree, BlockInclusionProof}, + spv::SPV, + transaction::CircuitTransaction, + }, + common::hashes::calculate_sha256, + header_chain::{mmr_guest::MMRGuest, mmr_native::MMRNative, CircuitBlockHeader}, + }; + + // Mainnet block headers from 0 to 16 + const MAINNET_BLOCK_HEADERS: [[u8; 80]; 16] = [ + hex!("0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c"), + hex!("010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e36299"), + 
hex!("010000004860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9bb0bc6649ffff001d08d2bd61"), + hex!("01000000bddd99ccfda39da1b108ce1a5d70038d0a967bacb68b6b63065f626a0000000044f672226090d85db9a9f2fbfe5f0f9609b387af7be5b7fbb7a1767c831c9e995dbe6649ffff001d05e0ed6d"), + hex!("010000004944469562ae1c2c74d9a535e00b6f3e40ffbad4f2fda3895501b582000000007a06ea98cd40ba2e3288262b28638cec5337c1456aaf5eedc8e9e5a20f062bdf8cc16649ffff001d2bfee0a9"), + hex!("0100000085144a84488ea88d221c8bd6c059da090e88f8a2c99690ee55dbba4e00000000e11c48fecdd9e72510ca84f023370c9a38bf91ac5cae88019bee94d24528526344c36649ffff001d1d03e477"), + hex!("01000000fc33f596f822a0a1951ffdbf2a897b095636ad871707bf5d3162729b00000000379dfb96a5ea8c81700ea4ac6b97ae9a9312b2d4301a29580e924ee6761a2520adc46649ffff001d189c4c97"), + hex!("010000008d778fdc15a2d3fb76b7122a3b5582bea4f21f5a0c693537e7a03130000000003f674005103b42f984169c7d008370967e91920a6a5d64fd51282f75bc73a68af1c66649ffff001d39a59c86"), + hex!("010000004494c8cf4154bdcc0720cd4a59d9c9b285e4b146d45f061d2b6c967100000000e3855ed886605b6d4a99d5fa2ef2e9b0b164e63df3c4136bebf2d0dac0f1f7a667c86649ffff001d1c4b5666"), + hex!("01000000c60ddef1b7618ca2348a46e868afc26e3efc68226c78aa47f8488c4000000000c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd37047fca6649ffff001d28404f53"), + hex!("010000000508085c47cc849eb80ea905cc7800a3be674ffc57263cf210c59d8d00000000112ba175a1e04b14ba9e7ea5f76ab640affeef5ec98173ac9799a852fa39add320cd6649ffff001d1e2de565"), + hex!("01000000e915d9a478e3adf3186c07c61a22228b10fd87df343c92782ecc052c000000006e06373c80de397406dc3d19c90d71d230058d28293614ea58d6a57f8f5d32f8b8ce6649ffff001d173807f8"), + hex!("010000007330d7adf261c69891e6ab08367d957e74d4044bc5d9cd06d656be9700000000b8c8754fabb0ffeb04ca263a1368c39c059ca0d4af3151b876f27e197ebb963bc8d06649ffff001d3f596a0c"), + 
hex!("010000005e2b8043bd9f8db558c284e00ea24f78879736f4acd110258e48c2270000000071b22998921efddf90c75ac3151cacee8f8084d3e9cb64332427ec04c7d562994cd16649ffff001d37d1ae86"), + hex!("0100000089304d4ba5542a22fb616d1ca019e94222ee45c1ad95a83120de515c00000000560164b8bad7675061aa0f43ced718884bdd8528cae07f24c58bb69592d8afe185d36649ffff001d29cbad24"), + hex!("01000000378a6f6593e2f0251132d96616e837eb6999bca963f6675a0c7af180000000000d080260d107d269ccba9247cfc64c952f1d13514b49e9f1230b3a197a8b7450fa276849ffff001d38d8fb98"), + ]; + + // Mainnet block transactions from 0 to 16, one for each block. + const MAINNET_BLOCK_TRANSACTIONS: [&[u8]; 16] = [ + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010bffffffff0100f2052a010000004341047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010effffffff0100f2052a0100000043410494b9d3e76c5b1629ecf97fff95d7a4bbdac87cc26099ada28066c6ff1eb9191223cd897194a08d0c2726c5747f1db49e8cf90e75dc3e3550ae9b30086f3cd5aaac00000000"), + 
&hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d011affffffff0100f2052a01000000434104184f32b212815c6e522e66686324030ff7e5bf08efb21f8b00614fb7690e19131dd31304c54f37baa40db231c918106bb9fd43373e37ae31a0befc6ecaefb867ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0120ffffffff0100f2052a0100000043410456579536d150fbce94ee62b47db2ca43af0a730a0467ba55c79e2a7ec9ce4ad297e35cdbb8e42a4643a60eef7c9abee2f5822f86b1da242d9c2301c431facfd8ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0123ffffffff0100f2052a0100000043410408ce279174b34c077c7b2043e3f3d45a588b85ef4ca466740f848ead7fb498f0a795c982552fdfa41616a7c0333a269d62108588e260fd5a48ac8e4dbf49e2bcac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d012bffffffff0100f2052a01000000434104a59e64c774923d003fae7491b2a7f75d6b7aa3f35606a8ff1cf06cd3317d16a41aa16928b1df1f631f31f28c7da35d4edad3603adb2338c4d4dd268f31530555ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d012cffffffff0100f2052a01000000434104cc8d85f5e7933cb18f13b97d165e1189c1fb3e9c98b0dd5446b2a1989883ff9e740a8a75da99cc59a21016caf7a7afd3e4e9e7952983e18d1ff70529d62e0ba1ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0134ffffffff0100f2052a0100000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0136ffffffff0100f2052a01000000434104fcc2888ca91cf0103d8c5797c256bf976e81f280205d002d85b9b622ed1a6f820866c7b5fe12285cfa78c035355d752fc94a398b67597dc4fbb5b386816425ddac00000000"), + 
&hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d013bffffffff0100f2052a010000004341046cc86ddcd0860b7cef16cbaad7fe31fda1bf073c25cb833fa9e409e7f51e296f39b653a9c8040a2f967319ff37cf14b0991b86173462a2d5907cb6c5648b5b76ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010cffffffff0100f2052a0100000043410478ebe2c28660cd2fa1ba17cc04e58d6312679005a7cad1fd56a7b7f4630bd700bcdb84a888a43fe1a2738ea1f3d2301d02faef357e8a5c35a706e4ae0352a6adac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d013cffffffff0100f2052a01000000434104c5a68f5fa2192b215016c5dfb384399a39474165eea22603cd39780e653baad9106e36947a1ba3ad5d3789c5cead18a38a538a7d834a8a2b9f0ea946fb4e6f68ac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d013effffffff0100f2052a010000004341043e8ac6b8ea64e85928b6469f17db0096de0bcae7d09a4497413d9bba49c00ffdf9cb0ce07c404784928b3976f0beea42fe2691a8f0430bcb2b0daaf5aa02b30eac00000000"), + &hex!("01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010affffffff0100f2052a01000000434104e0041b4b4d9b6feb7221803a35d997efada6e2b5d24f5fc7205f2ea6b62a1adc9983a7a7dab7e93ea791bed5928e7a32286fa4facadd16313b75b467aea77499ac00000000"), + ]; + + #[test] + fn test_spv() { + let mut mmr_native = MMRNative::new(); + let mut mmr_guest = MMRGuest::new(); + let block_headers = MAINNET_BLOCK_HEADERS + .iter() + .map(|header| CircuitBlockHeader::try_from_slice(header).unwrap()) + .collect::>(); + let txs = MAINNET_BLOCK_TRANSACTIONS + .iter() + .map(|tx| CircuitTransaction(bitcoin::consensus::deserialize(tx).unwrap())) + .collect::>(); + let mut bitcoin_merkle_proofs: Vec = vec![]; + for tx in txs.clone().into_iter() { + let bitcoin_merkle_tree = BitcoinMerkleTree::new_mid_state(&[tx.clone()]); + let bitcoin_merkle_proof = 
bitcoin_merkle_tree.generate_proof(0); + assert!(verify_merkle_proof( + tx.mid_state_txid(), + &bitcoin_merkle_proof, + calculate_sha256(&bitcoin_merkle_tree.root()) + )); + bitcoin_merkle_proofs.push(bitcoin_merkle_proof); + } + for (i, header) in block_headers.iter().enumerate() { + mmr_native.append(header.compute_block_hash()); + mmr_guest.append(header.compute_block_hash()); + for j in 0..i { + let (mmr_leaf, mmr_proof) = mmr_native.generate_proof(j as u32).unwrap(); + assert!(mmr_native.verify_proof(mmr_leaf, &mmr_proof)); + assert_eq!(mmr_leaf, block_headers[j].compute_block_hash()); + let spv = SPV::new( + txs[j].clone(), + bitcoin_merkle_proofs[j].clone(), + block_headers[j].clone(), + mmr_proof, + ); + assert!(spv.verify(mmr_guest.clone())); + } + } + } +} diff --git a/circuits-lib/src/bridge_circuit/storage_proof.rs b/circuits-lib/src/bridge_circuit/storage_proof.rs new file mode 100644 index 000000000..a85730c59 --- /dev/null +++ b/circuits-lib/src/bridge_circuit/storage_proof.rs @@ -0,0 +1,253 @@ +//! # Ethereum Storage Proof Verifier +//! This module implements the Ethereum storage proof verifier for the bridge circuit. +//! It includes functions to verify storage proofs related to deposit and withdrawal UTXOs, +//! ensuring the integrity of the Bridge contract's state. The verifier checks the storage keys +//! and values against the expected state root, and it handles the conversion of hexadecimal strings to decimal. 
+ +use alloy_primitives::Bytes; +use alloy_primitives::{Keccak256, U256}; +use alloy_rpc_types::EIP1186StorageProof; +use jmt::KeyHash; +use sha2::{Digest, Sha256}; + +use super::structs::{MoveTxid, StorageProof, WithdrawalOutpointTxid}; + +const ADDRESS: [u8; 20] = hex_literal::hex!("3100000000000000000000000000000000000002"); + +// STORAGRE SLOTES of DATA STRUCTURES ON BRIDGE CONTRACT +const UTXOS_STORAGE_INDEX: [u8; 32] = + hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000007"); + +const DEPOSIT_STORAGE_INDEX: [u8; 32] = + hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000008"); + +/// Verifies Ethereum storage proofs related to deposit and withdrawal UTXOs. +/// +/// # Parameters +/// +/// - `storage_proof`: A reference to `StorageProof`, containing UTXO, vout and deposit proofs. +/// - `state_root`: A 32-byte array representing the Ethereum state root. +/// +/// # Returns +/// +/// A tuple containing: +/// - A `WithdrawalOutpointTxid` representing the transaction ID (txid) of the withdrawal outpoint. +/// - A `u32` representing the output index (vout) of the withdrawal outpoint. +/// - A `MoveTxid` array representing the move-to-vault transaction ID. +/// +/// # Panics +/// +/// - If JSON deserialization fails. +/// - If the computed deposit storage key does not match the proof. +/// - If the computed UTXO storage key or deposit index is invalid. +/// - If the proof verification via `storage_verify` fails. 
+pub fn verify_storage_proofs( + storage_proof: &StorageProof, + state_root: [u8; 32], +) -> (WithdrawalOutpointTxid, u32, MoveTxid) { + let utxo_storage_proof: EIP1186StorageProof = + serde_json::from_str(&storage_proof.storage_proof_utxo) + .expect("Failed to deserialize UTXO storage proof"); + + let vout_storage_proof: EIP1186StorageProof = + serde_json::from_str(&storage_proof.storage_proof_vout) + .expect("Failed to deserialize vout storage proof"); + + let deposit_storage_proof: EIP1186StorageProof = + serde_json::from_str(&storage_proof.storage_proof_deposit_txid) + .expect("Failed to deserialize deposit storage proof"); + + let storage_address: U256 = { + let mut keccak = Keccak256::new(); + keccak.update(UTXOS_STORAGE_INDEX); + let hash = keccak.finalize(); + U256::from_be_bytes( + <[u8; 32]>::try_from(&hash[..]).expect("Hash slice has incorrect length"), + ) + }; + + let storage_key_utxo: alloy_primitives::Uint<256, 4> = + storage_address + U256::from(storage_proof.index * 2); + + let storage_key_vout: alloy_primitives::Uint<256, 4> = + storage_address + U256::from(storage_proof.index * 2 + 1); + + let storage_address_deposit: U256 = { + let mut keccak = Keccak256::new(); + keccak.update(DEPOSIT_STORAGE_INDEX); + let hash = keccak.finalize(); + U256::from_be_bytes( + <[u8; 32]>::try_from(&hash[..]).expect("Hash slice has incorrect length"), + ) + }; + + let deposit_storage_key: alloy_primitives::Uint<256, 4> = + storage_address_deposit + U256::from(storage_proof.index); + + let deposit_storage_key_bytes = deposit_storage_key.to_be_bytes::<32>(); + + if deposit_storage_key_bytes != deposit_storage_proof.key.as_b256().0 { + panic!( + "Invalid deposit storage key. left: {:?} right: {:?}", + deposit_storage_key_bytes, + deposit_storage_proof.key.as_b256().0 + ); + } + + if storage_key_utxo.to_be_bytes() != utxo_storage_proof.key.as_b256().0 { + panic!( + "Invalid withdrawal UTXO storage key. 
left: {:?} right: {:?}", + storage_key_utxo.to_be_bytes::<32>(), + utxo_storage_proof.key.as_b256().0 + ); + } + + if storage_key_vout.to_be_bytes() != vout_storage_proof.key.as_b256().0 { + panic!( + "Invalid withdrawal vout storage key. left: {:?} right: {:?}", + storage_key_vout.to_be_bytes::<32>(), + vout_storage_proof.key.as_b256().0 + ); + } + + storage_verify(&utxo_storage_proof, state_root); + + storage_verify(&deposit_storage_proof, state_root); + + storage_verify(&vout_storage_proof, state_root); + + let buf: [u8; 32] = vout_storage_proof.value.to_be_bytes(); + + // ENDIANNESS SHOULD BE CHECKED THIS FIELD IS 4 BYTES in the contract + let vout = u32::from_le_bytes( + buf[28..32] + .try_into() + .expect("Vout value conversion failed"), + ); + + let wd_outpoint = WithdrawalOutpointTxid(utxo_storage_proof.value.to_be_bytes()); + + let move_txid = MoveTxid(deposit_storage_proof.value.to_be_bytes()); + + (wd_outpoint, vout, move_txid) +} + +/// Verifies an Ethereum storage proof against an expected root hash. +/// +/// # Parameters +/// +/// - `storage_proof`: A reference to an `EIP1186StorageProof` containing the key, value, and Merkle proof. +/// - `expected_root_hash`: A 32-byte array representing the expected root hash of the storage Merkle tree. +/// +/// # Panics +/// +/// - If Borsh deserialization of `storage_proof.proof[0]` fails. +/// - If Merkle proof verification fails. 
+fn storage_verify(storage_proof: &EIP1186StorageProof, expected_root_hash: [u8; 32]) { + let kaddr = { + let mut hasher: Sha256 = sha2::Digest::new_with_prefix(ADDRESS.as_slice()); + #[allow(clippy::unnecessary_fallible_conversions)] + hasher.update( + U256::try_from(storage_proof.key.as_b256()) + .unwrap() + .as_le_slice(), + ); + let arr = hasher.finalize(); + U256::from_le_slice(&arr) + }; + let storage_key = [b"E/s/".as_slice(), kaddr.as_le_slice()].concat(); + let key_hash = KeyHash::with::(storage_key.clone()); + + let proved_value = if storage_proof.proof[1] == Bytes::from("y") { + // Storage value exists and it's serialized form is: + let bytes = storage_proof.value.as_le_bytes().to_vec(); + Some(bytes) + } else { + // Storage value does not exist + panic!("storage does not exist"); + }; + + let storage_proof: jmt::proof::SparseMerkleProof = + borsh::from_slice(&storage_proof.proof[0]).unwrap(); + + let expected_root_hash = jmt::RootHash(expected_root_hash); + + storage_proof + .verify(expected_root_hash, key_hash, proved_value) + .expect("Account storage proof must be valid"); +} + +#[cfg(test)] +mod tests { + use super::*; + + const STORAGE_PROOF: &[u8] = include_bytes!("../../test_data/storage_proof.bin"); + + #[test] + fn test_verify_storage_proofs() { + let storage_proof: StorageProof = borsh::from_slice(STORAGE_PROOF).unwrap(); + + let state_root: [u8; 32] = + hex::decode("6dbacc5110eea06620bf7ec00a96bdc652dceaa1712acaa86a32e976d7e18658") + .expect("Valid hex, cannot fail") + .try_into() + .expect("Valid length, cannot fail"); + + let (user_wd_outpoint, vout, move_tx_id) = + verify_storage_proofs(&storage_proof, state_root); + + let move_tx_id_hex = hex::encode(*move_tx_id); + + let expected_user_wd_outpoint_bytes = [ + 140, 60, 152, 247, 242, 161, 54, 101, 52, 130, 197, 223, 104, 145, 231, 202, 144, 45, + 92, 26, 90, 11, 193, 221, 203, 172, 255, 218, 172, 14, 240, 110, + ]; + + let expected_vout: u32 = 1; + + let expected_move_tx_id_hex = + 
"93742351a8c68d0f102bd5bd92c477fdc4374168feb1fb81d083ec6cca5838a4"; + + assert_eq!( + move_tx_id_hex, expected_move_tx_id_hex, + "Invalid transaction ID" + ); + + assert_eq!( + *user_wd_outpoint, expected_user_wd_outpoint_bytes, + "Invalid UTXO value" + ); + + assert_eq!(vout, expected_vout, "Invalid vout value"); + } + + #[test] + #[should_panic] + fn test_verify_storage_proofs_invalid_proof() { + let mut storage_proof: StorageProof = borsh::from_slice(STORAGE_PROOF).unwrap(); + + let state_root: [u8; 32] = + hex::decode("18f3fda28dd327044edc9ff0054ab2a51d6e36edb77a8b8ab028217f90221a5b") + .expect("Valid hex, cannot fail") + .try_into() + .expect("Valid length, cannot fail"); + + storage_proof.storage_proof_utxo = "invalid_proof".to_string(); + + verify_storage_proofs(&storage_proof, state_root); + } + + #[test] + #[should_panic] + fn test_verify_storage_proofs_invalid_state_root() { + let storage_proof: StorageProof = borsh::from_slice(STORAGE_PROOF).unwrap(); + + let state_root: [u8; 32] = + hex::decode("18f3fda28dd327044edc9ff0054ab2a51d6e36edb77a8b8ab028217f90221a5a") + .expect("Valid hex, cannot fail") + .try_into() + .expect("Valid length, cannot fail"); + + verify_storage_proofs(&storage_proof, state_root); + } +} diff --git a/circuits-lib/src/bridge_circuit/structs.rs b/circuits-lib/src/bridge_circuit/structs.rs new file mode 100644 index 000000000..677a60b31 --- /dev/null +++ b/circuits-lib/src/bridge_circuit/structs.rs @@ -0,0 +1,565 @@ +//! # Bridge Circuit Structs +//! This module defines the data structures used in the Bridge Circuit. +//! It includes structures for light client proofs, work-only circuit outputs, and various constants used in the circuit. +//! ## Key Structures +//! - **LightClientProof:** Represents a light client proof with a journal and L2 height. +//! - **WorkOnlyCircuitOutput:** Represents the output of a work-only circuit, including work done and genesis state hash. +//! 
- **WatchTowerChallengeTxCommitment:** Represents a commitment to a watchtower challenge transaction, including the Groth16 proof and total work. +//! - **WithdrawalOutpointTxid:** Represents the transaction ID (txid) of a withdrawal outpoint. +//! - **MoveTxid:** Represents the transaction ID (txid) of a move-to-vault transaction. +//! - **StorageProof:** Represents the storage proof for Ethereum, including UTXO, vout, and deposit proofs. +//! - **WatchtowerInput:** Represents the input for a watchtower, including the watchtower index, challenge inputs, and transaction details. + +use std::ops::{Deref, DerefMut}; + +use crate::common::constants::MAX_NUMBER_OF_WATCHTOWERS; +use bitcoin::{ + consensus::Encodable, + hashes::{sha256, Hash}, + sighash::Annex, + taproot::TAPROOT_ANNEX_PREFIX, + Amount, ScriptBuf, Transaction, TxOut, Txid, Witness, +}; +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; + +use crate::header_chain::BlockHeaderCircuitOutput; + +use super::{spv::SPV, transaction::CircuitTransaction}; + +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, BorshDeserialize, BorshSerialize)] +pub struct WithdrawalOutpointTxid(pub [u8; 32]); + +impl Deref for WithdrawalOutpointTxid { + type Target = [u8; 32]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, BorshDeserialize, BorshSerialize)] +pub struct MoveTxid(pub [u8; 32]); + +impl Deref for MoveTxid { + type Target = [u8; 32]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Represents a constant value used for each deposit in the bridge circuit. 
+#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, BorshDeserialize, BorshSerialize)] +pub struct DepositConstant(pub [u8; 32]); + +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, BorshDeserialize, BorshSerialize)] +pub struct ChallengeSendingWatchtowers(pub [u8; 20]); + +impl Deref for ChallengeSendingWatchtowers { + type Target = [u8; 20]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, BorshDeserialize, BorshSerialize)] +pub struct PayoutTxBlockhash(pub [u8; 20]); + +impl TryFrom<&[u8]> for PayoutTxBlockhash { + type Error = &'static str; + + fn try_from(value: &[u8]) -> Result { + let arr: [u8; 20] = value + .try_into() + .map_err(|_| "Expected 20 bytes for PayoutTxBlockhash")?; + Ok(PayoutTxBlockhash(arr)) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, BorshDeserialize, BorshSerialize)] +pub struct LatestBlockhash(pub [u8; 20]); + +impl TryFrom<&[u8]> for LatestBlockhash { + type Error = &'static str; + + fn try_from(value: &[u8]) -> Result { + let arr: [u8; 20] = value + .try_into() + .map_err(|_| "Expected 20 bytes for LatestBlockhash")?; + Ok(LatestBlockhash(arr)) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, BorshDeserialize, BorshSerialize)] +pub struct TotalWork(pub [u8; 16]); + +impl Deref for TotalWork { + type Target = [u8; 16]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl TryFrom<&[u8]> for TotalWork { + type Error = &'static str; + + fn try_from(value: &[u8]) -> Result { + let arr: [u8; 16] = value + .try_into() + .map_err(|_| "Expected 16 bytes for TotalWork")?; + Ok(TotalWork(arr)) + } +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct WorkOnlyCircuitInput { + pub header_chain_circuit_output: BlockHeaderCircuitOutput, +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct 
WorkOnlyCircuitOutput { + pub work_u128: [u8; 16], + pub genesis_state_hash: [u8; 32], +} + +#[derive(Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct WatchTowerChallengeTxCommitment { + pub compressed_g16_proof: [u8; 128], + pub total_work: [u8; 16], +} + +#[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize, Default)] +pub struct LightClientProof { + pub lc_journal: Vec, + pub l2_height: String, +} + +#[derive(Debug, Clone, Eq, PartialEq, BorshDeserialize, BorshSerialize, Default)] +pub struct StorageProof { + pub storage_proof_utxo: String, // This will be an Outpoint + pub storage_proof_vout: String, // This is the vout of the txid + pub storage_proof_deposit_txid: String, // This is the txid of the deposit tx + pub index: u32, // This is the index of the storage proof in the contract +} + +#[derive(Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct WatchtowerInput { + pub watchtower_idx: u16, // Which watchtower this is + pub watchtower_challenge_input_idx: u16, // Which input index this challenge connector txout goes to + pub watchtower_challenge_utxos: Vec, // BridgeCircuitUTXO TxOut serialized and all the prevouts for watchtower challenge tx, Vec + pub watchtower_challenge_tx: CircuitTransaction, // BridgeCircuitTransaction challenge tx itself for each watchtower + pub watchtower_challenge_witness: CircuitWitness, // Witness + pub annex_digest: Option<[u8; 32]>, // Optional annex digest for the watchtower challenge tx +} + +impl WatchtowerInput { + pub fn new( + watchtower_idx: u16, + watchtower_challenge_input_idx: u16, + watchtower_challenge_utxos: Vec, + watchtower_challenge_tx: Transaction, + watchtower_challenge_witness: Witness, + annex_digest: Option<[u8; 32]>, + ) -> Result { + if watchtower_idx as usize >= MAX_NUMBER_OF_WATCHTOWERS { + return Err("Watchtower index out of bounds"); + } + + let watchtower_challenge_tx = CircuitTransaction::from(watchtower_challenge_tx); + + let 
watchtower_challenge_witness: CircuitWitness = + CircuitWitness::from(watchtower_challenge_witness); + + let watchtower_challenge_utxos: Vec = watchtower_challenge_utxos + .into_iter() + .map(CircuitTxOut::from) + .collect::>(); + + Ok(Self { + watchtower_idx, + watchtower_challenge_input_idx, + watchtower_challenge_utxos, + watchtower_challenge_tx, + watchtower_challenge_witness, + annex_digest, + }) + } + + /// Constructs a `WatchtowerInput` instance from the kickoff transaction, the watchtower transaction and + /// an optional slice of previous transactions. + /// + /// # Parameters + /// - `kickoff_tx_id`: The kickoff transaction id whose output is consumed by an input of the watchtower transaction + /// - `watchtower_tx`: The watchtower challenge transaction that includes an input referencing the `kickoff_tx` + /// - `prevout_txs`: A slice of transactions, each including at least one output spent as input in `watchtower_tx` + /// - `watchtower_challenge_connector_start_idx`: Starting index for watchtower challenge connectors + /// + /// # Returns + /// Result containing the WatchtowerInput or an error message + /// + /// # Note + /// + /// All previous transactions other than kickoff tx whose outputs are spent by the `watchtower_tx` + /// should be supplied in `prevout_txs` if they exist. + /// + /// # Errors + /// + /// This function will return errors if: + /// - The kickoff transaction is not referenced by any input in the watchtower transaction. + /// - The output index underflows when computing the watchtower index. + /// - The watchtower index exceeds `MAX_NUMBER_OF_WATCHTOWERS`. + /// - A previous transaction required to resolve an input is not provided. + /// - An output referenced by an input is missing or out of bounds. + /// + /// # Panics + /// + /// Panics if: + /// - The watchtower index cannot be converted to `u8` (should be unreachable due to earlier bounds check). 
+ /// + pub fn from_txs( + kickoff_tx_id: Txid, + watchtower_tx: Transaction, + prevout_txs: &[Transaction], + watchtower_challenge_connector_start_idx: u16, + ) -> Result { + let watchtower_challenge_input_idx = watchtower_tx + .input + .iter() + .position(|input| input.previous_output.txid == kickoff_tx_id) + .map(|ind| ind as u16) + .ok_or("Kickoff txid not found in watchtower inputs")?; + + let output_index = watchtower_tx.input[watchtower_challenge_input_idx as usize] + .previous_output + .vout as usize; + + let watchtower_index = output_index + .checked_sub(watchtower_challenge_connector_start_idx as usize) + .ok_or("Output index underflow")? + / 2; + + if watchtower_index >= MAX_NUMBER_OF_WATCHTOWERS { + return Err("Watchtower index out of bounds"); + } + + let watchtower_idx = + u16::try_from(watchtower_index).expect("Cannot fail, already checked bounds"); + + let watchtower_challenge_utxos: Vec = watchtower_tx + .input + .iter() + .map(|input| { + let txid = input.previous_output.txid; + let vout = input.previous_output.vout as usize; + + let tx = prevout_txs + .iter() + .find(|tx| tx.compute_txid() == txid) + .ok_or("Previous transaction not found")?; + + let tx_out = tx + .output + .get(vout) + .cloned() + .ok_or("Output index out of bounds")?; + + Ok::(CircuitTxOut::from(tx_out)) + }) + .collect::, _>>()?; + + let mut watchtower_challenge_tx = CircuitTransaction::from(watchtower_tx); + + let watchtower_challenge_annex: Option = { + // If there are at most one element in the witness, then there are no annexes + if watchtower_challenge_tx.input[watchtower_challenge_input_idx as usize] + .witness + .len() + <= 1 + { + None + } + // Otherwise, if the last element starts with 0x50, then it is an Annex + else if let Some(last_witness_element) = watchtower_challenge_tx.input + [watchtower_challenge_input_idx as usize] + .witness + .last() + { + // Check if the first byte is 0x50 before attempting to create an Annex + // This avoids creating a Result that we 
immediately unwrap or map to None + if last_witness_element.first() == Some(&TAPROOT_ANNEX_PREFIX) { + Annex::new(last_witness_element).ok() // Convert Result to Option + } else { + None + } + } else { + None + } + }; + + let annex_digest: Option<[u8; 32]> = watchtower_challenge_annex.and_then(|annex| { + // Use and_then to flatten the Option> to Option + let mut enc = sha256::Hash::engine(); + match annex.consensus_encode(&mut enc) { + Ok(_) => { + // Discard the usize, we only care if it succeeded + let hash = sha256::Hash::from_engine(enc); + Some(hash.to_byte_array()) // Use to_byte_array() for owned array + } + Err(_) => { + // Handle the error during encoding, e.g., log it or return None + // For now, returning None if encoding fails + None + } + } + }); + + // Get the first witness item, returning an error if it doesn't exist. + let Some(signature) = watchtower_challenge_tx.input + [watchtower_challenge_input_idx as usize] + .witness + .nth(0) + else { + return Err("Watchtower challenge input witness is empty"); + }; + + // The rest of the logic proceeds with the guaranteed `signature`. 
+ let mut witness = Witness::new(); + witness.push(signature); + + let watchtower_challenge_witness = CircuitWitness::from(witness); + + for input in &mut watchtower_challenge_tx.input { + input.witness.clear(); + } + + Ok(Self { + watchtower_idx, + watchtower_challenge_input_idx, + watchtower_challenge_utxos, + watchtower_challenge_tx, + watchtower_challenge_witness, + annex_digest, + }) + } +} + +#[derive(Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct BridgeCircuitInput { + pub kickoff_tx: CircuitTransaction, + // Add all watchtower pubkeys as global input as Vec<[u8; 32]> Which should be shorter than or equal to 160 elements + pub all_tweaked_watchtower_pubkeys: Vec<[u8; 32]>, // Per watchtower [u8; 34] or OP_PUSHNUM_1 OP_PUSHBYTES_32 which is [u8; 32] + pub watchtower_inputs: Vec, + pub hcp: BlockHeaderCircuitOutput, + pub payout_spv: SPV, + pub payout_input_index: u16, + pub lcp: LightClientProof, + pub sp: StorageProof, + pub watchtower_challenge_connector_start_idx: u16, +} + +#[allow(clippy::too_many_arguments)] +impl BridgeCircuitInput { + pub fn new( + kickoff_tx: Transaction, + watchtower_inputs: Vec, + all_tweaked_watchtower_pubkeys: Vec<[u8; 32]>, + hcp: BlockHeaderCircuitOutput, + payout_spv: SPV, + payout_input_index: u16, + lcp: LightClientProof, + sp: StorageProof, + watchtower_challenge_connector_start_idx: u16, + ) -> Self { + Self { + kickoff_tx: CircuitTransaction::from(kickoff_tx), + watchtower_inputs, + hcp, + payout_spv, + payout_input_index, + lcp, + sp, + all_tweaked_watchtower_pubkeys, + watchtower_challenge_connector_start_idx, + } + } +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +pub struct WatchtowerChallengeSet { + pub challenge_senders: [u8; 20], + pub challenge_outputs: Vec>, +} + +fn serialize_txout(txout: &TxOut, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(&txout.value.to_sat(), writer)?; + BorshSerialize::serialize(&txout.script_pubkey.as_bytes(), writer) +} + +fn 
deserialize_txout(reader: &mut R) -> borsh::io::Result { + let value = Amount::from_sat(u64::deserialize_reader(reader)?); + let script_pubkey = ScriptBuf::from_bytes(Vec::::deserialize_reader(reader)?); + + Ok(TxOut { + value, + script_pubkey, + }) +} + +#[derive(Clone, PartialEq, Eq, Debug, Hash)] +pub struct CircuitTxOut(pub TxOut); + +impl CircuitTxOut { + pub fn from(tx_out: TxOut) -> Self { + Self(tx_out) + } + + pub fn inner(&self) -> &TxOut { + &self.0 + } +} + +impl BorshSerialize for CircuitTxOut { + #[inline] + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + serialize_txout(&self.0, writer)?; + Ok(()) + } +} + +impl BorshDeserialize for CircuitTxOut { + #[inline] + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let tx_out = deserialize_txout(reader)?; + Ok(Self(tx_out)) + } +} + +impl Deref for CircuitTxOut { + type Target = TxOut; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for CircuitTxOut { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From for CircuitTxOut { + fn from(tx_out: TxOut) -> Self { + Self(tx_out) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, Hash)] +pub struct CircuitWitness(pub Witness); + +impl CircuitWitness { + pub fn from(witness: Witness) -> Self { + Self(witness) + } + + pub fn inner(&self) -> &Witness { + &self.0 + } +} + +impl BorshSerialize for CircuitWitness { + #[inline] + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(&self.0.to_vec(), writer)?; + Ok(()) + } +} + +impl BorshDeserialize for CircuitWitness { + #[inline] + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let witness_data = Vec::>::deserialize_reader(reader)?; + let witness = Witness::from(witness_data); + Ok(Self(witness)) + } +} + +impl Deref for CircuitWitness { + type Target = Witness; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for CircuitWitness { + fn deref_mut(&mut self) -> &mut 
Self::Target { + &mut self.0 + } +} + +impl From for CircuitWitness { + fn from(witness: Witness) -> Self { + Self(witness) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, Hash, Copy)] +pub struct CircuitTxid(pub Txid); + +impl CircuitTxid { + pub fn from(tx_id: Txid) -> Self { + Self(tx_id) + } + + pub fn inner(&self) -> &Txid { + &self.0 + } +} + +impl BorshSerialize for CircuitTxid { + #[inline] + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(&self.0.as_byte_array(), writer)?; + Ok(()) + } +} + +impl BorshDeserialize for CircuitTxid { + #[inline] + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let tx_data: [u8; 32] = + Vec::::deserialize_reader(reader)? + .try_into() + .map_err(|_| { + borsh::io::Error::new( + borsh::io::ErrorKind::InvalidData, + "Failed to convert Vec to [u8; 32]", + ) + })?; + + let tx_id = Txid::from_byte_array(tx_data); + + Ok(Self(tx_id)) + } +} + +impl Deref for CircuitTxid { + type Target = Txid; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for CircuitTxid { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From for CircuitTxid { + fn from(tx_id: Txid) -> Self { + Self(tx_id) + } +} diff --git a/circuits-lib/src/bridge_circuit/transaction.rs b/circuits-lib/src/bridge_circuit/transaction.rs new file mode 100644 index 000000000..8e6383fbd --- /dev/null +++ b/circuits-lib/src/bridge_circuit/transaction.rs @@ -0,0 +1,296 @@ +//! # Bitcoin Transaction Wrapper +//! This module provides a wrapper around Bitcoin's `Transaction` type, adding functionality +//! for calculating transaction IDs, serializing and deserializing transactions, and providing +//! a more convenient interface for working with Bitcoin transactions in the context of the bridge circuit. 
+/// Code is taken from Citrea +/// https://github.com/chainwayxyz/citrea/blob/0acb887b1a766fac1a482a68c6d51ecf9661f538/crates/bitcoin-da/src/spec/transaction.rs +/// +use core::ops::{Deref, DerefMut}; + +use bitcoin::absolute::LockTime; +use bitcoin::consensus::Encodable; +use bitcoin::hashes::Hash; +use bitcoin::transaction::Version; +use bitcoin::{Amount, OutPoint, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Witness}; +use borsh::{BorshDeserialize, BorshSerialize}; + +use crate::common::hashes::calculate_sha256; + +/// A wrapper around Bitcoin's `Transaction` type that provides additional and simpler functionality. +#[derive(Clone, PartialEq, Eq, Debug, Hash)] +pub struct CircuitTransaction(pub Transaction); + +impl CircuitTransaction { + pub fn from(transaction: Transaction) -> Self { + Self(transaction) + } + + pub fn inner(&self) -> &Transaction { + &self.0 + } + + /// Returns the transaction id, in big-endian byte order. One must be careful when dealing with + /// Bitcoin transaction ids, as they are little-endian in the Bitcoin protocol. 
+ pub fn txid(&self) -> [u8; 32] { + let mid_state = self.mid_state_txid(); + calculate_sha256(&mid_state) + } + + /// Returns the first digest of the transaction to be used in SPV + pub fn mid_state_txid(&self) -> [u8; 32] { + let mut tx_bytes_vec = vec![]; + self.inner() + .version + .consensus_encode(&mut tx_bytes_vec) + .unwrap(); + self.inner() + .input + .consensus_encode(&mut tx_bytes_vec) + .unwrap(); + self.inner() + .output + .consensus_encode(&mut tx_bytes_vec) + .unwrap(); + self.inner() + .lock_time + .consensus_encode(&mut tx_bytes_vec) + .unwrap(); + calculate_sha256(&tx_bytes_vec) + } +} + +impl BorshSerialize for CircuitTransaction { + #[inline] + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(&self.0.version.0, writer)?; + BorshSerialize::serialize(&self.0.lock_time.to_consensus_u32(), writer)?; + BorshSerialize::serialize(&self.0.input.len(), writer)?; + for input in &self.0.input { + serialize_txin(input, writer)?; + } + BorshSerialize::serialize(&self.0.output.len(), writer)?; + for output in &self.0.output { + serialize_txout(output, writer)?; + } + Ok(()) + } +} + +impl BorshDeserialize for CircuitTransaction { + #[inline] + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let version = Version(i32::deserialize_reader(reader)?); + let lock_time = LockTime::from_consensus(u32::deserialize_reader(reader)?); + let input_len = usize::deserialize_reader(reader)?; + let mut input = Vec::with_capacity(input_len); + for _ in 0..input_len { + input.push(deserialize_txin(reader)?); + } + let output_len = usize::deserialize_reader(reader)?; + let mut output = Vec::with_capacity(output_len); + for _ in 0..output_len { + output.push(deserialize_txout(reader)?); + } + + let tx = Transaction { + version, + lock_time, + input, + output, + }; + + Ok(Self(tx)) + } +} + +fn serialize_txin(txin: &TxIn, writer: &mut W) -> borsh::io::Result<()> { + 
BorshSerialize::serialize(&txin.previous_output.txid.to_byte_array(), writer)?; + BorshSerialize::serialize(&txin.previous_output.vout, writer)?; + BorshSerialize::serialize(&txin.script_sig.as_bytes(), writer)?; + BorshSerialize::serialize(&txin.sequence.0, writer)?; + BorshSerialize::serialize(&txin.witness.to_vec(), writer) +} + +fn deserialize_txin(reader: &mut R) -> borsh::io::Result { + let txid = bitcoin::Txid::from_byte_array(<[u8; 32]>::deserialize_reader(reader)?); + let vout = u32::deserialize_reader(reader)?; + let script_sig = ScriptBuf::from_bytes(Vec::::deserialize_reader(reader)?); + let sequence = Sequence(u32::deserialize_reader(reader)?); + let witness = Witness::from(Vec::>::deserialize_reader(reader)?); + + Ok(TxIn { + previous_output: OutPoint { txid, vout }, + script_sig, + sequence, + witness, + }) +} + +fn serialize_txout(txout: &TxOut, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(&txout.value.to_sat(), writer)?; + BorshSerialize::serialize(&txout.script_pubkey.as_bytes(), writer) +} + +fn deserialize_txout(reader: &mut R) -> borsh::io::Result { + let value = Amount::from_sat(u64::deserialize_reader(reader)?); + let script_pubkey = ScriptBuf::from_bytes(Vec::::deserialize_reader(reader)?); + + Ok(TxOut { + value, + script_pubkey, + }) +} + +impl Deref for CircuitTransaction { + type Target = Transaction; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for CircuitTransaction { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From for CircuitTransaction { + fn from(tx: Transaction) -> Self { + Self(tx) + } +} + +impl From for Transaction { + fn from(val: CircuitTransaction) -> Self { + val.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_txid_legacy() { + let tx = 
CircuitTransaction(bitcoin::consensus::deserialize(&hex::decode("0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1baded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000").unwrap()).unwrap()); + let mut txid = tx.txid(); + txid.reverse(); + assert_eq!( + hex::encode(txid), + "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16" + ); + } + + #[test] + fn test_txid_segwit() { + let tx = CircuitTransaction(bitcoin::consensus::deserialize(&hex::decode("0100000000010142ec43062180882d239799f134f7d8e9d104f37d87643e35fda84c47e4fc67a00000000000ffffffff026734000000000000225120e86c9c8c6777f28af40ef0c4cbd8308d27b60c7adf4f668d2433113616ddaa33cf660000000000001976a9149893ea81967d770f07f9bf0f659e3bce155be99a88ac01418a3d2a2182154dfd083cf48bfcd9f7dfb9d09eb46515e0043cdf39b688e9e711a2ce47f0f535191368be52fd706d77eb82eacd293a6a881491cdadf99b1df4400100000000").unwrap()).unwrap()); + let mut txid = tx.txid(); + txid.reverse(); + assert_eq!( + hex::encode(txid), + "a6a150fcdbabaf26040f4dea78ff53d794da2807d8600ead4758b065c5339324" + ); + } + + #[test] + fn test_from_transaction() { + let original_tx = Transaction { + version: Version(1), + lock_time: LockTime::from_consensus(0), + input: vec![], + output: vec![], + }; + + let bridge_tx = CircuitTransaction::from(original_tx.clone()); + assert_eq!(bridge_tx.inner(), &original_tx); + + let bridge_tx2: CircuitTransaction = original_tx.clone().into(); + assert_eq!(bridge_tx2.inner(), &original_tx); + assert_eq!(bridge_tx.txid(), bridge_tx2.txid()); + assert_eq!(bridge_tx.txid(), 
bridge_tx2.txid()); + } + + #[test] + fn test_into_transaction() { + let bridge_tx = CircuitTransaction(Transaction { + version: Version(1), + lock_time: LockTime::from_consensus(0), + input: vec![], + output: vec![], + }); + + let original_tx: Transaction = bridge_tx.clone().into(); + assert_eq!(&original_tx, bridge_tx.inner()); + assert_eq!(original_tx.compute_txid().to_byte_array(), bridge_tx.txid()); + } + + #[test] + fn test_borsh_serialization() { + let original_tx = Transaction { + version: Version(1), + lock_time: LockTime::from_consensus(0), + input: vec![], + output: vec![], + }; + let bridge_tx = CircuitTransaction(original_tx); + + // Serialize + let serialized = borsh::to_vec(&bridge_tx).unwrap(); + + // Deserialize + let deserialized: CircuitTransaction = borsh::from_slice(&serialized).unwrap(); + + assert_eq!(bridge_tx, deserialized); + assert_eq!(bridge_tx.txid(), deserialized.txid()); + } + + #[test] + fn test_deref_traits() { + let mut bridge_tx = CircuitTransaction(Transaction { + version: Version(1), + lock_time: LockTime::from_consensus(0), + input: vec![], + output: vec![], + }); + + assert_eq!(bridge_tx.version, Version(1)); + + bridge_tx.version = Version(2); + assert_eq!(bridge_tx.version, Version(2)); + } + + #[test] + fn test_complex_transaction() { + let script_sig = ScriptBuf::from_bytes(vec![0x76, 0xa9, 0x14]); + let script_pubkey = ScriptBuf::from_bytes(vec![0x76, 0xa9, 0x14]); + + let tx = Transaction { + version: Version(1), + lock_time: LockTime::from_consensus(0), + input: vec![TxIn { + previous_output: OutPoint { + txid: bitcoin::Txid::from_byte_array([0; 32]), + vout: 0, + }, + script_sig: script_sig.clone(), + sequence: Sequence(0xffffffff), + witness: Witness::new(), + }], + output: vec![TxOut { + value: Amount::from_sat(50000), + script_pubkey: script_pubkey.clone(), + }], + }; + + let bridge_tx = CircuitTransaction(tx.clone()); + + assert_eq!(bridge_tx.version, tx.version); + assert_eq!(bridge_tx.lock_time, tx.lock_time); + 
assert_eq!(bridge_tx.input.len(), 1); + assert_eq!(bridge_tx.output.len(), 1); + assert_eq!(bridge_tx.input[0].script_sig, script_sig); + assert_eq!(bridge_tx.output[0].script_pubkey, script_pubkey); + assert_eq!(bridge_tx.output[0].value, Amount::from_sat(50000)); + assert_eq!(bridge_tx.txid(), tx.compute_txid().to_byte_array()); + } +} diff --git a/circuits-lib/src/common/constants.rs b/circuits-lib/src/common/constants.rs new file mode 100644 index 000000000..d03a6d2a7 --- /dev/null +++ b/circuits-lib/src/common/constants.rs @@ -0,0 +1,30 @@ +//! # Common Constants +//! Common constants used for Risc0 circuits in the Clementine protocol. +//! These constants are used across various modules to ensure consistency and correctness in the circuit operations. +//! They include constants for the number of outputs, transaction assertions, and watchtowers. +//! ## Header Chain Method IDs +//! These constants represent the method IDs for different network header chains, such as Mainnet, Testnet4, Signet, and Regtest. + +/// The number of kickoff outputs before the first assert utxo. +pub const FIRST_FIVE_OUTPUTS: usize = 5; +/// The number of assertion transactions that a challenged operator should send. +pub const NUMBER_OF_ASSERT_TXS: usize = 33; +/// The theoretical maximum number of watchtowers that can be used in the Clementine protocol. +pub const MAX_NUMBER_OF_WATCHTOWERS: usize = 160; + +// Header chain method IDs for different networks. 
+pub const MAINNET_HEADER_CHAIN_METHOD_ID: [u32; 8] = [ + 3652626281, 3002766787, 3722627615, 4039790070, 4147537278, 1544178083, 1650283956, 1173916822, +]; + +pub const TESTNET4_HEADER_CHAIN_METHOD_ID: [u32; 8] = [ + 1238069516, 2440952130, 296067507, 548718500, 2300908118, 2762236706, 1765400336, 2839141719, +]; + +pub const SIGNET_HEADER_CHAIN_METHOD_ID: [u32; 8] = [ + 805076607, 3918709423, 1059613400, 4117586246, 1585144107, 652222522, 1455179921, 993675895, +]; + +pub const REGTEST_HEADER_CHAIN_METHOD_ID: [u32; 8] = [ + 1301496876, 1115288630, 3669357080, 2302899584, 2983230370, 2760464479, 2701535872, 2516705902, +]; diff --git a/circuits-lib/src/common/hashes.rs b/circuits-lib/src/common/hashes.rs new file mode 100644 index 000000000..698042d72 --- /dev/null +++ b/circuits-lib/src/common/hashes.rs @@ -0,0 +1,31 @@ +//! # Hashes +//! Common hashing functions used in the Clementine protocol. +//! These functions include double SHA256 hashing, single SHA256 hashing, and a utility function to +//! hash two nodes together. They are essential for cryptographic operations in the bridge circuit and other +//! components of the protocol. + +use sha2::{Digest, Sha256}; + +/// Calculates the double SHA256 hash of the input data. +pub fn calculate_double_sha256(input: &[u8]) -> [u8; 32] { + let mut hasher = Sha256::default(); + hasher.update(input); + let result = hasher.finalize_reset(); + hasher.update(result); + hasher.finalize().into() +} + +/// Calculates the SHA256 hash of the input data. 
+pub fn calculate_sha256(input: &[u8]) -> [u8; 32] { + let mut hasher = Sha256::default(); + hasher.update(input); + hasher.finalize().into() +} + +/// Utility function to hash two nodes together +pub fn hash_pair(left: [u8; 32], right: [u8; 32]) -> [u8; 32] { + let mut hasher = Sha256::default(); + hasher.update(left); + hasher.update(right); + hasher.finalize().into() +} diff --git a/circuits-lib/src/common/mod.rs b/circuits-lib/src/common/mod.rs new file mode 100644 index 000000000..bb3d6dd07 --- /dev/null +++ b/circuits-lib/src/common/mod.rs @@ -0,0 +1,33 @@ +//! # Common Module +//! This module contains common constants and utility functions used across the Risc0 circuits in the Clementine protocol. +//! It includes definitions for constants related to the bridge amount, number of outputs, transaction assertions, and watchtowers. +//! It also provides hashing functions for cryptographic operations, such as double SHA256 and single SHA256 hashing, +//! as well as a utility function to hash two nodes together. +//! The `ZkvmGuest` and `ZkvmHost` traits define the interface for zkVM guest and host interactions, +//! allowing for reading from and writing to the host, committing data, and verifying proofs. 
+ +pub mod constants; +pub mod hashes; +pub mod zkvm; + +pub const NETWORK_TYPE: &str = { + #[cfg(test)] + { + "testnet4" + } + #[cfg(not(test))] + { + match option_env!("BITCOIN_NETWORK") { + Some(network) if matches!(network.as_bytes(), b"mainnet") => "mainnet", + Some(network) if matches!(network.as_bytes(), b"testnet4") => "testnet4", + Some(network) if matches!(network.as_bytes(), b"signet") => "signet", + Some(network) if matches!(network.as_bytes(), b"regtest") => "regtest", + None => "testnet4", + _ => panic!("Invalid network type"), + } + } +}; + +pub const fn get_network() -> &'static str { + NETWORK_TYPE +} diff --git a/circuits-lib/src/common/zkvm.rs b/circuits-lib/src/common/zkvm.rs new file mode 100644 index 000000000..5a0db310a --- /dev/null +++ b/circuits-lib/src/common/zkvm.rs @@ -0,0 +1,77 @@ +//! # ZkVM Module +//! This module defines the traits and structures for zkVM guest and host interactions. +//! It includes the `ZkvmGuest` and `ZkvmHost` traits for reading from and writing to the host, +//! as well as committing data and verifying proofs. The `Risc0Guest` struct implements the `ZkvmGuest` trait +//! for RISC0 zkVM interactions. +//! The `VerificationContext` struct is used to represent a proof that can be used in zkVM interactions, +//! containing a method ID and a journal of data for verification. +//! The module also provides a default implementation for the `Risc0Guest` struct, which +//! initializes a new instance of the guest. + +use std::io::Write; + +use borsh::BorshDeserialize; +use risc0_zkvm::guest::env::{self}; + +/// This module defines the traits and structures for zkVM guest and host interactions for convenience. +pub trait ZkvmGuest { + fn read_from_host(&self) -> T; + fn commit(&self, item: &T); + fn verify(&self, method_id: [u32; 8], journal: &T); +} + +/// This struct represents a proof that can be used in zkVM interactions. +/// It contains a method ID and a journal of data that can be used to verify the proof. 
+/// Proof itself is not included here, as it is added as an assumption by the host. +#[derive(Debug, Clone)] +pub struct VerificationContext { + pub method_id: [u32; 8], + pub journal: Vec, +} + +pub trait ZkvmHost { + /// Adding data to the host + fn write(&self, value: &T); + + /// Adds an assumption to the the guest code to be verified. + fn add_assumption(&self, proof: VerificationContext); + + /// Proves with the given data + fn prove(&self, elf: &[u32]) -> VerificationContext; +} + +#[derive(Debug, Clone)] +pub struct Risc0Guest; + +impl Risc0Guest { + pub fn new() -> Self { + Self {} + } +} + +impl Default for Risc0Guest { + fn default() -> Self { + Self::new() + } +} + +impl ZkvmGuest for Risc0Guest { + /// This uses little endianness in the items it deserializes + fn read_from_host(&self) -> T { + let mut reader = env::stdin(); + BorshDeserialize::deserialize_reader(&mut reader) + .expect("Failed to deserialize input from host") + } + + /// This uses little endianness in the items it serializes + fn commit(&self, item: &T) { + // use risc0_zkvm::guest::env::Write as _; + let buf = borsh::to_vec(item).expect("Serialization to vec is infallible"); + let mut journal = env::journal(); + journal.write_all(&buf).unwrap(); + } + + fn verify(&self, method_id: [u32; 8], output: &T) { + env::verify(method_id, &borsh::to_vec(output).unwrap()).unwrap(); + } +} diff --git a/circuits-lib/src/header_chain/mmr_guest.rs b/circuits-lib/src/header_chain/mmr_guest.rs new file mode 100644 index 000000000..ed09ad90c --- /dev/null +++ b/circuits-lib/src/header_chain/mmr_guest.rs @@ -0,0 +1,71 @@ +//! # MMR Guest - Merkle Mountain Range for zkVM +//! +//! Lightweight MMR implementation optimized for zero-knowledge virtual machine environments. +//! Stores only subroots and size for efficient proof verification within circuit constraints. 
+ +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; + +use crate::common::hashes::hash_pair; + +use super::mmr_native::MMRInclusionProof; + +/// Merkle Mountain Range implementation for zkVM environments. +/// +/// Maintains only the essential data (subroots and size) needed for proof verification +/// within the constrained environment of a zero-knowledge proof system. +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] + +pub struct MMRGuest { + pub subroots: Vec<[u8; 32]>, + pub size: u32, +} + +impl Default for MMRGuest { + fn default() -> Self { + MMRGuest::new() + } +} + +impl MMRGuest { + /// Creates a new empty MMR instance. + pub fn new() -> Self { + MMRGuest { + subroots: vec![], + size: 0, + } + } + + /// Appends a new leaf to the MMR, updating subroots as needed. + /// + /// Implements the MMR append algorithm by combining consecutive pairs + /// of nodes to maintain the mountain range structure. + pub fn append(&mut self, leaf: [u8; 32]) { + let mut current = leaf; + let mut size = self.size; + while size % 2 == 1 { + let sibling = self.subroots.pop().unwrap(); + current = hash_pair(sibling, current); + size /= 2 + } + self.subroots.push(current); + self.size += 1; + } + + /// Verifies an inclusion proof against the MMR subroots. + /// + /// Replays the Merkle path from leaf to subroot and checks if the computed + /// subroot matches the stored subroot at the specified index. 
+ pub fn verify_proof(&self, leaf: [u8; 32], mmr_proof: &MMRInclusionProof) -> bool { + let mut current_hash = leaf; + for i in 0..mmr_proof.inclusion_proof.len() { + let sibling = mmr_proof.inclusion_proof[i]; + if mmr_proof.internal_idx & (1 << i) == 0 { + current_hash = hash_pair(current_hash, sibling); + } else { + current_hash = hash_pair(sibling, current_hash); + } + } + self.subroots[mmr_proof.subroot_idx] == current_hash + } +} diff --git a/circuits-lib/src/header_chain/mmr_native.rs b/circuits-lib/src/header_chain/mmr_native.rs new file mode 100644 index 000000000..6927fd226 --- /dev/null +++ b/circuits-lib/src/header_chain/mmr_native.rs @@ -0,0 +1,256 @@ +//! # MMR Native - Merkle Mountain Range for Native Environments +//! +//! Full-featured MMR implementation for native (non-zkVM) environments. +//! Provides proof generation capabilities and maintains complete node structure. + +use borsh::{BorshDeserialize, BorshSerialize}; +use eyre::{eyre, Result}; +use serde::{Deserialize, Serialize}; + +use crate::common::hashes::hash_pair; + +/// Merkle Mountain Range implementation for native environments. +/// +/// Maintains the complete MMR structure with all nodes across all levels, +/// enabling proof generation and full MMR operations outside of zkVM constraints. +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct MMRNative { + pub nodes: Vec>, +} + +impl Default for MMRNative { + fn default() -> Self { + MMRNative::new() + } +} + +impl MMRNative { + /// Creates a new empty MMR instance. + pub fn new() -> Self { + MMRNative { + nodes: vec![vec![]], + } + } + + /// Appends a leaf and recalculates the mountain peaks. + pub fn append(&mut self, leaf: [u8; 32]) { + self.nodes[0].push(leaf); + self.recalculate_peaks(); + } + + /// Recalculates MMR peaks after appending new leaves. 
+ fn recalculate_peaks(&mut self) { + let depth = self.nodes.len(); + for level in 0..depth - 1 { + if self.nodes[level].len() % 2 == 1 { + break; + } else { + let node = hash_pair( + self.nodes[level][self.nodes[level].len() - 2], + self.nodes[level][self.nodes[level].len() - 1], + ); + self.nodes[level + 1].push(node); + } + } + if self.nodes[depth - 1].len() > 1 { + let node = hash_pair(self.nodes[depth - 1][0], self.nodes[depth - 1][1]); + self.nodes.push(vec![node]); + } + } + + /// Returns the current MMR subroots (peaks of the mountain range). + fn get_subroots(&self) -> Vec<[u8; 32]> { + let mut subroots: Vec<[u8; 32]> = vec![]; + for level in &self.nodes { + if level.len() % 2 == 1 { + subroots.push(level[level.len() - 1]); + } + } + subroots.reverse(); + subroots + } + + /// Generates an inclusion proof for a leaf at the given index. + /// + /// Returns both the leaf value and the proof needed to verify its inclusion. + /// The proof can be verified against the MMR subroots. + pub fn generate_proof(&self, index: u32) -> Result<([u8; 32], MMRInclusionProof)> { + if self.nodes[0].is_empty() { + return Err(eyre!("MMR Native is empty")); + } + if self.nodes[0].len() <= index as usize { + return Err(eyre!( + "Index out of bounds: {} >= {}", + index, + self.nodes[0].len() + )); + } + + let mut proof: Vec<[u8; 32]> = vec![]; + let mut current_index = index; + let mut current_level = 0; + // Returns the subtree proof for the subroot. 
+ while !(current_index == self.nodes[current_level].len() as u32 - 1 + && self.nodes[current_level].len() % 2 == 1) + { + let sibling_index = if current_index % 2 == 0 { + current_index + 1 + } else { + current_index - 1 + }; + proof.push(self.nodes[current_level][sibling_index as usize]); + current_index /= 2; + current_level += 1; + } + let (subroot_idx, internal_idx) = self.get_helpers_from_index(index); + let mmr_proof = MMRInclusionProof::new(subroot_idx, internal_idx, proof); + Ok((self.nodes[0][index as usize], mmr_proof)) + } + + /// Determines subroot index and internal position for a given leaf index. + fn get_helpers_from_index(&self, index: u32) -> (usize, u32) { + let xor = (self.nodes[0].len() as u32) ^ index; + let xor_leading_digit = 31 - xor.leading_zeros() as usize; + let internal_idx = index & ((1 << xor_leading_digit) - 1); + let leading_zeros_size = 31 - (self.nodes[0].len() as u32).leading_zeros() as usize; + let mut subtree_idx = 0; + for i in xor_leading_digit + 1..=leading_zeros_size { + if self.nodes[0].len() & (1 << i) != 0 { + subtree_idx += 1; + } + } + (subtree_idx, internal_idx) + } + + /// Verifies an inclusion proof against the current MMR subroots. + pub fn verify_proof(&self, leaf: [u8; 32], mmr_proof: &MMRInclusionProof) -> bool { + let subroot = mmr_proof.get_subroot(leaf); + let subroots = self.get_subroots(); + subroots[mmr_proof.subroot_idx] == subroot + } +} + +/// Proof of inclusion for an element in the MMR. +/// +/// Contains all data needed to verify that a specific leaf exists at a given +/// position within the MMR structure. +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct MMRInclusionProof { + pub subroot_idx: usize, + pub internal_idx: u32, + pub inclusion_proof: Vec<[u8; 32]>, +} + +impl MMRInclusionProof { + /// Creates a new inclusion proof. 
+ pub fn new(subroot_idx: usize, internal_idx: u32, inclusion_proof: Vec<[u8; 32]>) -> Self { + MMRInclusionProof { + subroot_idx, + internal_idx, + inclusion_proof, + } + } + + /// Computes the subroot hash by replaying the Merkle path from the leaf. + pub fn get_subroot(&self, leaf: [u8; 32]) -> [u8; 32] { + let mut current_hash = leaf; + for i in 0..self.inclusion_proof.len() { + let sibling = self.inclusion_proof[i]; + if self.internal_idx & (1 << i) == 0 { + current_hash = hash_pair(current_hash, sibling); + } else { + current_hash = hash_pair(sibling, current_hash); + } + } + current_hash + } +} + +#[cfg(test)] +mod tests { + use super::MMRNative; + use crate::header_chain::mmr_guest::MMRGuest; + + #[test] + fn test_mmr_native_fail_empty() { + let mmr = MMRNative::new(); + let result = mmr.generate_proof(0); + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), "MMR Native is empty"); + } + + #[test] + fn test_mmr_native_fail_out_of_bounds() { + let mut mmr = MMRNative::new(); + mmr.append([0; 32]); + let result = mmr.generate_proof(1); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Index out of bounds")); + } + + #[test] + fn test_mmr_native() { + let mut mmr = MMRNative::new(); + let mut leaves = vec![]; + + for i in 0..42 { + let leaf = [i as u8; 32]; + leaves.push(leaf); + + mmr.append(leaf); + + for j in 0..=i { + let (leaf, mmr_proof) = mmr.generate_proof(j).unwrap(); + assert!(mmr.verify_proof(leaf, &mmr_proof)); + } + } + } + + #[test] + fn test_mmr_crosscheck() { + let mut mmr_native = MMRNative::new(); + let mut mmr_guest = MMRGuest::new(); + let mut leaves = vec![]; + + for i in 0..42 { + let leaf = [i as u8; 32]; + leaves.push(leaf); + + mmr_native.append(leaf); + mmr_guest.append(leaf); + + let subroots_native = mmr_native.get_subroots(); + let subroots_guest = mmr_guest.subroots.clone(); + assert_eq!( + subroots_native, subroots_guest, + "Subroots do not match after adding leaf {}", + 
i + ); + + // let root_native = mmr_native.get_root(); + // let root_guest = mmr_guest.get_root(); + // assert_eq!( + // root_native, root_guest, + // "Roots do not match after adding leaf {}", + // i + // ); + + for j in 0..=i { + let (leaf, mmr_proof) = mmr_native.generate_proof(j).unwrap(); + assert!( + mmr_native.verify_proof(leaf, &mmr_proof), + "Failed to verify proof for leaf {} in native MMR", + j + ); + assert!( + mmr_guest.verify_proof(leaf, &mmr_proof), + "Failed to verify proof for leaf {} in guest MMR", + j + ); + } + } + } +} diff --git a/circuits-lib/src/header_chain/mod.rs b/circuits-lib/src/header_chain/mod.rs new file mode 100644 index 000000000..6089352e0 --- /dev/null +++ b/circuits-lib/src/header_chain/mod.rs @@ -0,0 +1,1408 @@ +//! # Circuits-lib - Header Chain Circuit +//! This module contains the implementation of the header chain circuit, which is basically +//! the Bitcoin header chain verification logic. +//! +//! Implementation of this module is inspired by the Bitcoin Core source code and from here: +//! https://github.com/ZeroSync/header_chain/tree/master/program/src/block_header. +//! +//! **โš ๏ธ Warning:** This implementation is not a word-to-word translation of the Bitcoin Core source code. + +use bitcoin::{ + block::{Header, Version}, + hashes::Hash, + BlockHash, CompactTarget, TxMerkleNode, +}; +use borsh::{BorshDeserialize, BorshSerialize}; +use crypto_bigint::{Encoding, U256}; +use mmr_guest::MMRGuest; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use std::cmp::Ordering; + +use crate::common::{get_network, zkvm::ZkvmGuest}; + +pub mod mmr_guest; +pub mod mmr_native; + +/// The main entry point of the header chain circuit. +/// +/// This function implements Bitcoin header chain verification logic within a zero-knowledge +/// virtual machine (zkVM) environment. It processes block headers, verifies chain continuity, +/// validates proof of work, and maintains the chain state. 
+/// +/// ## Verification Process +/// +/// The circuit performs several critical validations: +/// - **Method ID Consistency**: Ensures the input `method_id` matches any previous proof's `method_id` +/// - **Chain Continuity**: Confirms each block's `prev_block_hash` matches the `best_block_hash` of the preceding state +/// - **Block Hash Validity**: Calculates double SHA256 hash and checks it's โ‰ค current difficulty target +/// - **Difficulty Target Validation**: Verifies the `bits` field matches expected difficulty for current network/epoch +/// - **Timestamp Validation**: Ensures block timestamp > median of previous 11 block timestamps +/// - **MMR Integrity**: Maintains Merkle Mountain Range for efficient block hash storage and verification +/// +/// ## Parameters +/// +/// * `guest` - ZkvmGuest implementation for reading input, verifying proofs, and committing output +/// +/// ## Input Format +/// +/// Expects `HeaderChainCircuitInput` containing: +/// - `method_id`: Circuit version identifier +/// - `prev_proof`: Either genesis state or previous circuit output +/// - `block_headers`: Vector of block headers to process +/// +/// ## Output Format +/// +/// Commits `BlockHeaderCircuitOutput` containing: +/// - `method_id`: Same as input for consistency +/// - `genesis_state_hash`: Hash of initial chain state +/// - `chain_state`: Updated chain state after processing all headers +/// +/// ## Panics +/// +/// The function will panic on any validation failure including: +/// - Method ID mismatch between input and previous proof +/// - Invalid block hash (doesn't meet difficulty target) +/// - Chain discontinuity (prev_block_hash mismatch) +/// - Invalid timestamps +/// - Incorrect difficulty bits +pub fn header_chain_circuit(guest: &impl ZkvmGuest) { + // Read the input from the host + let input: HeaderChainCircuitInput = guest.read_from_host(); + let genesis_state_hash: [u8; 32]; + let mut chain_state = match input.prev_proof { + 
HeaderChainPrevProofType::GenesisBlock(genesis_state) => { + genesis_state_hash = genesis_state.to_hash(); + genesis_state + } + HeaderChainPrevProofType::PrevProof(prev_proof) => { + assert_eq!(prev_proof.method_id, input.method_id, "Method ID mismatch, the input method ID must match the previous proof's method ID to ensure the same circuit is always used. Previous proof method ID: {:?}, input method ID: {:?}", prev_proof.method_id, input.method_id); + guest.verify(input.method_id, &prev_proof); + genesis_state_hash = prev_proof.genesis_state_hash; + prev_proof.chain_state + } + }; + + // Apply the block headers to the chain state + chain_state.apply_block_headers(input.block_headers); + + // Commit the output to the host + guest.commit(&BlockHeaderCircuitOutput { + method_id: input.method_id, + genesis_state_hash, + chain_state, + }); +} + +/// Network configuration holder for Bitcoin-specific constants. +/// +/// Contains different representations of the maximum target for various Bitcoin networks +/// (mainnet, testnet4, signet, regtest). The maximum target defines the lowest possible +/// difficulty for the network. +/// +/// ## Fields +/// +/// * `max_bits` - Compact representation of maximum target (difficulty bits format) +/// * `max_target` - 256-bit representation of maximum target +/// * `max_target_bytes` - 32-byte array representation of maximum target +/// +/// All three fields represent the same value in different formats for computational efficiency. 
+#[derive(Debug)] +pub struct NetworkConstants { + pub max_bits: u32, + pub max_target: U256, + pub max_target_bytes: [u8; 32], +} + +pub const NETWORK_TYPE: &str = get_network(); + +// Const evaluation of network type from environment +const IS_REGTEST: bool = matches!(NETWORK_TYPE.as_bytes(), b"regtest"); +const IS_TESTNET4: bool = matches!(NETWORK_TYPE.as_bytes(), b"testnet4"); +const MINIMUM_WORK_TESTNET: U256 = + U256::from_be_hex("0000000000000000000000000000000000000000000000000000000100010001"); + +/// Network constants for the Bitcoin network configuration. +/// +/// Determines the maximum target and difficulty bits based on the `BITCOIN_NETWORK` +/// environment variable. Supports mainnet, testnet4, signet, and regtest networks. +/// +/// ## Network-Specific Values +/// +/// - **Mainnet/Testnet4**: `max_bits = 0x1D00FFFF` (standard Bitcoin difficulty) +/// - **Signet**: `max_bits = 0x1E0377AE` (custom signet difficulty) +/// - **Regtest**: `max_bits = 0x207FFFFF` (minimal difficulty for testing) +/// +/// Defaults to mainnet configuration if no environment variable is set. 
+pub const NETWORK_CONSTANTS: NetworkConstants = { + match option_env!("BITCOIN_NETWORK") { + Some(n) if matches!(n.as_bytes(), b"signet") => NetworkConstants { + max_bits: 0x1E0377AE, + max_target: U256::from_be_hex( + "00000377AE000000000000000000000000000000000000000000000000000000", + ), + max_target_bytes: [ + 0, 0, 3, 119, 174, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + Some(n) if matches!(n.as_bytes(), b"regtest") => NetworkConstants { + max_bits: 0x207FFFFF, + max_target: U256::from_be_hex( + "7FFFFF0000000000000000000000000000000000000000000000000000000000", + ), + max_target_bytes: [ + 127, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + Some(n) if matches!(n.as_bytes(), b"testnet4") => NetworkConstants { + max_bits: 0x1D00FFFF, + max_target: U256::from_be_hex( + "00000000FFFF0000000000000000000000000000000000000000000000000000", + ), + max_target_bytes: [ + 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + Some(n) if matches!(n.as_bytes(), b"mainnet") => NetworkConstants { + max_bits: 0x1D00FFFF, + max_target: U256::from_be_hex( + "00000000FFFF0000000000000000000000000000000000000000000000000000", + ), + max_target_bytes: [ + 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + // Default to mainnet for None + None => NetworkConstants { + max_bits: 0x1D00FFFF, + max_target: U256::from_be_hex( + "00000000FFFF0000000000000000000000000000000000000000000000000000", + ), + max_target_bytes: [ + 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + ], + }, + _ => panic!("Unsupported network"), + } +}; + +/// Expected duration of a difficulty adjustment epoch in seconds. +/// +/// Bitcoin adjusts difficulty every 2016 blocks (approximately 2 weeks). 
+/// - **Standard networks**: 2 weeks = 60 * 60 * 24 * 14 = 1,209,600 seconds +/// - **Custom signet**: Uses 10-second block time, so 60 * 24 * 14 = 20,160 seconds +/// +/// See: +const EXPECTED_EPOCH_TIMESPAN: u32 = match option_env!("BITCOIN_NETWORK") { + Some(n) if matches!(n.as_bytes(), b"signet") => 60 * 24 * 14, + _ => 60 * 60 * 24 * 14, +}; + +/// Number of blocks in a difficulty adjustment epoch. +/// +/// Bitcoin recalculates the difficulty target every 2016 blocks based on the time +/// it took to mine those blocks compared to the expected timespan. +const BLOCKS_PER_EPOCH: u32 = 2016; + +/// Serializable representation of a Bitcoin block header. +/// +/// Contains all fields from the Bitcoin block header in a format suitable for +/// zero-knowledge circuits. This struct can be serialized/deserialized and +/// converted to/from the standard `bitcoin::block::Header` type. +/// +/// ## Fields +/// +/// * `version` - Block version indicating which validation rules to use +/// * `prev_block_hash` - Hash of the previous block in the chain (32 bytes) +/// * `merkle_root` - Merkle tree root of all transactions in the block (32 bytes) +/// * `time` - Block timestamp as Unix time +/// * `bits` - Compact representation of the difficulty target +/// * `nonce` - Counter used in proof-of-work mining +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct CircuitBlockHeader { + pub version: i32, + pub prev_block_hash: [u8; 32], + pub merkle_root: [u8; 32], + pub time: u32, + pub bits: u32, + pub nonce: u32, +} + +impl CircuitBlockHeader { + /// Computes the double SHA256 hash of the block header. + /// + /// This implements Bitcoin's block hashing algorithm: + /// 1. Serialize header fields in little-endian format + /// 2. Compute SHA256 hash of the serialized data + /// 3. 
Compute SHA256 hash of the result from step 2 + /// + /// ## Returns + /// + /// * `[u8; 32]` - The double SHA256 hash of the block header + pub fn compute_block_hash(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(self.version.to_le_bytes()); + hasher.update(self.prev_block_hash); + hasher.update(self.merkle_root); + hasher.update(self.time.to_le_bytes()); + hasher.update(self.bits.to_le_bytes()); + hasher.update(self.nonce.to_le_bytes()); + let first_hash_result = hasher.finalize_reset(); + + hasher.update(first_hash_result); + let result: [u8; 32] = hasher.finalize().into(); + result + } +} + +impl From
for CircuitBlockHeader { + fn from(header: Header) -> Self { + CircuitBlockHeader { + version: header.version.to_consensus(), + prev_block_hash: header.prev_blockhash.to_byte_array(), + merkle_root: header.merkle_root.as_raw_hash().to_byte_array(), + time: header.time, + bits: header.bits.to_consensus(), + nonce: header.nonce, + } + } +} + +impl From for Header { + fn from(val: CircuitBlockHeader) -> Self { + Header { + version: Version::from_consensus(val.version), + prev_blockhash: BlockHash::from_slice(&val.prev_block_hash) + .expect("Previous block hash is 32 bytes"), + merkle_root: TxMerkleNode::from_slice(&val.merkle_root) + .expect("Merkle root is 32 bytes"), + time: val.time, + bits: CompactTarget::from_consensus(val.bits), + nonce: val.nonce, + } + } +} + +/// Verifiable state of the Bitcoin header chain. +/// +/// Maintains all information necessary to verify the next block in the chain, +/// including difficulty adjustment state, timestamp validation data, and an MMR +/// for efficient block hash storage and verification. 
+/// +/// ## Fields +/// +/// * `block_height` - Current height of the chain (u32::MAX for uninitialized state) +/// * `total_work` - Cumulative proof-of-work as 32-byte big-endian integer +/// * `best_block_hash` - Hash of the most recently validated block +/// * `current_target_bits` - Current difficulty target in compact representation +/// * `epoch_start_time` - Timestamp of first block in current difficulty epoch +/// * `prev_11_timestamps` - Previous 11 block timestamps for median calculation +/// * `block_hashes_mmr` - Merkle Mountain Range storing subroots +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct ChainState { + pub block_height: u32, + pub total_work: [u8; 32], + pub best_block_hash: [u8; 32], + pub current_target_bits: u32, + pub epoch_start_time: u32, + pub prev_11_timestamps: [u32; 11], + pub block_hashes_mmr: MMRGuest, +} + +impl Default for ChainState { + fn default() -> Self { + ChainState::new() + } +} + +impl ChainState { + /// Creates a new chain state with default values. + pub fn new() -> Self { + ChainState { + block_height: u32::MAX, + total_work: [0u8; 32], + best_block_hash: [0u8; 32], + current_target_bits: NETWORK_CONSTANTS.max_bits, + epoch_start_time: 0, + prev_11_timestamps: [0u32; 11], + block_hashes_mmr: MMRGuest::new(), + } + } + + /// Creates a genesis chain state. + /// + /// Equivalent to `new()` but with clearer semantic meaning for genesis block scenarios. + /// + /// ## Returns + /// + /// * `Self` - A new genesis `ChainState` + pub fn genesis_state() -> Self { + Self::new() + } + + /// Computes a cryptographic hash of the current chain state. + /// + /// Creates a deterministic hash that uniquely identifies this chain state by + /// hashing all relevant fields including block height, total work, best block hash, + /// difficulty parameters, timestamps, and MMR state. 
+ /// + /// ## Returns + /// + /// * `[u8; 32]` - SHA256 hash uniquely identifying this chain state + pub fn to_hash(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(self.block_height.to_le_bytes()); + hasher.update(self.total_work); + hasher.update(self.best_block_hash); + hasher.update(self.current_target_bits.to_le_bytes()); + hasher.update(self.epoch_start_time.to_le_bytes()); + for timestamp in self.prev_11_timestamps { + hasher.update(timestamp.to_le_bytes()); + } + for hash in self.block_hashes_mmr.subroots.clone() { + hasher.update(hash); + } + hasher.update(self.block_hashes_mmr.size.to_le_bytes()); + hasher.finalize().into() + } + + /// Applies a sequence of block headers to the chain state. + /// + /// Processes each block header in order, performing comprehensive validation + /// and updating the chain state accordingly. This is the core validation logic + /// that ensures Bitcoin consensus rules are followed. + /// + /// ## Validation Steps (per block header) + /// + /// 1. **Chain Continuity**: Verifies `prev_block_hash` matches current `best_block_hash` + /// 2. **Difficulty Validation**: Ensures `bits` field matches expected difficulty + /// 3. **Proof of Work**: Validates block hash meets the difficulty target + /// 4. **Timestamp Validation**: Checks timestamp > median of last 11 timestamps + /// 5. **State Updates**: Updates height, work, best hash, MMR, and timestamps + /// 6. 
**Difficulty Adjustment**: Recalculates difficulty at epoch boundaries + /// + /// ## Network-Specific Behavior + /// + /// - **Regtest**: Uses minimum difficulty, no difficulty adjustments + /// - **Testnet4**: Allows emergency difficulty reduction after 20+ minute gaps + /// - **Others**: Standard Bitcoin difficulty adjustment rules + /// + /// ## Parameters + /// + /// * `block_headers` - Vector of block headers to process in sequence + /// + /// ## Panics + /// + /// Panics on any validation failure including invalid hashes, chain breaks, + /// or timestamp violations. + pub fn apply_block_headers(&mut self, block_headers: Vec) { + let mut current_target_bytes = if IS_REGTEST { + NETWORK_CONSTANTS.max_target.to_be_bytes() + } else { + bits_to_target(self.current_target_bits) + }; + let mut current_work: U256 = U256::from_be_bytes(self.total_work); + + let mut last_block_time = if IS_TESTNET4 { + if self.block_height == u32::MAX { + 0 + } else { + self.prev_11_timestamps[self.block_height as usize % 11] + } + } else { + 0 + }; + + for block_header in block_headers { + self.block_height = self.block_height.wrapping_add(1); + + let (target_to_use, expected_bits, work_to_add) = if IS_TESTNET4 { + if block_header.time > last_block_time + 1200 { + // If the block is an epoch block, then it still has to have the real target. + if self.block_height % BLOCKS_PER_EPOCH == 0 { + ( + current_target_bytes, + self.current_target_bits, + calculate_work(¤t_target_bytes), + ) + } + // Otherwise, if the timestamp is more than 20 minutes ahead of the last block, the block is allowed to use the maximum target. 
+ else { + ( + NETWORK_CONSTANTS.max_target_bytes, + NETWORK_CONSTANTS.max_bits, + MINIMUM_WORK_TESTNET, + ) + } + } else { + ( + current_target_bytes, + self.current_target_bits, + calculate_work(¤t_target_bytes), + ) + } + } else { + ( + current_target_bytes, + self.current_target_bits, + calculate_work(¤t_target_bytes), + ) + }; + + let new_block_hash = block_header.compute_block_hash(); + + assert_eq!( + block_header.prev_block_hash, self.best_block_hash, + "Previous block hash does not match the best block hash. Expected: {:?}, got: {:?}", + self.best_block_hash, block_header.prev_block_hash + ); + + if IS_REGTEST { + assert_eq!( + block_header.bits, NETWORK_CONSTANTS.max_bits, + "Bits for regtest must be equal to the maximum bits: {}. Got: {}", + NETWORK_CONSTANTS.max_bits, block_header.bits + ); + } else { + assert_eq!( + block_header.bits, expected_bits, + "Bits for the block header must match the expected bits: {}. Got: {}", + expected_bits, block_header.bits + ); + } + + check_hash_valid(&new_block_hash, &target_to_use); + + if !validate_timestamp(block_header.time, self.prev_11_timestamps) { + panic!("Timestamp is not valid, it must be greater than the median of the last 11 timestamps"); + } + + self.block_hashes_mmr.append(new_block_hash); + self.best_block_hash = new_block_hash; + current_work = current_work.wrapping_add(&work_to_add); + + if !IS_REGTEST && self.block_height % BLOCKS_PER_EPOCH == 0 { + self.epoch_start_time = block_header.time; + } + + self.prev_11_timestamps[self.block_height as usize % 11] = block_header.time; + + if IS_TESTNET4 { + last_block_time = block_header.time; + } + + if !IS_REGTEST && self.block_height % BLOCKS_PER_EPOCH == BLOCKS_PER_EPOCH - 1 { + current_target_bytes = calculate_new_difficulty( + self.epoch_start_time, + block_header.time, + self.current_target_bits, + ); + self.current_target_bits = target_to_bits(¤t_target_bytes); + } + } + + self.total_work = current_work.to_be_bytes(); + } +} + +/// Calculates the 
median of 11 timestamps. +/// +/// Used for Bitcoin's median time past (MTP) rule, which requires that a block's +/// timestamp must be greater than the median of the previous 11 blocks' timestamps. +/// This prevents miners from lying about timestamps to manipulate difficulty. +/// +/// ## Parameters +/// +/// * `arr` - Array of exactly 11 timestamps as u32 values +/// +/// ## Returns +/// +/// * `u32` - The median timestamp (6th element when sorted) +fn median(arr: [u32; 11]) -> u32 { + let mut sorted_arr = arr; + sorted_arr.sort_unstable(); + sorted_arr[5] +} + +/// Validates a block timestamp against the median time past rule. +/// +/// Implements Bitcoin's median time past (MTP) validation which requires that +/// each block's timestamp must be strictly greater than the median of the +/// previous 11 blocks' timestamps. This prevents timestamp manipulation attacks. +/// +/// ## Parameters +/// +/// * `block_time` - The timestamp of the block being validated +/// * `prev_11_timestamps` - Array of the previous 11 block timestamps +/// +/// ## Returns +/// +/// * `bool` - `true` if the timestamp is valid (greater than median), `false` otherwise +fn validate_timestamp(block_time: u32, prev_11_timestamps: [u32; 11]) -> bool { + let median_time = median(prev_11_timestamps); + block_time > median_time +} + +/// Converts compact target representation (bits) to full 32-byte target. +/// +/// Bitcoin uses a compact representation for difficulty targets in block headers. +/// This function expands the 4-byte compact format into the full 32-byte target +/// that hash values are compared against. 
+/// +/// ## Compact Target Format +/// +/// The compact target uses a floating-point-like representation: +/// - Bits 24-31: Size/exponent (how many bytes the mantissa occupies) +/// - Bits 0-23: Mantissa (the significant digits) +/// +/// ## Parameters +/// +/// * `bits` - Compact target representation from block header +/// +/// ## Returns +/// +/// * `[u8; 32]` - Full 32-byte target in big-endian format +pub fn bits_to_target(bits: u32) -> [u8; 32] { + let size = (bits >> 24) as usize; + let mantissa = bits & 0x00ffffff; + + let target = if size <= 3 { + U256::from(mantissa >> (8 * (3 - size))) + } else { + U256::from(mantissa) << (8 * (size - 3)) + }; + target.to_be_bytes() +} + +/// Converts a full 32-byte target to compact representation (bits). +/// +/// This is the inverse of `bits_to_target()`, converting a full 32-byte target +/// back into Bitcoin's compact 4-byte representation used in block headers. +/// +/// ## Parameters +/// +/// * `target` - Full 32-byte target in big-endian format +/// +/// ## Returns +/// +/// * `u32` - Compact target representation suitable for block headers +fn target_to_bits(target: &[u8; 32]) -> u32 { + let target_u256 = U256::from_be_slice(target); + let target_bits = target_u256.bits(); + let size = (263 - target_bits) / 8; + let mut compact_target = [0u8; 4]; + compact_target[0] = 33 - size as u8; + compact_target[1] = target[size - 1_usize]; + compact_target[2] = target[size]; + compact_target[3] = target[size + 1_usize]; + u32::from_be_bytes(compact_target) +} + +/// Calculates the new difficulty target after a difficulty adjustment epoch. +/// +/// Bitcoin adjusts difficulty every 2016 blocks to maintain ~10 minute block times. +/// The adjustment is based on how long the previous 2016 blocks actually took +/// compared to the expected timespan (2 weeks). +/// +/// ## Algorithm +/// +/// 1. Calculate actual timespan: `last_timestamp - epoch_start_time` +/// 2. 
Clamp timespan to [expected/4, expected*4] to limit adjustment range +/// 3. New target = old target * actual_timespan / expected_timespan +/// 4. Ensure new target doesn't exceed network maximum +/// +/// ## Parameters +/// +/// * `epoch_start_time` - Timestamp of the first block in the epoch +/// * `last_timestamp` - Timestamp of the last block in the epoch +/// * `current_target` - Current difficulty target in compact format +/// +/// ## Returns +/// +/// * `[u8; 32]` - New difficulty target as 32-byte array +fn calculate_new_difficulty( + epoch_start_time: u32, + last_timestamp: u32, + current_target: u32, +) -> [u8; 32] { + let mut actual_timespan = last_timestamp - epoch_start_time; + if actual_timespan < EXPECTED_EPOCH_TIMESPAN / 4 { + actual_timespan = EXPECTED_EPOCH_TIMESPAN / 4; + } else if actual_timespan > EXPECTED_EPOCH_TIMESPAN * 4 { + actual_timespan = EXPECTED_EPOCH_TIMESPAN * 4; + } + + let current_target_bytes = bits_to_target(current_target); + let mut new_target = U256::from_be_bytes(current_target_bytes) + .wrapping_mul(&U256::from(actual_timespan)) + .wrapping_div(&U256::from(EXPECTED_EPOCH_TIMESPAN)); + + if new_target > NETWORK_CONSTANTS.max_target { + new_target = NETWORK_CONSTANTS.max_target; + } + new_target.to_be_bytes() +} + +/// Validates that a block hash meets the proof-of-work requirement. +/// +/// Compares the block hash against the difficulty target to ensure sufficient +/// work was performed. The hash is interpreted as a big-endian 256-bit integer +/// and must be less than or equal to the target. +/// +/// Bitcoin uses little-endian byte order for hashes in most contexts, but for +/// difficulty comparison the hash bytes are reversed to big-endian format. +/// +/// ## Parameters +/// +/// * `hash` - The block hash to validate (32 bytes, little-endian) +/// * `target_bytes` - The difficulty target (32 bytes, big-endian) +/// +/// ## Panics +/// +/// Panics with "Hash is not valid" if the hash exceeds the target. 
+fn check_hash_valid(hash: &[u8; 32], target_bytes: &[u8; 32]) { + for i in 0..32 { + match hash[31 - i].cmp(&target_bytes[i]) { + Ordering::Less => return, + Ordering::Greater => panic!("Hash is not valid"), + Ordering::Equal => continue, + } + } +} + +/// Calculates the amount of work represented by a difficulty target. +/// +/// Bitcoin measures cumulative proof-of-work as the sum of work done by all blocks. +/// The work for a single block is inversely proportional to its target: +/// work = max_target / (target + 1) +/// +/// This allows comparing the total work between different chains to determine +/// which has the most accumulated proof-of-work. +/// +/// ## Parameters +/// +/// * `target` - The difficulty target as a 32-byte big-endian array +/// +/// ## Returns +/// +/// * `U256` - The amount of work represented by this target +fn calculate_work(target: &[u8; 32]) -> U256 { + let target = U256::from_be_slice(target); + let target_plus_one = target.saturating_add(&U256::ONE); + U256::MAX.wrapping_div(&target_plus_one) +} + +/// Circuit output containing the updated chain state and metadata. +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct BlockHeaderCircuitOutput { + pub method_id: [u32; 8], + pub genesis_state_hash: [u8; 32], + pub chain_state: ChainState, +} + +/// Previous proof type - either genesis state or previous circuit output. +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub enum HeaderChainPrevProofType { + GenesisBlock(ChainState), + PrevProof(BlockHeaderCircuitOutput), +} + +/// The input of the header chain circuit. +/// It contains the method ID, the previous proof (either a genesis block or a previous proof), and the block headers to be processed. +/// Method ID is used to identify the circuit and is expected to be the same as the one used in the previous proof. 
+#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct HeaderChainCircuitInput { + pub method_id: [u32; 8], + pub prev_proof: HeaderChainPrevProofType, + pub block_headers: Vec, +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + // From block 800000 to 800015 + const BLOCK_HEADERS: [[u8; 80]; 15] = [ + hex!("00601d3455bb9fbd966b3ea2dc42d0c22722e4c0c1729fad17210100000000000000000055087fab0c8f3f89f8bcfd4df26c504d81b0a88e04907161838c0c53001af09135edbd64943805175e955e06"), + hex!("00a0012054a02827d7a8b75601275a160279a3c5768de4c1c4a702000000000000000000394ddc6a5de035874cfa22167bfe923953187b5a19fbb84e186dea3c78fd871c9bedbd6494380517f5c93c8c"), + hex!("00a00127470972473293f6a3514d69d9ede5acc79ef19c236be20000000000000000000035aa0cba25ae1517a257d8c913e24ec0a152fd6a84b7f9ef303626c91cdcd6b287efbd649438051761ba50fb"), + hex!("00e0ff3fe80aaef89174e3668cde4cefecae739cd2f337251e12050000000000000000004ed17162f118bd27ae283be8dabe8afe7583bd353087e2eb712c48e3c3240c3ea3efbd64943805178e55bb2f"), + hex!("00006020f9dd40733234ec3084fa55ae955d2e95f63db75382b4030000000000000000006f440ea93df1e46fa47a6135ce1661cbdb80e703e4cfb6d2c0bcf49ea50f2a1530f5bd64943805175d3a7efb"), + hex!("0040f526ba869c2271583b645767c3bc4acee3f4a5a1ac727d07050000000000000000006ce5ff483f5e9fe028725bd30196a064a761b3ea831e5b81cf1473d5aa11810efbf6bd64943805174c75b45d"), + hex!("0000c0204d770ec7842342bcfebba4447545383c639294a6c10c0500000000000000000059f61d610ef6cbcc1d05dec4ebc5e744c62dc975c4256c5f95833d350303c05521fabd64943805172b8e799e"), + hex!("00400020d9ea5216f276b3f623834e8db837f8b41a8afbda6e8800000000000000000000d5dea9ae25f7f8e6d66064b21c7f5d1481d08d162658785fde59716b1bf98ff50505be6494380517a33ee2b0"), + hex!("0060262ebabd5319d7013811214809650a635c974444813935b203000000000000000000a0ab544e5055c443256debb20e85f8ded28f746436a57c00e914b9fd02ff058bcf07be64943805172436ed21"), + 
hex!("00000020455bd24740ceb627a3c41c3cecaf097b45779719b0d40400000000000000000043ad55fc5619dd8f2edd7d18212d176cdb6aa2152f12addf9d38c9c29be0da60030bbe649438051704743edc"), + hex!("00e0ff27d53e9a409bf8ce3054862f76d926437c1b1a84ce1ac0010000000000000000004fceebb8a6cee0eaba389e462ae6bb89a8e6dd5396eeba89dc5907ff51112e21760dbe64943805174bd6f6f6"), + hex!("00e0ff3ff9d1af6c7009b9974b4d838a2505bc882a6333f92500030000000000000000002dff4798432eb3beaf3e5b7c7ca318c1b451ba05c560473b6b974138ac73a82f2b0ebe6494380517d26b2853"), + hex!("00403a31ee9197174b65726fa7d78fe8b547c024519642009b4f0100000000000000000025f09dbf49cabe174066ebc2d5329211bd994a2b645e4086cadc5a2bbe7cac687e0ebe64943805171f930c95"), + hex!("0000eb2f06d50bd6ead9973ec74d9f5d77aa9cc6262a497b7ef5040000000000000000004918ae9062a90bfc4c2befca6eb0569c86b53f20bfae39c14d56052eef74f39e2110be64943805176269f908"), + hex!("00a0002049b01d8eea4b9d88fabd6a9633699c579145a8ddc91205000000000000000000368d0d166ae485674d0b794a8e2e2f4e94ac1e5b6d56612b3d725bc793f523514712be6494380517860d95e4") + ]; + + const DIFFICULTY_ADJUSTMENTS: [(u32, u32, u32, u32); 430] = [ + (1231006505, 1233061996, 486604799, 486604799), + (1233063531, 1234465122, 486604799, 486604799), + (1234466190, 1235965467, 486604799, 486604799), + (1235966513, 1237507400, 486604799, 486604799), + (1237508786, 1239054978, 486604799, 486604799), + (1239055463, 1240599092, 486604799, 486604799), + (1240599098, 1242098000, 486604799, 486604799), + (1242098425, 1243735798, 486604799, 486604799), + (1243737085, 1246050840, 486604799, 486604799), + (1246051973, 1248481522, 486604799, 486604799), + (1248481816, 1252066931, 486604799, 486604799), + (1252069298, 1254454291, 486604799, 486604799), + (1254454028, 1257000207, 486604799, 486604799), + (1257002900, 1259358448, 486604799, 486604799), + (1259358667, 1261128623, 486604799, 486604799), + (1261130161, 1262152739, 486604799, 486594666), + (1262153464, 1263249842, 486594666, 486589480), + (1263250117, 1264424481, 486589480, 486588017), 
+ (1264424879, 1265318937, 486588017, 486575299), + (1265319794, 1266190073, 486575299, 476399191), + (1266191579, 1267000203, 476399191, 474199013), + (1267000864, 1268010273, 474199013, 473464687), + (1268010873, 1269211443, 473464687, 473437045), + (1269212064, 1270119474, 473437045, 472518933), + (1270120042, 1271061370, 472518933, 471907495), + (1271061586, 1271886653, 471907495, 471225455), + (1271886772, 1272966003, 471225455, 471067731), + (1272966376, 1274278387, 471067731, 471178276), + (1274278435, 1275140649, 471178276, 470771548), + (1275141448, 1276297992, 470771548, 470727268), + (1276298786, 1277382263, 470727268, 470626626), + (1277382446, 1278381204, 470626626, 470475923), + (1278381464, 1279007808, 470475923, 470131700), + (1279008237, 1279297671, 470131700, 469854461), + (1279297779, 1280196974, 469854461, 469830746), + (1280198558, 1281037393, 469830746, 469809688), + (1281037595, 1281869965, 469809688, 469794830), + (1281870671, 1282863700, 469794830, 459874456), + (1282864403, 1283922146, 459874456, 459009510), + (1283922289, 1284861793, 459009510, 457664237), + (1284861847, 1285703762, 457664237, 456241827), + (1285703908, 1286861405, 456241827, 456101533), + (1286861705, 1287637343, 456101533, 454983370), + (1287637995, 1288478771, 454983370, 454373987), + (1288479527, 1289303926, 454373987, 453931606), + (1289305768, 1290104845, 453931606, 453610282), + (1290105874, 1291134100, 453610282, 453516498), + (1291135075, 1291932610, 453516498, 453335379), + (1291933202, 1292956393, 453335379, 453281356), + (1292956443, 1294030806, 453281356, 453248203), + (1294031411, 1295101259, 453248203, 453217774), + (1295101567, 1296114735, 453217774, 453179945), + (1296116171, 1297140342, 453179945, 453150034), + (1297140800, 1298003311, 453150034, 453102630), + (1298006152, 1298799509, 453102630, 453062093), + (1298800760, 1299683275, 453062093, 453041201), + (1299684355, 1301020485, 453041201, 453047097), + (1301020785, 1302034036, 453047097, 453036989), 
+ (1302034197, 1303112797, 453036989, 453031340), + (1303112976, 1304131540, 453031340, 453023994), + (1304131980, 1304974694, 453023994, 443192243), + (1304975844, 1305755857, 443192243, 440711666), + (1305756287, 1306435280, 440711666, 438735905), + (1306435316, 1307362613, 438735905, 438145839), + (1307363105, 1308145551, 438145839, 437461381), + (1308145774, 1308914894, 437461381, 437004818), + (1308915923, 1309983257, 437004818, 436911055), + (1309984546, 1311102675, 436911055, 436857860), + (1311103389, 1312186259, 436857860, 436789733), + (1312186279, 1313451537, 436789733, 436816518), + (1313451894, 1314680496, 436816518, 436826083), + (1314681303, 1315906303, 436826083, 436833957), + (1315906316, 1317163240, 436833957, 436858461), + (1317163624, 1318555415, 436858461, 436956491), + (1318556675, 1320032359, 436956491, 437121226), + (1320032534, 1321253256, 437121226, 437129626), + (1321253770, 1322576247, 437129626, 437215665), + (1322576420, 1323718660, 437215665, 437159528), + (1323718955, 1324923455, 437159528, 437155514), + (1324925005, 1326046766, 437155514, 437086679), + (1326047176, 1327204081, 437086679, 437048383), + (1327204504, 1328351050, 437048383, 437004555), + (1328351561, 1329564101, 437004555, 437006492), + (1329564255, 1330676346, 437006492, 436942092), + (1330676736, 1331885274, 436942092, 436941447), + (1331885394, 1332999614, 436941447, 436883582), + (1332999707, 1334246594, 436883582, 436904419), + (1334246689, 1335511874, 436904419, 436936439), + (1335512370, 1336565211, 436936439, 436841986), + (1336565313, 1337882969, 436841986, 436898655), + (1337883029, 1339098664, 436898655, 436902102), + (1339099525, 1340208670, 436902102, 436844426), + (1340208964, 1341401376, 436844426, 436835377), + (1341401841, 1342536951, 436835377, 436796718), + (1342537166, 1343645636, 436796718, 436747465), + (1343647577, 1344772046, 436747465, 436709470), + (1344772855, 1345858666, 436709470, 436658110), + (1345859199, 1346955024, 436658110, 436615736), 
+ (1346955037, 1348092805, 436615736, 436591499), + (1348092851, 1349227021, 436591499, 436567560), + (1349226660, 1350429295, 436567560, 436565487), + (1350428168, 1351552830, 436565487, 436540357), + (1351556195, 1352742671, 436540357, 436533995), + (1352743186, 1353928117, 436533995, 436527338), + (1353928229, 1355162497, 436527338, 436533858), + (1355162613, 1356530758, 436533858, 436576619), + (1356530740, 1357639870, 436576619, 436545969), + (1357641634, 1358965635, 436545969, 436577969), + (1358966487, 1360062830, 436577969, 436543292), + (1360063146, 1361148326, 436543292, 436508764), + (1361148470, 1362159549, 436508764, 436459339), + (1362159764, 1363249652, 436459339, 436434426), + (1363249946, 1364125673, 436434426, 436371822), + (1364126425, 1365181981, 436371822, 436350910), + (1365183643, 1366217849, 436350910, 436330132), + (1366218134, 1367295455, 436330132, 436316733), + (1367296471, 1368385955, 436316733, 436305897), + (1368386123, 1369499565, 436305897, 436298084), + (1369499746, 1370441773, 436298084, 436278071), + (1370442318, 1371418407, 436278071, 436264469), + (1371418654, 1372515090, 436264469, 436259150), + (1372515725, 1373502151, 436259150, 436249641), + (1373502163, 1374514657, 436249641, 436242792), + (1374515827, 1375526943, 436242792, 426957810), + (1375527115, 1376417294, 426957810, 424970034), + (1376417490, 1377352245, 424970034, 423711319), + (1377353319, 1378268176, 423711319, 422668188), + (1378268460, 1379202097, 422668188, 421929506), + (1379202248, 1380117691, 421929506, 421321760), + (1380118146, 1381069174, 421321760, 420917450), + (1381070552, 1381925718, 420917450, 420481718), + (1381925788, 1382754194, 420481718, 420150405), + (1382754272, 1383679776, 420150405, 419981299), + (1383681123, 1384695132, 419981299, 419892219), + (1384699499, 1385741656, 419892219, 419828290), + (1385742648, 1386684666, 419828290, 419740270), + (1386684686, 1387615098, 419740270, 419668748), + (1387617112, 1388624139, 419668748, 419628831), 
+ (1388624318, 1389583107, 419628831, 419587686), + (1389583220, 1390569911, 419587686, 419558700), + (1390570126, 1391582444, 419558700, 419537774), + (1391584456, 1392597647, 419537774, 419520339), + (1392597839, 1393589930, 419520339, 419504166), + (1393590585, 1394676535, 419504166, 419496625), + (1394676764, 1395703577, 419496625, 419486617), + (1395703832, 1396693489, 419486617, 419476394), + (1396694478, 1397755194, 419476394, 419470732), + (1397755646, 1398810754, 419470732, 419465580), + (1398811175, 1399904296, 419465580, 410792019), + (1399904311, 1400928544, 410792019, 409544770), + (1400928750, 1402004511, 409544770, 408782234), + (1402004993, 1403061308, 408782234, 408005538), + (1403061280, 1404029522, 408005538, 406937553), + (1404029556, 1405203024, 406937553, 406809574), + (1405205894, 1406325104, 406809574, 406498978), + (1406325092, 1407473800, 406498978, 406305378), + (1407474112, 1408474964, 406305378, 405675096), + (1408475518, 1409527066, 405675096, 405280238), + (1409527152, 1410639387, 405280238, 405068777), + (1410638896, 1411679882, 405068777, 404732051), + (1411680080, 1412877894, 404732051, 404711795), + (1412877866, 1414054419, 404711795, 404655552), + (1414055393, 1415154489, 404655552, 404472624), + (1415154631, 1416343330, 404472624, 404441185), + (1416345124, 1417563570, 404441185, 404454260), + (1417563705, 1418790160, 404454260, 404479356), + (1418791024, 1419965406, 404479356, 404426186), + (1419965588, 1421083565, 404426186, 404291887), + (1421084073, 1422372768, 404291887, 404399040), + (1422372946, 1423495952, 404399040, 404274055), + (1423496415, 1424648263, 404274055, 404196666), + (1424648937, 1425839583, 404196666, 404172480), + (1425840165, 1427068149, 404172480, 404195570), + (1427068411, 1428211256, 404195570, 404110449), + (1428211345, 1429467587, 404110449, 404166640), + (1429467906, 1430676673, 404166640, 404165597), + (1430677341, 1431858092, 404165597, 404129525), + (1431858433, 1433098989, 404129525, 404167307), 
+ (1433099185, 1434257600, 404167307, 404103235), + (1434257763, 1435474473, 404103235, 404111758), + (1435475246, 1436645194, 404111758, 404063944), + (1436646286, 1437828076, 404063944, 404031509), + (1437828285, 1439028210, 404031509, 404020484), + (1439028930, 1440203823, 404020484, 403981252), + (1440204583, 1441356822, 403981252, 403918273), + (1441357507, 1442518636, 403918273, 403867578), + (1442519404, 1443699609, 403867578, 403838066), + (1443700390, 1444908588, 403838066, 403836692), + (1444908751, 1446091729, 403836692, 403810644), + (1446092706, 1447236281, 403810644, 403747465), + (1447236692, 1448331948, 403747465, 403644022), + (1448332462, 1449444509, 403644022, 403564111), + (1449444652, 1450468554, 403564111, 403424265), + (1450469289, 1451557421, 403424265, 403346833), + (1451558562, 1452667067, 403346833, 403288859), + (1452667178, 1453809473, 403288859, 403253488), + (1453810745, 1454818212, 403253488, 403153172), + (1454818360, 1455884612, 403153172, 403093919), + (1455885256, 1457133524, 403093919, 403108008), + (1457133956, 1458291885, 403108008, 403088579), + (1458292068, 1459491849, 403088579, 403085044), + (1459492475, 1460622012, 403085044, 403056459), + (1460622341, 1461832072, 403056459, 403056502), + (1461832110, 1462944601, 403056502, 403024122), + (1462944866, 1464123775, 403024122, 403014710), + (1464123766, 1465353421, 403014710, 403020704), + (1465353718, 1466485981, 403020704, 402997206), + (1466486338, 1467673575, 402997206, 402990845), + (1467674161, 1468883232, 402990845, 402990697), + (1468884162, 1470163257, 402990697, 403010088), + (1470163842, 1471287293, 403010088, 402984668), + (1471287554, 1472478633, 402984668, 402979592), + (1472479861, 1473662270, 402979592, 402972254), + (1473662347, 1474794756, 402972254, 402951892), + (1474795015, 1475923695, 402951892, 402931908), + (1475924010, 1477157004, 402931908, 402937298), + (1477159378, 1478364220, 402937298, 402936180), + (1478364418, 1479457348, 402936180, 402908884), 
+ (1479457815, 1480646474, 402908884, 402904457), + (1480646786, 1481765173, 402904457, 402885509), + (1481765313, 1482946227, 402885509, 402879999), + (1482946855, 1484087479, 402879999, 402867065), + (1484088052, 1485125083, 402867065, 402836551), + (1485125572, 1486251490, 402836551, 402823865), + (1486251529, 1487410067, 402823865, 402816659), + (1487410706, 1488567833, 402816659, 402809567), + (1488567886, 1489739512, 402809567, 402804657), + (1489739775, 1490891447, 402804657, 402797402), + (1490891948, 1492052381, 402797402, 402791539), + (1492052390, 1493259291, 402791539, 402791230), + (1493259601, 1494387130, 402791230, 402781863), + (1494387648, 1495524275, 402781863, 402774100), + (1495524592, 1496586576, 402774100, 402759343), + (1496586907, 1497740528, 402759343, 402754430), + (1497741533, 1498956326, 402754430, 402754864), + (1498956437, 1500021909, 402754864, 402742748), + (1500021942, 1501153235, 402742748, 402736949), + (1501153434, 1502280491, 402736949, 402731232), + (1502282210, 1503539571, 402731232, 402734313), + (1503539857, 1504704167, 402734313, 402731275), + (1504704195, 1505715737, 402731275, 402718488), + (1505716276, 1506903856, 402718488, 402717299), + (1506904066, 1508039962, 402717299, 402713392), + (1508040302, 1509036725, 402713392, 402702781), + (1509036762, 1510324761, 402702781, 402705995), + (1510326831, 1511552082, 402705995, 402706678), + (1511553196, 1512577362, 402706678, 402698477), + (1512577401, 1513604778, 402698477, 402691653), + (1513605320, 1514778580, 402691653, 402690497), + (1514778970, 1515827472, 402690497, 394155916), + (1515827554, 1516862792, 394155916, 392962374), + (1516862900, 1517958218, 392962374, 392292856), + (1517958487, 1519114710, 392292856, 392009692), + (1519114859, 1520220349, 392009692, 391481763), + (1520223678, 1521373214, 391481763, 391203401), + (1521373218, 1522566103, 391203401, 391129783), + (1522566357, 1523672538, 391129783, 390680589), + (1523672932, 1524827574, 390680589, 390462291), 
+ (1524828253, 1526002294, 390462291, 390327465), + (1526003655, 1527167457, 390327465, 390158921), + (1527168053, 1528222495, 390158921, 389609537), + (1528222686, 1529399698, 389609537, 389508950), + (1529400045, 1530545107, 389508950, 389315112), + (1530545661, 1531798474, 389315112, 389437975), + (1531799449, 1532852342, 389437975, 388976507), + (1532852371, 1533978695, 388976507, 388763047), + (1533980459, 1535129301, 388763047, 388618029), + (1535129431, 1536288716, 388618029, 388503969), + (1536290079, 1537477114, 388503969, 388454943), + (1537478139, 1538638684, 388454943, 388350353), + (1538639362, 1539894787, 388350353, 388444093), + (1539895067, 1541104406, 388444093, 388443538), + (1541105656, 1542411813, 388443538, 388648495), + (1542412284, 1543837587, 388648495, 389142908), + (1543838368, 1545175878, 389142908, 389488372), + (1545175965, 1546275302, 389488372, 389159077), + (1546276809, 1547431851, 389159077, 389010995), + (1547432394, 1548656416, 389010995, 389048373), + (1548657313, 1549817652, 389048373, 388919176), + (1549817981, 1551025524, 388919176, 388914000), + (1551026038, 1552236227, 388914000, 388915479), + (1552236304, 1553387053, 388915479, 388767596), + (1553387093, 1554594090, 388767596, 388761373), + (1554594223, 1555811438, 388761373, 388779537), + (1555811668, 1556958256, 388779537, 388628280), + (1556958733, 1558167889, 388628280, 388627269), + (1558168296, 1559255464, 388627269, 388348790), + (1559256184, 1560473993, 388348790, 388365571), + (1560474230, 1561603749, 388365571, 388200748), + (1561604370, 1562663247, 388200748, 387911067), + (1562663868, 1563880228, 387911067, 387922440), + (1563880937, 1564972845, 387922440, 387723321), + (1564973528, 1566159593, 387723321, 387687377), + (1566161382, 1567304898, 387687377, 387588414), + (1567305301, 1568401109, 387588414, 387427317), + (1568401591, 1569528791, 387427317, 387321636), + (1569530001, 1570716515, 387321636, 387294044), + (1570716535, 1571865760, 387294044, 387223263), 
+ (1571866973, 1573168955, 387223263, 387326161), + (1573169436, 1574355426, 387326161, 387297854), + (1574356132, 1575574787, 387297854, 387308498), + (1575576145, 1576779043, 387308498, 387300560), + (1576779421, 1577914494, 387300560, 387212786), + (1577915667, 1579045242, 387212786, 387124344), + (1579045357, 1580201014, 387124344, 387068671), + (1580201043, 1581404369, 387068671, 387062484), + (1581405024, 1582619298, 387062484, 387067068), + (1582619322, 1583751024, 387067068, 386990361), + (1583751917, 1585191082, 386990361, 387201857), + (1585191106, 1586334725, 387201857, 387129532), + (1586336046, 1587451399, 387129532, 387031859), + (1587452724, 1588651347, 387031859, 387021369), + (1588651521, 1589938370, 387021369, 387094518), + (1589940416, 1591273835, 387094518, 387219253), + (1591273852, 1592326176, 387219253, 387044594), + (1592326267, 1593535908, 387044594, 387044633), + (1593537529, 1594638224, 387044633, 386939413), + (1594641060, 1595886443, 386939413, 386970872), + (1595886756, 1597089202, 386970872, 386964396), + (1597089619, 1598257182, 386964396, 386926570), + (1598258059, 1599482443, 386926570, 386939410), + (1599482920, 1600569231, 386939410, 386831018), + (1600570533, 1601781172, 386831018, 386831838), + (1601781592, 1602948896, 386831838, 386798414), + (1602950620, 1604391477, 386798414, 386974771), + (1604392090, 1605546079, 386974771, 386924253), + (1605546119, 1606657197, 386924253, 386838870), + (1606657305, 1607898457, 386838870, 386863986), + (1607899483, 1609113673, 386863986, 386867735), + (1609113744, 1610205491, 386867735, 386771105), + (1610205877, 1611402924, 386771105, 386761815), + (1611403017, 1612578145, 386761815, 386736569), + (1612578303, 1613771771, 386736569, 386725091), + (1613772036, 1614997194, 386725091, 386736012), + (1614997708, 1616184225, 386736012, 386719599), + (1616184405, 1617327513, 386719599, 386673224), + (1617328801, 1618515600, 386673224, 386658195), + (1618515703, 1619899807, 386658195, 386771043), 
+ (1619900822, 1620896111, 386771043, 386612457), + (1620896338, 1622335745, 386612457, 386752379), + (1622337521, 1623614781, 386752379, 386801401), + (1623614836, 1625293501, 386801401, 387160270), + (1625294046, 1626564728, 387160270, 387225124), + (1626564737, 1627705595, 387225124, 387148450), + (1627706126, 1628833331, 387148450, 387061771), + (1628834027, 1629902243, 387061771, 386923168), + (1629902476, 1631059521, 386923168, 386877668), + (1631061045, 1632233558, 386877668, 386846955), + (1632234876, 1633390031, 386846955, 386803250), + (1633390519, 1634588711, 386803250, 386794504), + (1634588757, 1635710294, 386794504, 386727631), + (1635710370, 1636865834, 386727631, 386689514), + (1636866927, 1638094859, 386689514, 386701843), + (1638095408, 1639212040, 386701843, 386638367), + (1639216857, 1640422619, 386638367, 386635947), + (1640422999, 1641627659, 386635947, 386632843), + (1641627937, 1642734420, 386632843, 386568320), + (1642734490, 1643941946, 386568320, 386567092), + (1643942057, 1645096442, 386567092, 386535544), + (1645096491, 1646324392, 386535544, 386545523), + (1646324511, 1647538413, 386545523, 386547904), + (1647538808, 1648700407, 386547904, 386521239), + (1648700729, 1649925810, 386521239, 386529497), + (1649925939, 1651071862, 386529497, 386495093), + (1651072835, 1652226053, 386495093, 386466234), + (1652226078, 1653490447, 386466234, 386492960), + (1653490985, 1654685173, 386492960, 386485098), + (1654686448, 1655925220, 386485098, 386499788), + (1655925489, 1657152407, 386499788, 386508719), + (1657153358, 1658426742, 386508719, 386542084), + (1658427282, 1659616186, 386542084, 386530686), + (1659617683, 1660819735, 386530686, 386526600), + (1660820877, 1661927959, 386526600, 386471456), + (1661928055, 1663097332, 386471456, 386451604), + (1663097346, 1664333361, 386451604, 386464174), + (1664333794, 1665399026, 386464174, 386393970), + (1665399506, 1666568884, 386393970, 386376745), + (1666569091, 1667781109, 386376745, 386377746), 
+ (1667781163, 1668984601, 386377746, 386375189), + (1668986059, 1670291250, 386375189, 386414640), + (1670291429, 1671462730, 386414640, 386397584), + (1671463076, 1672717752, 386397584, 386417022), + (1672719770, 1673816848, 386417022, 386366690), + (1673817110, 1674972595, 386366690, 386344736), + (1674972641, 1676188253, 386344736, 386347065), + (1676188371, 1677288474, 386347065, 386304419), + (1677288852, 1678484625, 386304419, 386299521), + (1678484890, 1679609492, 386299521, 386269758), + (1679609802, 1680793023, 386269758, 386261170), + (1680795199, 1681984324, 386261170, 386254649), + (1681984653, 1683212066, 386254649, 386260225), + (1683214087, 1684385994, 386260225, 386248250), + (1684386462, 1685556292, 386248250, 386236009), + (1685557167, 1686740979, 386236009, 386228333), + (1686742062, 1687992366, 386228333, 386240190), + (1687992515, 1689128861, 386240190, 386218132), + (1689128979, 1690375168, 386218132, 386228482), + (1690375347, 1691583496, 386228482, 386228059), + (1691584068, 1692723421, 386228059, 386207611), + (1692724599, 1693967068, 386207611, 386216622), + (1693967242, 1695113955, 386216622, 386198911), + (1695114421, 1696319769, 386198911, 386197775), + (1696319920, 1697456008, 386197775, 386178217), + (1697455965, 1698637823, 386178217, 386171284), + (1698638003, 1699806178, 386171284, 386161170), + (1699806273, 1700957506, 386161170, 386147408), + (1700957763, 1702179081, 386147408, 386150037), + (1702180644, 1703311291, 386150037, 386132147), + (1703311464, 1704501378, 386132147, 386127977), + (1704501692, 1705760375, 386127977, 386138202), + (1705761155, 1706888111, 386138202, 386120285), + (1706888526, 1708006020, 386120285, 386101681), + (1708008110, 1709253900, 386101681, 386108434), + (1709253937, 1710397305, 386108434, 386095705), + (1710397689, 1711619239, 386095705, 386097875), + (1711619463, 1712783397, 386097875, 386089497), + (1712783853, 1713969900, 386089497, 386085339), + (1713970312, 1715252012, 386085339, 386097818), 
+ (1715252414, 1716444342, 386097818, 386094576),
+ (1716445130, 1717664336, 386094576, 386096312),
+ (1717664663, 1718874866, 386096312, 386096421),
+ (1718875797, 1720149002, 386096421, 386108013),
+ (1720149673, 1721321644, 386108013, 386100794),
+ (1721322584, 1722417204, 386100794, 386079422),
+ (1722417212, 1723679655, 386079422, 386088310),
+ (1723679961, 1724854413, 386088310, 386082139),
+ (1724855515, 1726023352, 386082139, 386075020),
+ (1726025157, 1727293148, 386075020, 386084628),
+ (1727293228, 1728454931, 386084628, 386076365),
+ (1728456399, 1729620194, 386076365, 386068776),
+ ];
+
+ #[test]
+ fn test_block_hash_calculation() {
+ let merkle_root = hex!("3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a");
+ let expected_block_hash =
+ hex!("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000");
+
+ let block_header = CircuitBlockHeader {
+ version: 1,
+ prev_block_hash: [0u8; 32],
+ merkle_root,
+ time: 1231006505,
+ bits: 486604799,
+ nonce: 2083236893,
+ };
+
+ let block_hash = block_header.compute_block_hash();
+ assert_eq!(block_hash, expected_block_hash);
+ }
+
+ #[test]
+ fn test_15_block_hash_calculation() {
+ let block_headers = BLOCK_HEADERS
+ .iter()
+ .map(|header| CircuitBlockHeader::try_from_slice(header).unwrap())
+ .collect::<Vec<_>>();
+
+ for i in 0..block_headers.len() - 1 {
+ let block_hash = block_headers[i].compute_block_hash();
+ let next_block = &block_headers[i + 1];
+ assert_eq!(block_hash, next_block.prev_block_hash);
+ }
+ }
+
+ #[test]
+ fn test_median() {
+ let arr = [3, 7, 2, 10, 1, 5, 9, 4, 8, 6, 11];
+ assert_eq!(median(arr), 6);
+ }
+
+ #[test]
+ fn test_timestamp_check_fail() {
+ let block_headers = BLOCK_HEADERS
+ .iter()
+ .map(|header| CircuitBlockHeader::try_from_slice(header).unwrap())
+ .collect::<Vec<_>>();
+
+ let first_11_timestamps = block_headers[..11]
+ .iter()
+ .map(|header| header.time)
+ .collect::<Vec<_>>();
+
+ // The validation is expected to return false
+ assert!(!validate_timestamp(
+
block_headers[1].time,
+ first_11_timestamps.try_into().unwrap(),
+ ));
+ }
+
+ #[test]
+ fn test_timestamp_check_pass() {
+ let block_headers = BLOCK_HEADERS
+ .iter()
+ .map(|header| CircuitBlockHeader::try_from_slice(header).unwrap())
+ .collect::<Vec<_>>();
+
+ let first_11_timestamps = block_headers[..11]
+ .iter()
+ .map(|header| header.time)
+ .collect::<Vec<_>>();
+
+ assert!(validate_timestamp(
+ block_headers[11].time,
+ first_11_timestamps.clone().try_into().unwrap(),
+ ));
+ }
+
+ #[test]
+ #[should_panic(expected = "Hash is not valid")]
+ fn test_hash_check_fail() {
+ let block_headers = BLOCK_HEADERS
+ .iter()
+ .map(|header| CircuitBlockHeader::try_from_slice(header).unwrap())
+ .collect::<Vec<_>>();
+
+ let first_15_hashes = block_headers[..15]
+ .iter()
+ .map(|header| header.compute_block_hash())
+ .collect::<Vec<_>>();
+
+ // The validation is expected to panic
+ check_hash_valid(
+ &first_15_hashes[0],
+ &U256::from_be_hex("00000000FFFF0000000000000000000000000000000000000000000000000000")
+ .wrapping_div(&(U256::ONE << 157))
+ .to_be_bytes(),
+ );
+ }
+
+ #[test]
+ fn test_hash_check_pass() {
+ let block_headers = BLOCK_HEADERS
+ .iter()
+ .map(|header| CircuitBlockHeader::try_from_slice(header).unwrap())
+ .collect::<Vec<_>>();
+
+ let first_15_hashes = block_headers[..15]
+ .iter()
+ .map(|header| header.compute_block_hash())
+ .collect::<Vec<_>>();
+
+ for (i, hash) in first_15_hashes.into_iter().enumerate() {
+ check_hash_valid(&hash, &bits_to_target(block_headers[i].bits));
+ }
+ }
+
+ #[test]
+ fn test_target_conversion() {
+ for (_, _, bits, _) in DIFFICULTY_ADJUSTMENTS {
+ let compact_target = bits_to_target(bits);
+ let nbits = target_to_bits(&compact_target);
+ assert_eq!(nbits, bits);
+ }
+ }
+
+ #[test]
+ fn test_bits_to_target() {
+ // https://learnmeabitcoin.com/explorer/block/00000000000000000002ebe388cb8fa0683fc34984cfc2d7d3b3f99bc0d51bfd
+ let expected_target =
+ hex!("00000000000000000002f1280000000000000000000000000000000000000000");
+ let bits: u32 = 0x1702f128;
+
let target = bits_to_target(bits); + assert_eq!(target, expected_target); + + let converted_bits = target_to_bits(&target); + + assert_eq!(converted_bits, bits); + } + + #[test] + fn test_difficulty_adjustments() { + for (start_time, end_time, start_target, end_target) in DIFFICULTY_ADJUSTMENTS { + let new_target_bytes = calculate_new_difficulty(start_time, end_time, start_target); + let bits = target_to_bits(&new_target_bytes); + assert_eq!(bits, end_target); + } + } + + #[test] + fn test_bridge_block_header_from_header() { + let header = Header { + version: Version::from_consensus(1), + prev_blockhash: BlockHash::from_slice(&[0; 32]).unwrap(), + merkle_root: TxMerkleNode::from_slice(&[1; 32]).unwrap(), + time: 1231006505, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: 2083236893, + }; + + let bridge_header: CircuitBlockHeader = header.into(); + + assert_eq!(bridge_header.version, header.version.to_consensus()); + assert_eq!( + bridge_header.prev_block_hash, + *header.prev_blockhash.as_byte_array() + ); + assert_eq!( + bridge_header.merkle_root, + *header.merkle_root.as_byte_array() + ); + assert_eq!(bridge_header.time, header.time); + assert_eq!(bridge_header.bits, header.bits.to_consensus()); + assert_eq!(bridge_header.nonce, header.nonce); + assert_eq!( + bridge_header.compute_block_hash(), + header.block_hash().to_byte_array() + ); + } + + #[test] + fn test_bridge_block_header_into_header() { + let bridge_header = CircuitBlockHeader { + version: 1, + prev_block_hash: [0; 32], + merkle_root: [1; 32], + time: 1231006505, + bits: 0x1d00ffff, + nonce: 2083236893, + }; + + let header: Header = bridge_header.clone().into(); + + assert_eq!(header.version.to_consensus(), bridge_header.version); + assert_eq!( + *header.prev_blockhash.as_byte_array(), + bridge_header.prev_block_hash + ); + assert_eq!( + *header.merkle_root.as_byte_array(), + bridge_header.merkle_root + ); + assert_eq!(header.time, bridge_header.time); + 
assert_eq!(header.bits.to_consensus(), bridge_header.bits); + assert_eq!(header.nonce, bridge_header.nonce); + assert_eq!( + header.block_hash().to_byte_array(), + bridge_header.compute_block_hash() + ); + } + + #[test] + fn test_roundtrip_header_conversion() { + let original_header = Header { + version: Version::from_consensus(1), + prev_blockhash: BlockHash::from_slice(&[0; 32]).unwrap(), + merkle_root: TxMerkleNode::from_slice(&[1; 32]).unwrap(), + time: 1231006505, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: 2083236893, + }; + + let bridge_header: CircuitBlockHeader = original_header.into(); + let converted_header: Header = bridge_header.into(); + + assert_eq!(original_header, converted_header); + assert_eq!(original_header.block_hash(), converted_header.block_hash()); + } +} diff --git a/circuits-lib/src/lib.rs b/circuits-lib/src/lib.rs new file mode 100644 index 000000000..6d014c306 --- /dev/null +++ b/circuits-lib/src/lib.rs @@ -0,0 +1,4 @@ +pub mod bridge_circuit; +pub mod common; +pub mod header_chain; +pub mod work_only; diff --git a/circuits-lib/src/work_only/mod.rs b/circuits-lib/src/work_only/mod.rs new file mode 100644 index 000000000..66281bcf6 --- /dev/null +++ b/circuits-lib/src/work_only/mod.rs @@ -0,0 +1,159 @@ +//! # Work-Only Circuit - Proof-of-Work Extraction from Header Chain +//! +//! Specialized zkVM circuit that verifies and extracts accumulated proof-of-work +//! from Bitcoin header chain circuit proofs, converting 256-bit work values to +//! compact 128-bit representations for efficient downstream verification. 
+ +use crate::{ + bridge_circuit::structs::{WorkOnlyCircuitInput, WorkOnlyCircuitOutput}, + common::{ + constants::{ + MAINNET_HEADER_CHAIN_METHOD_ID, REGTEST_HEADER_CHAIN_METHOD_ID, + SIGNET_HEADER_CHAIN_METHOD_ID, TESTNET4_HEADER_CHAIN_METHOD_ID, + }, + zkvm::ZkvmGuest, + }, +}; + +use crypto_bigint::{Encoding, U128, U256}; +use risc0_zkvm::guest::env; + +/// Network-specific method ID for the header chain circuit. +/// +/// Compile-time constant that resolves to the appropriate header chain method ID +/// based on the `BITCOIN_NETWORK` environment variable. Ensures compatibility +/// between work-only and header chain circuits for the same network. +/// +/// ## Supported Networks +/// - **mainnet**: Production Bitcoin network +/// - **testnet4**: Bitcoin test network +/// - **signet**: Custom signet with configurable parameters +/// - **regtest**: Local regression testing network +/// +/// Defaults to mainnet if no network is specified. +const HEADER_CHAIN_METHOD_ID: [u32; 8] = { + match option_env!("BITCOIN_NETWORK") { + Some(network) if matches!(network.as_bytes(), b"mainnet") => MAINNET_HEADER_CHAIN_METHOD_ID, + Some(network) if matches!(network.as_bytes(), b"testnet4") => { + TESTNET4_HEADER_CHAIN_METHOD_ID + } + Some(network) if matches!(network.as_bytes(), b"signet") => SIGNET_HEADER_CHAIN_METHOD_ID, + Some(network) if matches!(network.as_bytes(), b"regtest") => REGTEST_HEADER_CHAIN_METHOD_ID, + None => MAINNET_HEADER_CHAIN_METHOD_ID, + _ => panic!("Invalid network type"), + } +}; + +/// Main entry point for the work-only zkVM circuit. +/// +/// Verifies a header chain circuit proof and extracts the total accumulated +/// proof-of-work, converting it from 256-bit to 128-bit representation for +/// efficient storage and downstream verification. +/// +/// ## Process Flow +/// +/// 1. **Input Reading**: Reads `WorkOnlyCircuitInput` from host +/// 2. **Method ID Validation**: Ensures proof comes from compatible header chain circuit +/// 3. 
**Proof Verification**: Cryptographically verifies the header chain proof +/// 4. **Work Extraction**: Extracts `total_work` and `genesis_state_hash` +/// 5. **Work Conversion**: Converts 256-bit work to 128-bit representation +/// 6. **Output Commitment**: Commits compact proof output +/// +/// ## Parameters +/// +/// * `guest` - ZkvmGuest implementation for I/O and proof operations +/// +/// ## Panics +/// +/// - Method ID mismatch between input and expected header chain method ID +/// - Proof verification failure (invalid or tampered header chain proof) +/// - Serialization errors (though practically infallible for used types) +pub fn work_only_circuit(guest: &impl ZkvmGuest) { + let input: WorkOnlyCircuitInput = guest.read_from_host(); + assert_eq!( + HEADER_CHAIN_METHOD_ID, input.header_chain_circuit_output.method_id, + "Invalid method ID for header chain circuit: expected {:?}, got {:?}", + HEADER_CHAIN_METHOD_ID, input.header_chain_circuit_output.method_id + ); + env::verify( + input.header_chain_circuit_output.method_id, + &borsh::to_vec(&input.header_chain_circuit_output).unwrap(), + ) + .unwrap(); + let total_work_u256: U256 = + U256::from_be_bytes(input.header_chain_circuit_output.chain_state.total_work); + let words = work_conversion(total_work_u256); + // Due to the nature of borsh serialization, this will use little endian bytes in the items it serializes/deserializes + guest.commit(&WorkOnlyCircuitOutput { + work_u128: words, + genesis_state_hash: input.header_chain_circuit_output.genesis_state_hash, + }); +} + +/// Converts 256-bit total work to compact 128-bit representation. +/// +/// Truncates the 256-bit work value to its lower 128 bits and converts to +/// big-endian byte array format. This conversion reduces storage requirements +/// while preserving sufficient precision for most practical applications. 
+/// +/// ## Parameters +/// +/// * `work` - The 256-bit accumulated proof-of-work value +/// +/// ## Returns +/// +/// * `[u8; 16]` - 128-bit work value as big-endian byte array +/// +/// ## Note +/// +/// The upper 128 bits are discarded during conversion. For Bitcoin's current +/// difficulty levels, this provides adequate precision for the foreseeable future. +fn work_conversion(work: U256) -> [u8; 16] { + let (_, work): (U128, U128) = work.into(); + work.to_be_bytes() +} + +#[cfg(test)] +mod tests { + use crypto_bigint::{Encoding, U256}; + + use crate::work_only::work_conversion; + #[test] + fn test_work_conversion_one() { + let u128_one_words = work_conversion(U256::ONE); + assert_eq!( + u128_one_words, + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] + ); + let u128_one_borsh = + borsh::to_vec(&u128_one_words).expect("Serialization to vec is infallible"); + assert_eq!(u128_one_borsh.len(), 16); + assert_eq!( + u128_one_borsh, + vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] + ); + let u128_one = borsh::from_slice::<[u8; 16]>(&u128_one_borsh) + .expect("Deserialization from slice is infallible"); + assert_eq!(u128_one, u128_one_words); + } + + #[test] + fn test_work_conversion_real() { + let work_bytes = U256::from_be_bytes([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 1, 0, 1, + ]); + let work_words = work_conversion(work_bytes); + assert_eq!(work_words, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1]); + let u128_one_borsh = + borsh::to_vec(&work_words).expect("Serialization to vec is infallible"); + assert_eq!(u128_one_borsh.len(), 16); + assert_eq!( + u128_one_borsh, + vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1] + ); + let u128_one = borsh::from_slice::<[u8; 16]>(&u128_one_borsh) + .expect("Deserialization from slice is infallible"); + assert_eq!(u128_one, work_words); + } +} diff --git a/circuits-lib/test_data/kickoff_raw_tx.bin b/circuits-lib/test_data/kickoff_raw_tx.bin new file mode 
100644 index 000000000..44120d87e Binary files /dev/null and b/circuits-lib/test_data/kickoff_raw_tx.bin differ diff --git a/circuits-lib/test_data/lcp_receipt.bin b/circuits-lib/test_data/lcp_receipt.bin new file mode 100644 index 000000000..84e84cb65 Binary files /dev/null and b/circuits-lib/test_data/lcp_receipt.bin differ diff --git a/circuits-lib/test_data/payout_tx.bin b/circuits-lib/test_data/payout_tx.bin new file mode 100644 index 000000000..c710caf73 Binary files /dev/null and b/circuits-lib/test_data/payout_tx.bin differ diff --git a/circuits-lib/test_data/storage_proof.bin b/circuits-lib/test_data/storage_proof.bin new file mode 100644 index 000000000..8e94240e8 Binary files /dev/null and b/circuits-lib/test_data/storage_proof.bin differ diff --git a/circuits-lib/test_data/wt_raw_tx.bin b/circuits-lib/test_data/wt_raw_tx.bin new file mode 100644 index 000000000..0b31501d3 Binary files /dev/null and b/circuits-lib/test_data/wt_raw_tx.bin differ diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 000000000..0358cdb50 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,2 @@ +allow-unwrap-in-tests = true +allow-expect-in-tests = true diff --git a/codespell_ignore.txt b/codespell_ignore.txt new file mode 100644 index 000000000..e7a63db07 --- /dev/null +++ b/codespell_ignore.txt @@ -0,0 +1 @@ +statics \ No newline at end of file diff --git a/core/Cargo.toml b/core/Cargo.toml new file mode 100644 index 000000000..775360239 --- /dev/null +++ b/core/Cargo.toml @@ -0,0 +1,106 @@ +[package] +name = "clementine-core" +version = "0.4.0" +edition = "2021" +rust-version = "1.85.0" + +[features] +automation = [] +integration-tests = ["automation"] + +[build-dependencies] +tonic-build = "0.12" +vergen-git2 = { workspace = true } + +[dependencies] +metrics = { workspace = true } +metrics-derive = { workspace = true } +metrics-exporter-prometheus = { workspace = true } +metrics-util = { workspace = true } +color-eyre = { workspace = true } +bitcoin = { workspace = 
true, features = ["rand", "bitcoinconsensus"] } +bitcoincore-rpc = { workspace = true } +hex = { workspace = true, features = ["serde"] } +hex-literal = { workspace = true } +lazy_static = { workspace = true, features = ["spin_no_std"] } +sha2 = { workspace = true } +risc0-zkvm = { workspace = true, features = ["prove"] } +serde = { workspace = true } +serde_json = { workspace = true } +secp256k1 = { workspace = true, features = ["serde", "rand", "std"] } +thiserror = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tokio = { workspace = true, features = ["full"] } +jsonrpsee = { workspace = true, features = ["http-client", "macros"] } +async-trait = { workspace = true } +futures = { workspace = true } +clap = { workspace = true, features = ["derive"] } +toml = { workspace = true } +sqlx = { workspace = true, features = ["runtime-tokio", "postgres", "macros"] } +borsh = { workspace = true } +tonic = { workspace = true } +prost = { workspace = true } +tokio-stream = { workspace = true } +async-stream = { workspace = true } +futures-util = { workspace = true } +futures-core = { workspace = true } +bitvm = { workspace = true } +tempfile = { workspace = true } +eyre = { workspace = true } +tokio-retry = { workspace = true } +http = { workspace = true } +hyper = { workspace = true } +tower = { workspace = true } +hyper-util = { workspace = true } +alloy = { workspace = true } +alloy-sol-types = { workspace = true } +ark-groth16 = { workspace = true, features = ["default"] } +ark-bn254 = { workspace = true } +ark-serialize = { workspace = true } +statig = { workspace = true, features = ["async", "serde"] } +pgmq = { workspace = true } +serde_with = { workspace = true } +citrea-sov-rollup-interface = { workspace = true, features = ["native"] } +rand_chacha = { workspace = true } +log = { workspace = true } +circuits-lib = { path = "../circuits-lib" } +bridge-circuit-host = { path = 
"../bridge-circuit-host" } +bincode = { workspace = true } +ark-ff = { workspace = true } +rustls = { workspace = true } +rustls-pki-types = { workspace = true } +once_cell = { workspace = true } +bitcoin-script = { workspace = true } +secrecy = { workspace = true } +reqwest = { workspace = true } +x25519-dalek = { workspace = true, features = ["static_secrets"] } +chacha20poly1305 = { workspace = true, features = ["stream"] } +hkdf = { workspace = true } + +# UNCOMMENT TO DEBUG TOKIO TASKS + +# console-subscriber = { version = "0.4.1" } + +[dev-dependencies] +serial_test = { workspace = true } +citrea-e2e = { workspace = true } +base64 = { workspace = true } +bridge-circuit-host = { path = "../bridge-circuit-host" } +ctor = { workspace = true } +tokio = { workspace = true, features = ["full", "test-util"] } +rand = { workspace = true } +url = { version = "2.5.4" } +jsonrpc-async = { version = "2.0.2" } + +[[bin]] +name = "clementine-cli" +path = "src/bin/cli.rs" + + +[lints.clippy] +unwrap_used = { level = "deny" } + +[lints.rust] +# This is used to enable/disable the tokio-console debugging utility +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/core/build.rs b/core/build.rs new file mode 100644 index 000000000..b479b6a09 --- /dev/null +++ b/core/build.rs @@ -0,0 +1,84 @@ +use std::{env, path::Path, process::Command}; + +use vergen_git2::{BuildBuilder, CargoBuilder, Emitter, Git2Builder, RustcBuilder, SysinfoBuilder}; + +fn trim_ascii_end(s: &str) -> &str { + let trimmed_len = s + .as_bytes() + .iter() + .rposition(|&b| !b.is_ascii_whitespace()) + .map_or(0, |pos| pos + 1); + &s[..trimmed_len] +} + +fn compile_protobuf() { + // Try to set PROTOC env var if on *nix. + if let Ok(output) = Command::new("which").args(["protoc"]).output() { + // Skip compilation, if command failed. + if !output.status.success() { + return; + } + + // Set env var. 
+ let path = String::from_utf8_lossy(&output.stdout);
+ env::set_var("PROTOC", trim_ascii_end(&path));
+ }
+
+ // Skip compilation if env var is not set.
+ if env::var("PROTOC").is_err() {
+ return;
+ };
+
+ let proto_root = Path::new(env!("CARGO_MANIFEST_DIR")).join("src/rpc/");
+ let protos = &["clementine.proto"];
+
+ let proto_files: Vec<String> = protos
+ .iter()
+ .map(|proto| {
+ proto_root
+ .join(proto)
+ .to_str()
+ .expect("proto_root is not a valid path")
+ .to_owned()
+ })
+ .collect();
+
+ // Tell Cargo that if a proto file changes, rerun this build script.
+ for pf in &proto_files {
+ println!("cargo:rerun-if-changed={}", pf);
+ }
+
+ // Compile server and client code from proto files
+ tonic_build::configure()
+ .build_server(true)
+ .build_client(true)
+ .out_dir("./src/rpc")
+ .compile_protos(
+ &proto_files,
+ &[proto_root.to_str().expect("proto_root is not a valid path")],
+ )
+ .expect("Failed to compile protos");
+}
+
+fn main() {
+ compile_protobuf();
+ let build = BuildBuilder::all_build().expect("Failed to build build instructions");
+ let cargo = CargoBuilder::all_cargo().expect("Failed to build cargo instructions");
+ let git2 = Git2Builder::all_git().expect("Failed to build git instructions");
+ let rustc = RustcBuilder::all_rustc().expect("Failed to build rustc instructions");
+ let si = SysinfoBuilder::all_sysinfo().expect("Failed to build sysinfo instructions");
+
+ Emitter::default()
+ .add_instructions(&build)
+ .expect("Failed to add build instructions")
+ .add_instructions(&cargo)
+ .expect("Failed to add cargo instructions")
+ .add_instructions(&git2)
+ .expect("Failed to add git instructions")
+ .add_instructions(&rustc)
+ .expect("Failed to add rustc instructions")
+ .add_instructions(&si)
+ .expect("Failed to add sysinfo instructions")
+ .emit()
+ .expect("Failed to emit vergen");
+}
diff --git a/core/clippy.toml b/core/clippy.toml
new file mode 100644
index 000000000..0358cdb50
--- /dev/null
+++ b/core/clippy.toml
@@ -0,0 +1,2 @@
+allow-unwrap-in-tests = true +allow-expect-in-tests = true diff --git a/core/src/actor.rs b/core/src/actor.rs new file mode 100644 index 000000000..52da802b5 --- /dev/null +++ b/core/src/actor.rs @@ -0,0 +1,1307 @@ +use std::collections::hash_map::Entry; +use std::collections::HashMap; + +use crate::bitvm_client::{self, ClementineBitVMPublicKeys, SECP}; +use crate::builder::script::SpendPath; +use crate::builder::sighash::TapTweakData; +use crate::builder::transaction::input::SpentTxIn; +use crate::builder::transaction::{SighashCalculator, TxHandler}; +use crate::config::protocol::ProtocolParamset; +use crate::errors::{BridgeError, TxError}; +use crate::operator::{PublicHash, RoundIndex}; +use crate::rpc::clementine::tagged_signature::SignatureId; +use crate::rpc::clementine::TaggedSignature; +use crate::EVMAddress; +use alloy::signers::k256; +use alloy::signers::local::PrivateKeySigner; +use bitcoin::hashes::hash160; +use bitcoin::secp256k1::PublicKey; +use bitcoin::taproot::{self, LeafVersion, TaprootSpendInfo}; +use bitcoin::{ + hashes::Hash, + secp256k1::{schnorr, Keypair, Message, SecretKey, XOnlyPublicKey}, + Address, ScriptBuf, TapSighash, TapTweakHash, +}; +use bitcoin::{OutPoint, TapNodeHash, TapSighashType, Witness}; +use bitvm::signatures::winternitz::{self, BinarysearchVerifier, ToBytesConverter, Winternitz}; +use eyre::{Context, OptionExt}; +use hkdf::Hkdf; +use sha2::Sha256; + +#[derive(Debug, Clone, PartialEq, Eq, Hash, thiserror::Error)] +pub enum VerificationError { + #[error("Invalid hex")] + InvalidHex, + #[error("Invalid length")] + InvalidLength, + #[error("Invalid signature")] + InvalidSignature, +} + +#[derive(Debug, Clone)] +pub enum WinternitzDerivationPath { + /// round_idx, kickoff_idx + /// Message length is fixed KICKOFF_BLOCKHASH_COMMIT_LENGTH + Kickoff(RoundIndex, u32, &'static ProtocolParamset), + /// message_length, pk_type_idx, pk_idx, deposit_outpoint + BitvmAssert(u32, u32, u32, OutPoint, &'static ProtocolParamset), + /// 
watchtower_idx, deposit_outpoint + /// message length is fixed to 1 (because its for one hash) + ChallengeAckHash(u32, OutPoint, &'static ProtocolParamset), +} + +impl WinternitzDerivationPath { + fn get_type_id(&self) -> u8 { + match self { + WinternitzDerivationPath::Kickoff(..) => 0u8, + WinternitzDerivationPath::BitvmAssert(..) => 1u8, + WinternitzDerivationPath::ChallengeAckHash(..) => 2u8, + } + } + + fn to_bytes(&self) -> Vec { + let type_id = self.get_type_id(); + let mut bytes = vec![type_id]; + + match self { + WinternitzDerivationPath::Kickoff(round_idx, kickoff_idx, _) => { + bytes.extend_from_slice(&round_idx.to_index().to_be_bytes()); + bytes.extend_from_slice(&kickoff_idx.to_be_bytes()); + } + WinternitzDerivationPath::BitvmAssert( + message_length, + pk_type_idx, + pk_idx, + deposit_outpoint, + _, + ) => { + bytes.extend_from_slice(&message_length.to_be_bytes()); + bytes.extend_from_slice(&pk_type_idx.to_be_bytes()); + bytes.extend_from_slice(&pk_idx.to_be_bytes()); + bytes.extend_from_slice(&deposit_outpoint.txid.to_byte_array()); + bytes.extend_from_slice(&deposit_outpoint.vout.to_be_bytes()); + } + WinternitzDerivationPath::ChallengeAckHash(watchtower_idx, deposit_outpoint, _) => { + bytes.extend_from_slice(&watchtower_idx.to_be_bytes()); + bytes.extend_from_slice(&deposit_outpoint.txid.to_byte_array()); + bytes.extend_from_slice(&deposit_outpoint.vout.to_be_bytes()); + } + } + + bytes + } + + /// Returns the parameters for the Winternitz signature. 
+ pub fn get_params(&self) -> winternitz::Parameters { + match self { + WinternitzDerivationPath::Kickoff(_, _, paramset) => winternitz::Parameters::new( + paramset.kickoff_blockhash_commit_length, + paramset.winternitz_log_d, + ), + WinternitzDerivationPath::BitvmAssert(message_length, _, _, _, paramset) => { + winternitz::Parameters::new(*message_length, paramset.winternitz_log_d) + } + WinternitzDerivationPath::ChallengeAckHash(_, _, paramset) => { + winternitz::Parameters::new(1, paramset.winternitz_log_d) + } + } + } +} + +fn calc_tweaked_keypair( + keypair: &Keypair, + merkle_root: Option, +) -> Result { + Ok(keypair + .add_xonly_tweak( + &SECP, + &TapTweakHash::from_key_and_tweak(keypair.x_only_public_key().0, merkle_root) + .to_scalar(), + ) + .wrap_err("Failed to add tweak to keypair")?) +} + +fn calc_tweaked_xonly_pk( + pubkey: XOnlyPublicKey, + merkle_root: Option, +) -> Result { + Ok(pubkey + .add_tweak( + &SECP, + &TapTweakHash::from_key_and_tweak(pubkey, merkle_root).to_scalar(), + ) + .wrap_err("Failed to add tweak to xonly_pk")? + .0) +} + +#[derive(Debug, Clone, Default)] +// A cache that holds tweaked keys so that we do not need to repeatedly calculate them. +// This cache will hold data for only one deposit generally because we need to clone the holder of Actor(owner or verifier) +// to spawned threads during deposit and jn general is immutable. +// (Because all grpc functions have &self, we also need to clone Actor to a mutable instance +// to modify the caches) +pub struct TweakCache { + tweaked_key_cache: HashMap<(XOnlyPublicKey, Option), XOnlyPublicKey>, + // A cache to hold actors own tweaked keys. 
+ tweaked_keypair_cache: HashMap<(XOnlyPublicKey, Option), Keypair>, +} + +impl TweakCache { + fn get_tweaked_keypair( + &mut self, + keypair: &Keypair, + merkle_root: Option, + ) -> Result<&Keypair, BridgeError> { + match self + .tweaked_keypair_cache + .entry((keypair.x_only_public_key().0, merkle_root)) + { + Entry::Occupied(entry) => Ok(entry.into_mut()), + Entry::Vacant(entry) => Ok(entry.insert(calc_tweaked_keypair(keypair, merkle_root)?)), + } + } + + fn get_tweaked_xonly_key( + &mut self, + pubkey: XOnlyPublicKey, + merkle_root: Option, + ) -> Result { + match self.tweaked_key_cache.entry((pubkey, merkle_root)) { + Entry::Occupied(entry) => Ok(*entry.get()), + Entry::Vacant(entry) => Ok(*entry.insert(calc_tweaked_xonly_pk(pubkey, merkle_root)?)), + } + } +} + +pub fn verify_schnorr( + signature: &schnorr::Signature, + sighash: &Message, + pubkey: XOnlyPublicKey, + tweak_data: TapTweakData, + tweak_cache: Option<&mut TweakCache>, +) -> Result<(), BridgeError> { + let pubkey = match tweak_data { + TapTweakData::KeyPath(merkle_root) => match tweak_cache { + Some(cache) => cache.get_tweaked_xonly_key(pubkey, merkle_root)?, + None => calc_tweaked_xonly_pk(pubkey, merkle_root)?, + }, + TapTweakData::ScriptPath => pubkey, + TapTweakData::Unknown => return Err(eyre::eyre!("Spend Path Unknown").into()), + }; + SECP.verify_schnorr(signature, sighash, &pubkey) + .map_err(|_| eyre::eyre!("Failed to verify Schnorr signature").into()) +} + +#[derive(Debug, Clone)] +pub struct Actor { + pub keypair: Keypair, + winternitz_secret_key: Option, + pub xonly_public_key: XOnlyPublicKey, + pub public_key: PublicKey, + pub address: Address, +} + +impl Actor { + #[tracing::instrument(ret(level = tracing::Level::TRACE))] + pub fn new( + sk: SecretKey, + winternitz_secret_key: Option, + network: bitcoin::Network, + ) -> Self { + let keypair = Keypair::from_secret_key(&SECP, &sk); + let (xonly, _parity) = XOnlyPublicKey::from_keypair(&keypair); + let address = Address::p2tr(&SECP, 
xonly, None, network); + + Actor { + keypair, + winternitz_secret_key, + xonly_public_key: xonly, + public_key: keypair.public_key(), + address, + } + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + fn sign_with_tweak( + &self, + sighash: TapSighash, + merkle_root: Option, + tweak_cache: Option<&mut TweakCache>, + ) -> Result { + let keypair; + let keypair_ref = match tweak_cache { + Some(cache) => cache.get_tweaked_keypair(&self.keypair, merkle_root)?, + None => { + keypair = calc_tweaked_keypair(&self.keypair, merkle_root)?; + &keypair + } + }; + + Ok(bitvm_client::SECP + .sign_schnorr(&Message::from_digest(*sighash.as_byte_array()), keypair_ref)) + } + + #[tracing::instrument(skip(self), ret(level = tracing::Level::TRACE))] + fn sign(&self, sighash: TapSighash) -> schnorr::Signature { + bitvm_client::SECP.sign_schnorr( + &Message::from_digest(*sighash.as_byte_array()), + &self.keypair, + ) + } + + pub fn sign_with_tweak_data( + &self, + sighash: TapSighash, + tweak_data: TapTweakData, + tweak_cache: Option<&mut TweakCache>, + ) -> Result { + match tweak_data { + TapTweakData::KeyPath(merkle_root) => { + self.sign_with_tweak(sighash, merkle_root, tweak_cache) + } + TapTweakData::ScriptPath => Ok(self.sign(sighash)), + TapTweakData::Unknown => Err(eyre::eyre!("Spend Data Unknown").into()), + } + } + + pub fn get_evm_address(&self) -> Result { + let x = + k256::ecdsa::SigningKey::from_bytes(&self.keypair.secret_key().secret_bytes().into()) + .wrap_err("Failed to convert secret key to signing key")?; + let key: PrivateKeySigner = x.into(); + let wallet_address = key.address(); + + Ok(EVMAddress(wallet_address.into_array())) + } + + /// Returns derivied Winternitz secret key from given path. 
+ pub fn get_derived_winternitz_sk( + &self, + path: WinternitzDerivationPath, + ) -> Result { + let wsk = self + .winternitz_secret_key + .ok_or_eyre("Root Winternitz secret key is not provided in configuration file")?; + + let hk = Hkdf::::new(None, wsk.as_ref()); + let path_bytes = path.to_bytes(); + let mut derived_key = vec![0u8; 32]; + hk.expand(&path_bytes, &mut derived_key) + .map_err(|e| eyre::eyre!("Key derivation failed: {:?}", e))?; + + Ok(derived_key) + } + + /// Generates a Winternitz public key for the given path. + pub fn derive_winternitz_pk( + &self, + path: WinternitzDerivationPath, + ) -> Result { + let winternitz_params = path.get_params(); + + let altered_secret_key = self.get_derived_winternitz_sk(path)?; + let public_key = winternitz::generate_public_key(&winternitz_params, &altered_secret_key); + + Ok(public_key) + } + + /// Signs given data with Winternitz signature. + pub fn sign_winternitz_signature( + &self, + path: WinternitzDerivationPath, + data: Vec, + ) -> Result { + let winternitz = Winternitz::::new(); + + let winternitz_params = path.get_params(); + + let altered_secret_key = self.get_derived_winternitz_sk(path)?; + + let witness = winternitz.sign(&winternitz_params, &altered_secret_key, &data); + + Ok(witness) + } + + pub fn generate_preimage_from_path( + &self, + path: WinternitzDerivationPath, + ) -> Result { + let first_preimage = self.get_derived_winternitz_sk(path)?; + let second_preimage = hash160::Hash::hash(&first_preimage); + Ok(second_preimage.to_byte_array()) + } + + /// Generates the hashes from the preimages. Preimages are constructed using + /// the Winternitz derivation path and the secret key. 
+ pub fn generate_public_hash_from_path( + &self, + path: WinternitzDerivationPath, + ) -> Result { + let preimage = self.generate_preimage_from_path(path)?; + let hash = hash160::Hash::hash(&preimage); + Ok(hash.to_byte_array()) + } + + pub fn generate_bitvm_pks_for_deposit( + &self, + deposit_outpoint: OutPoint, + paramset: &'static ProtocolParamset, + ) -> Result { + let mut pks = ClementineBitVMPublicKeys::create_replacable(); + let pk_vec = self.derive_winternitz_pk( + ClementineBitVMPublicKeys::get_latest_blockhash_derivation(deposit_outpoint, paramset), + )?; + pks.latest_blockhash_pk = ClementineBitVMPublicKeys::vec_to_array::<44>(&pk_vec); + let pk_vec = self.derive_winternitz_pk( + ClementineBitVMPublicKeys::get_challenge_sending_watchtowers_derivation( + deposit_outpoint, + paramset, + ), + )?; + pks.challenge_sending_watchtowers_pk = + ClementineBitVMPublicKeys::vec_to_array::<44>(&pk_vec); + for i in 0..pks.bitvm_pks.0.len() { + let pk_vec = self.derive_winternitz_pk(WinternitzDerivationPath::BitvmAssert( + 64, + 3, + i as u32, + deposit_outpoint, + paramset, + ))?; + pks.bitvm_pks.0[i] = ClementineBitVMPublicKeys::vec_to_array::<68>(&pk_vec); + } + for i in 0..pks.bitvm_pks.1.len() { + let pk_vec = self.derive_winternitz_pk(WinternitzDerivationPath::BitvmAssert( + 64, + 4, + i as u32, + deposit_outpoint, + paramset, + ))?; + pks.bitvm_pks.1[i] = ClementineBitVMPublicKeys::vec_to_array::<68>(&pk_vec); + } + for i in 0..pks.bitvm_pks.2.len() { + let pk_vec = self.derive_winternitz_pk(WinternitzDerivationPath::BitvmAssert( + 32, + 5, + i as u32, + deposit_outpoint, + paramset, + ))?; + pks.bitvm_pks.2[i] = ClementineBitVMPublicKeys::vec_to_array::<36>(&pk_vec); + } + + Ok(pks) + } + + fn get_saved_signature( + signature_id: SignatureId, + signatures: &[TaggedSignature], + ) -> Option { + signatures + .iter() + .find(|sig| { + sig.signature_id + .map(|id| id == signature_id) + .unwrap_or(false) + }) + .and_then(|sig| 
schnorr::Signature::from_slice(sig.signature.as_ref()).ok()) + } + + pub fn add_script_path_to_witness( + witness: &mut Witness, + script: &ScriptBuf, + spend_info: &TaprootSpendInfo, + ) -> Result<(), BridgeError> { + let spend_control_block = spend_info + .control_block(&(script.clone(), LeafVersion::TapScript)) + .ok_or_eyre("Failed to find control block for script")?; + witness.push(script.clone()); + witness.push(spend_control_block.serialize()); + Ok(()) + } + + pub fn tx_sign_preimage( + &self, + txhandler: &mut TxHandler, + data: impl AsRef<[u8]>, + ) -> Result<(), BridgeError> { + let mut signed_preimage = false; + + let data = data.as_ref(); + let signer = move |_: usize, + spt: &SpentTxIn, + calc_sighash: SighashCalculator<'_>| + -> Result, BridgeError> { + let spendinfo = spt + .get_spendable() + .get_spend_info() + .as_ref() + .ok_or(TxError::MissingSpendInfo)?; + match spt.get_spend_path() { + SpendPath::ScriptSpend(script_idx) => { + let script = spt + .get_spendable() + .get_scripts() + .get(script_idx) + .ok_or(TxError::NoScriptAtIndex(script_idx))?; + let sighash_type = spt + .get_signature_id() + .get_deposit_sig_owner() + .map(|s| s.sighash_type())? 
+ .unwrap_or(TapSighashType::Default); + + use crate::builder::script::ScriptKind as Kind; + + let mut witness = match script.kind() { + Kind::PreimageRevealScript(script) => { + if script.0 != self.xonly_public_key { + return Err(TxError::NotOwnedScriptPath.into()); + } + let signature = self.sign(calc_sighash(sighash_type)?); + script.generate_script_inputs( + data, + &taproot::Signature { + signature, + sighash_type, + }, + ) + } + Kind::WinternitzCommit(_) + | Kind::CheckSig(_) + | Kind::Other(_) + | Kind::BaseDepositScript(_) + | Kind::ReplacementDepositScript(_) + | Kind::TimelockScript(_) + | Kind::ManualSpend(_) => return Ok(None), + }; + + if signed_preimage { + return Err(eyre::eyre!("Encountered multiple preimage reveal scripts when attempting to commit to only one.").into()); + } + + signed_preimage = true; + + Self::add_script_path_to_witness( + &mut witness, + &script.to_script_buf(), + spendinfo, + )?; + + Ok(Some(witness)) + } + SpendPath::KeySpend => Ok(None), + SpendPath::Unknown => Err(TxError::SpendPathNotSpecified.into()), + } + }; + + txhandler.sign_txins(signer)?; + Ok(()) + } + pub fn tx_sign_winternitz( + &self, + txhandler: &mut TxHandler, + data: &[(Vec, WinternitzDerivationPath)], + ) -> Result<(), BridgeError> { + let mut signed_winternitz = false; + + let signer = move |_: usize, + spt: &SpentTxIn, + calc_sighash: SighashCalculator<'_>| + -> Result, BridgeError> { + let spendinfo = spt + .get_spendable() + .get_spend_info() + .as_ref() + .ok_or(TxError::MissingSpendInfo)?; + match spt.get_spend_path() { + SpendPath::ScriptSpend(script_idx) => { + let script = spt + .get_spendable() + .get_scripts() + .get(script_idx) + .ok_or(TxError::NoScriptAtIndex(script_idx))?; + let sighash_type = spt + .get_signature_id() + .get_deposit_sig_owner() + .map(|s| s.sighash_type())? 
+ .unwrap_or(TapSighashType::Default); + + use crate::builder::script::ScriptKind as Kind; + + let mut witness = match script.kind() { + Kind::WinternitzCommit(script) => { + if script.checksig_pubkey != self.xonly_public_key { + return Err(TxError::NotOwnedScriptPath.into()); + } + + let mut script_data = Vec::with_capacity(data.len()); + for (data, path) in data { + let secret_key = self.get_derived_winternitz_sk(path.clone())?; + script_data.push((data.clone(), secret_key)); + } + script.generate_script_inputs( + &script_data, + &taproot::Signature { + signature: self.sign(calc_sighash(sighash_type)?), + sighash_type, + }, + ) + } + Kind::PreimageRevealScript(_) + | Kind::CheckSig(_) + | Kind::Other(_) + | Kind::BaseDepositScript(_) + | Kind::ReplacementDepositScript(_) + | Kind::TimelockScript(_) + | Kind::ManualSpend(_) => return Ok(None), + }; + + if signed_winternitz { + return Err(eyre::eyre!("Encountered multiple winternitz scripts when attempting to commit to only one.").into()); + } + + signed_winternitz = true; + + Self::add_script_path_to_witness( + &mut witness, + &script.to_script_buf(), + spendinfo, + )?; + + Ok(Some(witness)) + } + SpendPath::KeySpend => Ok(None), + SpendPath::Unknown => Err(TxError::SpendPathNotSpecified.into()), + } + }; + + txhandler.sign_txins(signer)?; + Ok(()) + } + + pub fn tx_sign_and_fill_sigs( + &self, + txhandler: &mut TxHandler, + signatures: &[TaggedSignature], + mut tweak_cache: Option<&mut TweakCache>, + ) -> Result<(), BridgeError> { + let tx_type = txhandler.get_transaction_type(); + let signer = move |_, + spt: &SpentTxIn, + calc_sighash: SighashCalculator<'_>| + -> Result, BridgeError> { + let spendinfo = spt + .get_spendable() + .get_spend_info() + .as_ref() + .ok_or(TxError::MissingSpendInfo)?; + let sighash_type = spt + .get_signature_id() + .get_deposit_sig_owner() + .map(|s| s.sighash_type())? 
+ .unwrap_or(TapSighashType::Default); + + match spt.get_spend_path() { + SpendPath::ScriptSpend(script_idx) => { + let script = spt + .get_spendable() + .get_scripts() + .get(script_idx) + .ok_or(TxError::NoScriptAtIndex(script_idx))?; + let sig = Self::get_saved_signature(spt.get_signature_id(), signatures); + + let sig = sig.map(|sig| taproot::Signature { + signature: sig, + sighash_type, + }); + + use crate::builder::script::ScriptKind as Kind; + + // Set the script inputs of the witness + let mut witness: Witness = match script.kind() { + Kind::BaseDepositScript(script) => { + match (sig, script.0 == self.xonly_public_key) { + (Some(sig), _) => script.generate_script_inputs(&sig), + (None, true) => { + script.generate_script_inputs(&taproot::Signature { + signature: self.sign(calc_sighash(sighash_type)?), + sighash_type, + }) + } + (None, false) => { + return Err(TxError::SignatureNotFound(tx_type).into()) + } + } + } + Kind::ReplacementDepositScript(script) => { + match (sig, script.0 == self.xonly_public_key) { + (Some(sig), _) => script.generate_script_inputs(&sig), + (None, true) => { + script.generate_script_inputs(&taproot::Signature { + signature: self.sign(calc_sighash(sighash_type)?), + sighash_type, + }) + } + (None, false) => { + return Err(TxError::SignatureNotFound(tx_type).into()); + } + } + } + Kind::TimelockScript(script) => match (sig, script.0) { + (Some(sig), Some(_)) => script.generate_script_inputs(Some(&sig)), + (None, Some(xonly_key)) if xonly_key == self.xonly_public_key => script + .generate_script_inputs(Some(&taproot::Signature { + signature: self.sign(calc_sighash(sighash_type)?), + sighash_type, + })), + (None, Some(_)) => { + return Err(TxError::SignatureNotFound(tx_type).into()) + } + (_, None) => Witness::new(), + }, + Kind::CheckSig(script) => match (sig, script.0 == self.xonly_public_key) { + (Some(sig), _) => script.generate_script_inputs(&sig), + + (None, true) => script.generate_script_inputs(&taproot::Signature { + 
signature: self.sign(calc_sighash(sighash_type)?), + sighash_type, + }), + (None, false) => return Err(TxError::SignatureNotFound(tx_type).into()), + }, + Kind::WinternitzCommit(_) + | Kind::PreimageRevealScript(_) + | Kind::Other(_) + | Kind::ManualSpend(_) => return Ok(None), + }; + + // Add P2TR elements (control block and script) to the witness + Self::add_script_path_to_witness( + &mut witness, + &script.to_script_buf(), + spendinfo, + )?; + Ok(Some(witness)) + } + SpendPath::KeySpend => { + let xonly_public_key = spendinfo.internal_key(); + + let sighash = calc_sighash(sighash_type)?; + let sig = Self::get_saved_signature(spt.get_signature_id(), signatures); + let sig = match sig { + Some(sig) => taproot::Signature { + signature: sig, + sighash_type, + }, + None => { + if xonly_public_key == self.xonly_public_key { + taproot::Signature { + signature: self.sign_with_tweak( + sighash, + spendinfo.merkle_root(), + tweak_cache.as_deref_mut(), + )?, + sighash_type, + } + } else { + return Err(TxError::NotOwnKeyPath.into()); + } + } + }; + Ok(Some(Witness::from_slice(&[&sig.serialize()]))) + } + SpendPath::Unknown => Err(TxError::SpendPathNotSpecified.into()), + } + }; + + txhandler.sign_txins(signer)?; + Ok(()) + } + + /// Generates an auth token using the hash of the public key + /// and a verifiable signature of the hash. + pub fn get_auth_token(&self) -> String { + let pk_hash = bitcoin::hashes::sha256::Hash::hash(&self.xonly_public_key.serialize()); + // sign pk_hash + let sig = SECP.sign_schnorr( + &Message::from_digest(pk_hash.to_byte_array()), + &self.keypair, + ); + + // encode sig and sk_hash + let mut all_bytes = Vec::new(); + all_bytes.extend(pk_hash.to_byte_array()); + all_bytes.extend(sig.serialize()); + + hex::encode(all_bytes) + } + + /// Verifies an auth token using the provided public key. 
+ pub fn verify_auth_token( + &self, + token: &str, + pk: &XOnlyPublicKey, + ) -> Result<(), VerificationError> { + let Ok(bytes) = hex::decode(token) else { + return Err(VerificationError::InvalidHex); + }; + + if bytes.len() != 32 + 64 { + return Err(VerificationError::InvalidLength); + } + + let sk_hash = &bytes[..32]; + let sig = &bytes[32..]; + + let message = Message::from_digest(sk_hash.try_into().expect("checked length")); + SECP.verify_schnorr( + &schnorr::Signature::from_slice(sig).expect("checked length"), + &message, + pk, + ) + .map_err(|_| VerificationError::InvalidSignature) + } +} + +#[cfg(test)] +mod tests { + use super::Actor; + use crate::builder::address::create_taproot_address; + use crate::config::protocol::ProtocolParamsetName; + + use super::*; + use crate::builder::script::{CheckSig, SpendPath, SpendableScript}; + use crate::builder::transaction::input::SpendableTxIn; + use crate::builder::transaction::output::UnspentTxOut; + use crate::builder::transaction::{TransactionType, TxHandler, TxHandlerBuilder}; + + use crate::bitvm_client::SECP; + use crate::rpc::clementine::NormalSignatureKind; + use crate::{actor::WinternitzDerivationPath, test::common::*}; + use bitcoin::secp256k1::{schnorr, Message, SecretKey}; + + use bitcoin::sighash::TapSighashType; + use bitcoin::transaction::Transaction; + + use bitcoin::secp256k1::rand; + use bitcoin::{Amount, Network, OutPoint, Txid}; + use bitcoincore_rpc::RpcApi; + use bitvm::{ + execute_script, + signatures::winternitz::{self, BinarysearchVerifier, ToBytesConverter, Winternitz}, + treepp::script, + }; + use rand::thread_rng; + use std::str::FromStr; + use std::sync::Arc; + + // Helper: create a TxHandler with a single key spend input. + fn create_key_spend_tx_handler(actor: &Actor) -> (bitcoin::TxOut, TxHandler) { + let (tap_addr, spend_info) = + create_taproot_address(&[], Some(actor.xonly_public_key), Network::Regtest); + // Build a transaction with one input that expects a key spend signature. 
+ let prevtxo = bitcoin::TxOut { + value: Amount::from_sat(1000), + script_pubkey: tap_addr.script_pubkey(), + }; + let builder = TxHandlerBuilder::new(TransactionType::Dummy).add_input( + NormalSignatureKind::Reimburse2, + SpendableTxIn::new( + OutPoint::default(), + prevtxo.clone(), + vec![], + Some(spend_info), + ), + SpendPath::KeySpend, + bitcoin::Sequence::ENABLE_RBF_NO_LOCKTIME, + ); + + ( + prevtxo, + builder + .add_output(UnspentTxOut::new( + bitcoin::TxOut { + value: Amount::from_sat(999), + script_pubkey: actor.address.script_pubkey(), + }, + vec![], + None, + )) + .finalize(), + ) + } + + // Helper: create a dummy CheckSig script for script spend. + fn create_dummy_checksig_script(actor: &Actor) -> CheckSig { + // Use a trivial script that is expected to be spent via a signature. + // In production this would be a proper P2TR script. + CheckSig(actor.xonly_public_key) + } + + // Helper: create a TxHandler with a single script spend input using CheckSig. + fn create_script_spend_tx_handler(actor: &Actor) -> (bitcoin::TxOut, TxHandler) { + // Create a dummy spendable input that carries a script. + // Here we simulate that the spendable has one script: a CheckSig script. 
+ let script = create_dummy_checksig_script(actor); + + let (tap_addr, spend_info) = create_taproot_address( + &[script.to_script_buf()], + Some(actor.xonly_public_key), + Network::Regtest, + ); + + let prevutxo = bitcoin::TxOut { + value: Amount::from_sat(1000), + script_pubkey: tap_addr.script_pubkey(), + }; + let spendable_input = SpendableTxIn::new( + OutPoint::default(), + prevutxo.clone(), + vec![Arc::new(script)], + Some(spend_info), + ); + + let builder = TxHandlerBuilder::new(TransactionType::Dummy).add_input( + NormalSignatureKind::KickoffNotFinalized1, + spendable_input, + SpendPath::ScriptSpend(0), + bitcoin::Sequence::ENABLE_RBF_NO_LOCKTIME, + ); + + ( + prevutxo, + builder + .add_output(UnspentTxOut::new( + bitcoin::TxOut { + value: Amount::from_sat(999), + script_pubkey: actor.address.script_pubkey(), + }, + vec![], + None, + )) + .finalize(), + ) + } + + #[test] + fn test_actor_key_spend_verification() { + let sk = SecretKey::new(&mut thread_rng()); + let actor = Actor::new(sk, None, Network::Regtest); + let (utxo, mut txhandler) = create_key_spend_tx_handler(&actor); + + // Actor signs the key spend input. + actor + .tx_sign_and_fill_sigs(&mut txhandler, &[], None) + .expect("Key spend signature should succeed"); + + // Retrieve the cached transaction from the txhandler. + let tx: &Transaction = txhandler.get_cached_tx(); + + tx.verify(|_| Some(utxo.clone())) + .expect("Expected valid signature for key spend"); + } + + #[test] + fn test_actor_script_spend_tx_valid() { + let sk = SecretKey::new(&mut thread_rng()); + let actor = Actor::new(sk, None, Network::Regtest); + let (prevutxo, mut txhandler) = create_script_spend_tx_handler(&actor); + + // Actor performs a partial sign for script spend. + // Using an empty signature slice since our dummy CheckSig uses actor signature. 
+ let signatures: Vec<_> = vec![]; + actor + .tx_sign_and_fill_sigs(&mut txhandler, &signatures, None) + .expect("Script spend partial sign should succeed"); + + // Retrieve the cached transaction. + let tx: &Transaction = txhandler.get_cached_tx(); + + tx.verify(|_| Some(prevutxo.clone())) + .expect("Invalid transaction"); + } + + #[test] + fn test_actor_script_spend_sig_valid() { + let sk = SecretKey::new(&mut thread_rng()); + let actor = Actor::new(sk, None, Network::Regtest); + let (_, mut txhandler) = create_script_spend_tx_handler(&actor); + + // Actor performs a partial sign for script spend. + // Using an empty signature slice since our dummy CheckSig uses actor signature. + let signatures: Vec<_> = vec![]; + actor + .tx_sign_and_fill_sigs(&mut txhandler, &signatures, None) + .expect("Script spend partial sign should succeed"); + + // Retrieve the cached transaction. + let tx: &Transaction = txhandler.get_cached_tx(); + + // For script spend, we extract the witness from the corresponding input. + // Our dummy witness is expected to contain the signature. + let witness = &tx.input[0].witness; + assert!(!witness.is_empty(), "Witness should not be empty"); + let sig = schnorr::Signature::from_slice(&witness[0]) + .expect("Failed to parse Schnorr signature from witness"); + + // Compute the sighash expected for a pubkey spend (similar to key spend). 
+ let sighash = txhandler + .calculate_script_spend_sighash_indexed(0, 0, TapSighashType::Default) + .expect("Sighash computed"); + + let message = Message::from_digest(*sighash.as_byte_array()); + SECP.verify_schnorr(&sig, &message, &actor.xonly_public_key) + .expect("Script spend signature verification failed"); + } + + #[test] + fn actor_new() { + let sk = SecretKey::new(&mut rand::thread_rng()); + let network = Network::Regtest; + + let actor = Actor::new(sk, None, network); + + assert_eq!(sk.public_key(&SECP), actor.public_key); + assert_eq!(sk.x_only_public_key(&SECP).0, actor.xonly_public_key); + } + + #[test] + fn sign_taproot_pubkey_spend() { + let sk = SecretKey::new(&mut rand::thread_rng()); + let network = Network::Regtest; + let actor = Actor::new(sk, None, network); + + // This transaction is matching with prevouts. Therefore signing will + // be successful. + let tx_handler = create_key_spend_tx_handler(&actor).1; + let sighash = tx_handler + .calculate_pubkey_spend_sighash(0, bitcoin::TapSighashType::Default) + .expect("calculating pubkey spend sighash"); + + let signature = actor.sign(sighash); + + let message = Message::from_digest(*sighash.as_byte_array()); + SECP.verify_schnorr(&signature, &message, &actor.xonly_public_key) + .expect("invalid signature"); + } + + #[test] + fn sign_taproot_pubkey_spend_tx_with_sighash() { + let sk = SecretKey::new(&mut rand::thread_rng()); + let network = Network::Regtest; + let actor = Actor::new(sk, None, network); + + // This transaction is matching with prevouts. Therefore signing will + // be successful. 
+ let tx_handler = create_key_spend_tx_handler(&actor).1; + let x = tx_handler + .calculate_pubkey_spend_sighash(0, TapSighashType::Default) + .unwrap(); + actor.sign_with_tweak(x, None, None).unwrap(); + } + + #[tokio::test] + async fn derive_winternitz_pk_uniqueness() { + let paramset: &'static ProtocolParamset = ProtocolParamsetName::Regtest.into(); + let config = create_test_config_with_thread_name().await; + let actor = Actor::new( + config.secret_key, + config.winternitz_secret_key, + Network::Regtest, + ); + + let mut params = WinternitzDerivationPath::Kickoff(RoundIndex::Round(0), 0, paramset); + let pk0 = actor.derive_winternitz_pk(params.clone()).unwrap(); + let pk1 = actor.derive_winternitz_pk(params).unwrap(); + assert_eq!(pk0, pk1); + + params = WinternitzDerivationPath::Kickoff(RoundIndex::Round(0), 1, paramset); + let pk2 = actor.derive_winternitz_pk(params).unwrap(); + assert_ne!(pk0, pk2); + } + + impl TweakCache { + fn get_tweaked_xonly_key_cache_size(&self) -> usize { + self.tweaked_key_cache.len() + } + fn get_tweaked_keypair_cache_size(&self) -> usize { + self.tweaked_keypair_cache.len() + } + } + + #[tokio::test] + async fn test_tweak_cache() { + let mut tweak_cache = TweakCache::default(); + let sk = SecretKey::new(&mut rand::thread_rng()); + let keypair = Keypair::from_secret_key(&SECP, &sk); + let sk2 = SecretKey::new(&mut rand::thread_rng()); + let keypair2 = Keypair::from_secret_key(&SECP, &sk2); + let sk3 = SecretKey::new(&mut rand::thread_rng()); + let keypair3 = Keypair::from_secret_key(&SECP, &sk3); + + tweak_cache.get_tweaked_keypair(&keypair, None).unwrap(); + assert!(tweak_cache.get_tweaked_keypair_cache_size() == 1); + tweak_cache + .get_tweaked_keypair(&keypair, Some(TapNodeHash::assume_hidden([0x55; 32]))) + .unwrap(); + assert!(tweak_cache.get_tweaked_keypair_cache_size() == 2); + tweak_cache + .get_tweaked_keypair(&keypair, Some(TapNodeHash::assume_hidden([0x56; 32]))) + .unwrap(); + 
assert!(tweak_cache.get_tweaked_keypair_cache_size() == 3); + tweak_cache + .get_tweaked_keypair(&keypair, Some(TapNodeHash::assume_hidden([0x57; 32]))) + .unwrap(); + assert!(tweak_cache.get_tweaked_keypair_cache_size() == 4); + tweak_cache + .get_tweaked_keypair(&keypair, Some(TapNodeHash::assume_hidden([0x55; 32]))) + .unwrap(); + tweak_cache.get_tweaked_keypair(&keypair, None).unwrap(); + assert!(tweak_cache.get_tweaked_keypair_cache_size() == 4); + tweak_cache.get_tweaked_keypair(&keypair2, None).unwrap(); + assert!(tweak_cache.get_tweaked_keypair_cache_size() == 5); + let xonly_pk1 = keypair.x_only_public_key(); + let xonly_pk2 = keypair2.x_only_public_key(); + let xonly_pk3 = keypair3.x_only_public_key(); + + // Test for get_tweaked_xonly_key + tweak_cache + .get_tweaked_xonly_key(xonly_pk1.0, None) + .unwrap(); + assert!(tweak_cache.get_tweaked_xonly_key_cache_size() == 1); + tweak_cache + .get_tweaked_xonly_key(xonly_pk1.0, Some(TapNodeHash::assume_hidden([0x55; 32]))) + .unwrap(); + assert!(tweak_cache.get_tweaked_xonly_key_cache_size() == 2); + tweak_cache + .get_tweaked_xonly_key(xonly_pk2.0, Some(TapNodeHash::assume_hidden([0x55; 32]))) + .unwrap(); + assert!(tweak_cache.get_tweaked_xonly_key_cache_size() == 3); + tweak_cache + .get_tweaked_xonly_key(xonly_pk3.0, Some(TapNodeHash::assume_hidden([0x55; 32]))) + .unwrap(); + assert!(tweak_cache.get_tweaked_xonly_key_cache_size() == 4); + tweak_cache + .get_tweaked_xonly_key(xonly_pk1.0, None) + .unwrap(); + tweak_cache + .get_tweaked_xonly_key(xonly_pk3.0, Some(TapNodeHash::assume_hidden([0x55; 32]))) + .unwrap(); + assert!(tweak_cache.get_tweaked_xonly_key_cache_size() == 4); + } + + #[tokio::test] + async fn derive_winternitz_pk_fixed_pk() { + let config = create_test_config_with_thread_name().await; + let paramset: &'static ProtocolParamset = ProtocolParamsetName::Regtest.into(); + let actor = Actor::new( + config.secret_key, + Some( + SecretKey::from_str( + 
"451F451F451F451F451F451F451F451F451F451F451F451F451F451F451F451F", + ) + .unwrap(), + ), + Network::Regtest, + ); + // Test so that same path always returns the same public key (to not change it accidentally) + // check only first digit + let params = WinternitzDerivationPath::Kickoff(RoundIndex::Round(0), 1, paramset); + let expected_pk = vec![ + 192, 121, 127, 229, 19, 208, 80, 49, 82, 134, 237, 242, 142, 162, 143, 232, 12, 231, + 114, 175, + ]; + assert_eq!( + actor.derive_winternitz_pk(params).unwrap()[0].to_vec(), + expected_pk + ); + + let deposit_outpoint = OutPoint { + txid: Txid::all_zeros(), + vout: 1, + }; + + let params = WinternitzDerivationPath::BitvmAssert(3, 0, 0, deposit_outpoint, paramset); + let expected_pk = vec![ + 218, 227, 228, 186, 246, 108, 123, 3, 33, 207, 96, 230, 46, 129, 189, 62, 72, 179, 83, + 181, + ]; + assert_eq!( + actor.derive_winternitz_pk(params).unwrap()[0].to_vec(), + expected_pk + ); + + let params = WinternitzDerivationPath::ChallengeAckHash(0, deposit_outpoint, paramset); + let expected_pk = vec![ + 179, 152, 124, 47, 40, 83, 205, 159, 21, 85, 233, 82, 128, 55, 176, 166, 37, 43, 80, 0, + ]; + assert_eq!( + actor.derive_winternitz_pk(params).unwrap()[0].to_vec(), + expected_pk + ); + } + + #[tokio::test] + async fn sign_winternitz_signature() { + let config = create_test_config_with_thread_name().await; + let actor = Actor::new( + config.secret_key, + Some( + SecretKey::from_str( + "451F451F451F451F451F451F451F451F451F451F451F451F451F451F451F451F", + ) + .unwrap(), + ), + Network::Regtest, + ); + + let data = "iwantporscheasagiftpls".as_bytes().to_vec(); + let message_len = data.len() as u32 * 2; + let paramset: &'static ProtocolParamset = ProtocolParamsetName::Regtest.into(); + + let deposit_outpoint = OutPoint { + txid: Txid::all_zeros(), + vout: 1, + }; + + let path = + WinternitzDerivationPath::BitvmAssert(message_len, 0, 0, deposit_outpoint, paramset); + let params = winternitz::Parameters::new(message_len, 
paramset.winternitz_log_d); + + let witness = actor + .sign_winternitz_signature(path.clone(), data.clone()) + .unwrap(); + let pk = actor.derive_winternitz_pk(path.clone()).unwrap(); + + let winternitz = Winternitz::::new(); + let check_sig_script = winternitz.checksig_verify(¶ms, &pk); + + let message_checker = script! { + for i in 0..message_len / 2 { + {data[i as usize]} + if i == message_len / 2 - 1 { + OP_EQUAL + } else { + OP_EQUALVERIFY + } + } + }; + + let script = script!({witness} {check_sig_script} {message_checker}); + let ret = execute_script(script); + assert!(ret.success); + } + + #[tokio::test] + async fn test_key_spend_signing() { + // Setup test node and actor + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + let sk = SecretKey::new(&mut thread_rng()); + let actor = Actor::new(sk, None, Network::Regtest); + + // Create a UTXO controlled by the actor's key spend path + let (tap_addr, spend_info) = + create_taproot_address(&[], Some(actor.xonly_public_key), Network::Regtest); + let prevtxo = bitcoin::TxOut { + value: Amount::from_sat(50000), // Use a reasonable amount + script_pubkey: tap_addr.script_pubkey(), + }; + + // Fund the address (required for testmempoolaccept) + let outpoint = rpc + .send_to_address(&tap_addr, Amount::from_sat(50000)) + .await + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); // Confirm the funding transaction + + // Build a transaction spending the UTXO with TapSighashType::SinglePlusAnyoneCanPay + let mut builder = TxHandlerBuilder::new(TransactionType::Dummy) + // Use Challenge which maps to NofnSharedDeposit(TapSighashType::SinglePlusAnyoneCanPay) + .add_input( + NormalSignatureKind::Challenge, + SpendableTxIn::new(outpoint, prevtxo.clone(), vec![], Some(spend_info.clone())), + SpendPath::KeySpend, + bitcoin::Sequence::ENABLE_RBF_NO_LOCKTIME, + ); + + // Add a dummy output + builder = 
builder.add_output(UnspentTxOut::new( + bitcoin::TxOut { + value: Amount::from_sat(49000), // Account for fee + script_pubkey: actor.address.script_pubkey(), + }, + vec![], + None, + )); + + let mut txhandler = builder.finalize(); + + // Actor signs the key spend input using the non-default sighash type + actor + .tx_sign_and_fill_sigs(&mut txhandler, &[], None) + .expect("Key spend signature with SighashNone should succeed"); + + // Retrieve the signed transaction + let tx: &Transaction = txhandler.get_cached_tx(); + + // Use testmempoolaccept to verify the transaction is valid by consensus rules + let mempool_accept_result = rpc.test_mempool_accept(&[tx]).await.unwrap(); + + assert!( + mempool_accept_result[0].allowed.unwrap(), + "Transaction should be allowed in mempool. Rejection reason: {:?}", + mempool_accept_result[0].reject_reason.as_ref().unwrap() + ); + + // Build a transaction spending the UTXO with TapSighashType::Default + let mut builder = TxHandlerBuilder::new(TransactionType::Dummy) + // Use Reimburse2 which maps to NofnSharedDeposit(TapSighashType::Default) + .add_input( + NormalSignatureKind::Reimburse2, + SpendableTxIn::new(outpoint, prevtxo.clone(), vec![], Some(spend_info.clone())), + SpendPath::KeySpend, + bitcoin::Sequence::ENABLE_RBF_NO_LOCKTIME, + ); + + // Add a dummy output + builder = builder.add_output(UnspentTxOut::new( + bitcoin::TxOut { + value: Amount::from_sat(39000), // Account for fee + script_pubkey: actor.address.script_pubkey(), + }, + vec![], + None, + )); + + let mut txhandler = builder.finalize(); + + // Actor signs the key spend input using the non-default sighash type + actor + .tx_sign_and_fill_sigs(&mut txhandler, &[], None) + .expect("Key spend signature with SighashDefault should succeed"); + + // Retrieve the signed transaction + let tx: &Transaction = txhandler.get_cached_tx(); + + // Use testmempoolaccept to verify the transaction is valid by consensus rules + let mempool_accept_result = 
rpc.test_mempool_accept(&[tx]).await.unwrap(); + + assert!( + mempool_accept_result[0].allowed.unwrap(), + "Transaction should be allowed in mempool. Rejection reason: {:?}", + mempool_accept_result[0].reject_reason.as_ref().unwrap() + ); + } + + #[tokio::test] + async fn test_auth_token() { + let actor = Actor::new(SecretKey::new(&mut thread_rng()), None, Network::Regtest); + let token = actor.get_auth_token(); + assert!(actor + .verify_auth_token(&token, &actor.xonly_public_key) + .is_ok()); + } +} diff --git a/core/src/aggregator.rs b/core/src/aggregator.rs new file mode 100644 index 000000000..57c7fba4e --- /dev/null +++ b/core/src/aggregator.rs @@ -0,0 +1,768 @@ +use std::ops::Deref; +use std::sync::Arc; + +use crate::constants::{ + ENTITY_STATUS_POLL_TIMEOUT, OPERATOR_GET_KEYS_TIMEOUT, PUBLIC_KEY_COLLECTION_TIMEOUT, + VERIFIER_SEND_KEYS_TIMEOUT, +}; +use crate::deposit::DepositData; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::rpc::clementine::entity_status_with_id::StatusResult; +use crate::rpc::clementine::EntityId as RPCEntityId; +use crate::rpc::clementine::{ + self, DepositParams, Empty, EntityStatusWithId, EntityType, OperatorKeysWithDeposit, +}; +use crate::task::aggregator_metric_publisher::AGGREGATOR_METRIC_PUBLISHER_POLL_DELAY; +use crate::task::TaskExt; +#[cfg(feature = "automation")] +use crate::tx_sender::TxSenderClient; +use crate::utils::{timed_request, timed_try_join_all}; +use crate::{ + builder::{self}, + config::BridgeConfig, + database::Database, + errors::BridgeError, + musig2::aggregate_partial_signatures, + rpc::{ + self, + clementine::{ + clementine_operator_client::ClementineOperatorClient, + clementine_verifier_client::ClementineVerifierClient, + }, + }, +}; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::{schnorr, Message, PublicKey}; +use bitcoin::XOnlyPublicKey; +use eyre::Context; +use futures::future::join_all; +use secp256k1::musig::{AggregatedNonce, PartialSignature}; +use std::future::Future; +use 
tokio::sync::RwLock; +use tonic::{Request, Status}; +use tracing::{debug_span, Instrument}; + +/// Aggregator struct. +/// This struct is responsible for aggregating partial signatures from the verifiers. +/// It will have in total 3 * num_operator + 1 aggregated nonces. +/// \[0\] -> Aggregated nonce for the move transaction. +/// [1..num_operator + 1] -> Aggregated nonces for the operator_takes transactions. +/// [num_operator + 1..2 * num_operator + 1] -> Aggregated nonces for the slash_or_take transactions. +/// [2 * num_operator + 1..3 * num_operator + 1] -> Aggregated nonces for the burn transactions. +/// For now, we do not have the last bit. +#[derive(Debug, Clone)] +pub struct Aggregator { + pub(crate) rpc: ExtendedBitcoinRpc, + pub(crate) db: Database, + pub(crate) config: BridgeConfig, + #[cfg(feature = "automation")] + pub(crate) tx_sender: TxSenderClient, + operator_clients: Vec>, + verifier_clients: Vec>, + verifier_keys: Arc>>>, + operator_keys: Arc>>>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum EntityId { + Verifier(VerifierId), + Operator(OperatorId), +} + +/// Wrapper struct that renders the verifier id in the logs. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct VerifierId(pub PublicKey); + +/// Wrapper struct that renders the operator id in the logs. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct OperatorId(pub XOnlyPublicKey); + +impl std::fmt::Display for EntityId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + EntityId::Verifier(id) => write!(f, "{}", id), + EntityId::Operator(id) => write!(f, "{}", id), + } + } +} + +impl std::fmt::Display for VerifierId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Verifier({})", &self.0.to_string()[..10]) + } +} + +impl std::fmt::Display for OperatorId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Operator({})", &self.0.to_string()[..10]) + } +} + +/// Wrapper struct that matches verifier clients with their ids. +#[derive(Debug, Clone)] +pub struct ParticipatingVerifiers( + pub Vec<( + ClementineVerifierClient, + VerifierId, + )>, +); + +impl ParticipatingVerifiers { + pub fn new( + verifiers: Vec<( + ClementineVerifierClient, + VerifierId, + )>, + ) -> Self { + Self(verifiers) + } + + pub fn clients(&self) -> Vec> { + self.0.iter().map(|(client, _)| client.clone()).collect() + } + + pub fn ids(&self) -> Vec { + self.0.iter().map(|(_, id)| *id).collect() + } +} + +/// Wrapper struct that matches operator clients with their ids. 
+#[derive(Debug, Clone)] +pub struct ParticipatingOperators( + pub Vec<( + ClementineOperatorClient, + OperatorId, + )>, +); + +impl ParticipatingOperators { + pub fn new( + operators: Vec<( + ClementineOperatorClient, + OperatorId, + )>, + ) -> Self { + Self(operators) + } + + pub fn clients(&self) -> Vec> { + self.0.iter().map(|(client, _)| client.clone()).collect() + } + + pub fn ids(&self) -> Vec { + self.0.iter().map(|(_, id)| *id).collect() + } +} + +impl Aggregator { + pub async fn new(config: BridgeConfig) -> Result { + let db = Database::new(&config).await?; + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + let verifier_endpoints = + config + .verifier_endpoints + .clone() + .ok_or(BridgeError::ConfigError( + "No verifier endpoints provided in config".into(), + ))?; + + let operator_endpoints = + config + .operator_endpoints + .clone() + .ok_or(BridgeError::ConfigError( + "No operator endpoints provided in config".into(), + ))?; + + // Create clients to connect to all verifiers + let verifier_clients = rpc::get_clients( + verifier_endpoints, + crate::rpc::verifier_client_builder(&config), + &config, + true, + ) + .await?; + + // Create clients to connect to all operators + let operator_clients = rpc::get_clients( + operator_endpoints, + crate::rpc::operator_client_builder(&config), + &config, + true, + ) + .await?; + + #[cfg(feature = "automation")] + let tx_sender = TxSenderClient::new(db.clone(), "aggregator".to_string()); + + tracing::info!( + "Aggregator created with {} verifiers and {} operators", + verifier_clients.len(), + operator_clients.len(), + ); + + let operator_keys = Arc::new(RwLock::new(vec![None; operator_clients.len()])); + let verifier_keys = Arc::new(RwLock::new(vec![None; verifier_clients.len()])); + + Ok(Aggregator { + rpc, + db, + config, + #[cfg(feature = "automation")] + tx_sender, + verifier_clients, + 
operator_clients, + verifier_keys, + operator_keys, + }) + } + + pub fn get_verifier_clients(&self) -> &[ClementineVerifierClient] { + &self.verifier_clients + } + + /// Generic helper function to fetch keys from clients + async fn fetch_pubkeys_from_entities( + &self, + clients: &[C], + keys_storage: &RwLock>>, + pubkey_fetcher: F, + key_type_name: &str, + ) -> Result, BridgeError> + where + T: Clone + Send + Sync, + C: Clone + Send + Sync, + F: Fn(C) -> Fut + Send + Sync, + Fut: Future> + Send, + { + // Check if all keys are collected + let all_collected = { + let keys = keys_storage.read().await; + keys.iter().all(|key| key.is_some()) + }; + + if !all_collected { + // get a write lock early, so that only one thread can try to collect keys + let mut keys = keys_storage.write().await; + + // sanity check because we directly use indexes below + if keys.len() != clients.len() { + return Err(eyre::eyre!( + "Keys storage length does not match clients length, should not happen, keys length: {}, clients length: {}", + keys.len(), + clients.len() + ) + .into()); + } + + let key_collection_futures = clients + .iter() + .zip(keys.iter().enumerate()) + .filter_map(|(client, (idx, key))| { + if key.is_none() { + Some((idx, pubkey_fetcher(client.clone()))) + } else { + None + } + }) + .map(|(idx, fut)| async move { (idx, fut.await) }); + + let collected_keys = join_all(key_collection_futures).await; + let mut missing_keys = Vec::new(); + + // Fill in keys with the results of the futures + for (idx, new_key) in collected_keys { + match new_key { + Ok(new_key) => keys[idx] = Some(new_key), + Err(e) => { + tracing::debug!( + "Failed to collect {} {} (order in config) key: {}", + key_type_name, + idx, + e + ); + missing_keys.push(idx); + } + } + } + + // if not all keys were collected, return an error + if keys.iter().any(|key| key.is_none()) { + return Err(eyre::eyre!( + "Not all {} keys were able to be collected, missing keys at indices: {:?}", + key_type_name, + missing_keys + 
) + .into()); + } + } + + // return all keys if they were all collected + Ok(keys_storage + .read() + .await + .iter() + .map(|key| key.clone().expect("should all be collected")) + .collect()) + } + + /// If all verifier keys are already collected, returns them. + /// Otherwise, it tries to collect them from the verifiers, saves them and returns them. + pub async fn fetch_verifier_keys(&self) -> Result, BridgeError> { + self.fetch_pubkeys_from_entities( + &self.verifier_clients, + &self.verifier_keys, + |mut client| async move { + let mut request = Request::new(Empty {}); + request.set_timeout(PUBLIC_KEY_COLLECTION_TIMEOUT); + let verifier_params = client.get_params(request).await?.into_inner(); + let public_key = PublicKey::from_slice(&verifier_params.public_key) + .map_err(|e| eyre::eyre!("Failed to parse verifier public key: {}", e))?; + Ok::<_, BridgeError>(public_key) + }, + "verifier", + ) + .await + } + + /// If all operator keys are already collected, returns them. + /// Otherwise, it tries to collect them from the operators, saves them and returns them. + pub async fn fetch_operator_keys(&self) -> Result, BridgeError> { + self.fetch_pubkeys_from_entities( + &self.operator_clients, + &self.operator_keys, + |mut client| async move { + let mut request = Request::new(Empty {}); + request.set_timeout(PUBLIC_KEY_COLLECTION_TIMEOUT); + let operator_xonly_pk: XOnlyPublicKey = client + .get_x_only_public_key(request) + .await? 
+ .into_inner() + .try_into()?; + Ok::<_, BridgeError>(operator_xonly_pk) + }, + "operator", + ) + .await + } + + pub fn get_operator_clients(&self) -> &[ClementineOperatorClient] { + &self.operator_clients + } + + /// Collects and distributes keys to verifiers from operators and watchtowers for the new deposit + /// for operators: get bitvm assert winternitz public keys and watchtower challenge ack hashes + /// for watchtowers: get winternitz public keys for watchtower challenges + pub async fn collect_and_distribute_keys( + &self, + deposit_params: &DepositParams, + ) -> Result<(), BridgeError> { + tracing::info!("Starting collect_and_distribute_keys"); + + let start_time = std::time::Instant::now(); + + let deposit_data: DepositData = deposit_params.clone().try_into()?; + + // Create channels with larger capacity to prevent blocking + let (operator_keys_tx, operator_keys_rx) = + tokio::sync::broadcast::channel::( + deposit_data.get_num_operators() * deposit_data.get_num_verifiers(), + ); + let operator_rx_handles = (0..deposit_data.get_num_verifiers()) + .map(|_| operator_keys_rx.resubscribe()) + .collect::>(); + + let operators = self.get_participating_operators(&deposit_data).await?; + let operator_clients = operators.clients(); + + let operator_xonly_pks = deposit_data.get_operators(); + let deposit = deposit_params.clone(); + + tracing::info!("Starting operator key collection"); + #[cfg(test)] + let timeout_params = self.config.test_params.timeout_params; + #[allow(clippy::unused_enumerate_index)] + let get_operators_keys_handle = tokio::spawn(timed_try_join_all( + OPERATOR_GET_KEYS_TIMEOUT, + "Operator key collection", + Some(operators.ids()), + operator_clients + .into_iter() + .zip(operator_xonly_pks.into_iter()) + .enumerate() + .map(move |(_idx, (mut operator_client, operator_xonly_pk))| { + let deposit_params = deposit.clone(); + let tx = operator_keys_tx.clone(); + async move { + #[cfg(test)] + timeout_params + 
.hook_timeout_key_collection_operator(_idx) + .await; + + let operator_keys = operator_client + .get_deposit_keys(deposit_params.clone()) + .instrument( + debug_span!("get_deposit_keys", id=%OperatorId(operator_xonly_pk)), + ) + .await + .wrap_err(Status::internal("Operator key retrieval failed"))? + .into_inner(); + + // A send error means that all receivers are closed, + // receivers only close if they have an error (while + // loop condition) + // We don't care about the result of the send, we + // only care about the error on the other side. + // Ignore this error, and let the other side's error + // propagate. + let _ = tx.send(OperatorKeysWithDeposit { + deposit_params: Some(deposit_params), + operator_keys: Some(operator_keys), + operator_xonly_pk: operator_xonly_pk.serialize().to_vec(), + }); + + Ok(()) + } + }), + )); + + tracing::info!("Starting operator key distribution to verifiers"); + let verifiers = self.get_participating_verifiers(&deposit_data).await?; + + let verifier_clients = verifiers.clients(); + let num_operators = deposit_data.get_num_operators(); + + let verifier_ids = verifiers.ids(); + + #[cfg(test)] + let timeout_params = self.config.test_params.timeout_params; + #[allow(clippy::unused_enumerate_index)] + let distribute_operators_keys_handle = tokio::spawn(timed_try_join_all( + VERIFIER_SEND_KEYS_TIMEOUT, + "Verifier key distribution", + Some(verifier_ids.clone()), + verifier_clients + .into_iter() + .zip(operator_rx_handles) + .zip(verifier_ids) + .enumerate() + .map( + move |(_idx, ((mut verifier, mut rx), verifier_id))| async move { + #[cfg(test)] + timeout_params + .hook_timeout_key_distribution_verifier(_idx) + .await; + + // Only wait for expected number of messages + let mut received_keys = std::collections::HashSet::new(); + while received_keys.len() < num_operators { + tracing::debug!( + "Waiting for operator key (received {}/{})", + received_keys.len(), + num_operators + ); + + // This will not block forever because of the 
timeout on the join all. + let operator_keys = rx + .recv() + .instrument(debug_span!("operator_keys_recv")) + .await + .wrap_err(Status::internal( + "Operator broadcast channels closed before all keys were received", + ))?; + + let operator_xonly_pk = operator_keys.operator_xonly_pk.clone(); + + if !received_keys.insert(operator_xonly_pk.clone()) { + continue; + } + + timed_request( + VERIFIER_SEND_KEYS_TIMEOUT, + &format!("Setting operator keys for {}", verifier_id), + async { + Ok(verifier + .set_operator_keys(operator_keys) + .await + .wrap_err_with(|| { + Status::internal(format!( + "Failed to set operator keys for {}", + verifier_id + )) + })) + }, + ) + .await??; + } + Ok::<_, BridgeError>(()) + }, + ), + )); + + // Wait for all tasks to complete + let (get_operators_keys_result, distribute_operators_keys_result) = + tokio::try_join!(get_operators_keys_handle, distribute_operators_keys_handle) + .wrap_err(Status::internal("Task join error in key distribution"))?; + + get_operators_keys_result?; + distribute_operators_keys_result?; + + tracing::info!( + "collect_and_distribute_keys completed in {:?}", + start_time.elapsed() + ); + + Ok(()) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn _aggregate_move_partial_sigs( + &self, + deposit_data: &mut DepositData, + agg_nonce: &AggregatedNonce, + partial_sigs: Vec, + ) -> Result { + let tx = builder::transaction::create_move_to_vault_txhandler( + deposit_data, + self.config.protocol_paramset(), + )?; + + let message = Message::from_digest( + tx.calculate_script_spend_sighash_indexed(0, 0, bitcoin::TapSighashType::Default)? + .to_byte_array(), + ); + + let verifiers_public_keys = deposit_data.get_verifiers(); + + let final_sig = aggregate_partial_signatures( + verifiers_public_keys, + None, + *agg_nonce, + &partial_sigs, + message, + )?; + + Ok(final_sig) + } + + /// Returns a list of verifier clients that are participating in the deposit. 
+ pub async fn get_participating_verifiers( + &self, + deposit_data: &DepositData, + ) -> Result { + let verifier_keys = self.fetch_verifier_keys().await?; + let mut participating_verifiers = Vec::new(); + + let verifiers = deposit_data.get_verifiers(); + + for verifier_pk in verifiers { + if let Some(pos) = verifier_keys.iter().position(|key| key == &verifier_pk) { + participating_verifiers + .push((self.verifier_clients[pos].clone(), VerifierId(verifier_pk))); + } else { + tracing::error!( + "Verifier public key not found. Deposit data verifier keys: {:?}, self verifier keys: {:?}", + deposit_data.get_verifiers(), + self.verifier_keys + ); + return Err(BridgeError::VerifierNotFound(verifier_pk)); + } + } + + Ok(ParticipatingVerifiers::new(participating_verifiers)) + } + + /// Returns a list of operator clients that are participating in the deposit. + pub async fn get_participating_operators( + &self, + deposit_data: &DepositData, + ) -> Result { + let operator_keys = self.fetch_operator_keys().await?; + let mut participating_operators = Vec::new(); + + let operators = deposit_data.get_operators(); + + for operator_pk in operators { + if let Some(pos) = operator_keys.iter().position(|key| key == &operator_pk) { + participating_operators + .push((self.operator_clients[pos].clone(), OperatorId(operator_pk))); + } else { + return Err(BridgeError::OperatorNotFound(operator_pk)); + } + } + + Ok(ParticipatingOperators::new(participating_operators)) + } + + /// Retrieves the status of all entities (operators and verifiers) and restarts background tasks if needed. + /// Returns a vector of EntityStatusWithId. Only returns an error if restarting tasks fails when requested. 
+ pub async fn get_entity_statuses( + &self, + restart_tasks: bool, + ) -> Result, BridgeError> { + tracing::debug!("Getting entities status"); + + let operator_clients = self.get_operator_clients(); + let verifier_clients = self.get_verifier_clients(); + tracing::debug!("Operator clients: {:?}", operator_clients.len()); + + // try to reach all operators and verifiers to collect keys, but do not return err if some of them can't be reached + let _ = self.fetch_operator_keys().await; + let _ = self.fetch_verifier_keys().await; + + let operator_keys = self.operator_keys.read().await.clone(); + let verifier_keys = self.verifier_keys.read().await.clone(); + + // we will only ask status of entities that we could collect keys for, others are unreachable + + let operator_status = join_all( + operator_clients + .iter() + .zip(operator_keys.iter()) + // filter out operators that have None as key + .filter_map(|(client, key)| key.map(|key| (client, key))) + .map(|(client, key)| { + let mut client = client.clone(); + async move { + tracing::debug!("Getting operator status for {:?}", key); + let mut request = Request::new(Empty {}); + request.set_timeout(ENTITY_STATUS_POLL_TIMEOUT); + let response = client.get_current_status(request).await; + + EntityStatusWithId { + entity_id: Some(RPCEntityId { + kind: EntityType::Operator as i32, + id: key.to_string(), + }), + status_result: match response { + Ok(response) => Some(StatusResult::Status(response.into_inner())), + Err(e) => Some(StatusResult::Err(clementine::EntityError { + error: e.to_string(), + })), + }, + } + } + }), + ) + .await; + + let verifier_status = join_all( + verifier_clients + .iter() + .zip(verifier_keys.iter()) + .filter_map(|(client, key)| key.map(|key| (client, key))) + .map(|(client, key)| { + let mut client = client.clone(); + async move { + tracing::debug!("Getting verifier status for {:?}", key); + let mut request = Request::new(Empty {}); + request.set_timeout(ENTITY_STATUS_POLL_TIMEOUT); + let response = 
client.get_current_status(request).await; + + EntityStatusWithId { + entity_id: Some(RPCEntityId { + kind: EntityType::Verifier as i32, + id: key.to_string(), + }), + status_result: match response { + Ok(response) => Some(StatusResult::Status(response.into_inner())), + Err(e) => Some(StatusResult::Err(clementine::EntityError { + error: e.to_string(), + })), + }, + } + } + }), + ) + .await; + + // Combine operator and verifier status into a single vector + let mut entity_statuses = operator_status; + entity_statuses.extend(verifier_status); + + // try to restart background tasks if needed + if restart_tasks { + let operator_tasks = operator_clients.iter().map(|client| { + let mut client = client.clone(); + async move { + client + .restart_background_tasks(Request::new(Empty {})) + .await + } + }); + + let verifier_tasks = verifier_clients.iter().map(|client| { + let mut client = client.clone(); + async move { + client + .restart_background_tasks(Request::new(Empty {})) + .await + } + }); + + futures::try_join!( + futures::future::try_join_all(operator_tasks), + futures::future::try_join_all(verifier_tasks) + )?; + } + Ok(entity_statuses) + } +} + +/// Aggregator server wrapper that manages background tasks. +#[derive(Debug)] +pub struct AggregatorServer { + pub aggregator: Aggregator, + background_tasks: crate::task::manager::BackgroundTaskManager, +} + +impl AggregatorServer { + pub async fn new(config: BridgeConfig) -> Result { + let aggregator = Aggregator::new(config.clone()).await?; + let background_tasks = crate::task::manager::BackgroundTaskManager::default(); + + Ok(Self { + aggregator, + background_tasks, + }) + } + + /// Starts the background tasks for the aggregator. + /// If called multiple times, it will restart only the tasks that are not already running. 
+ pub async fn start_background_tasks(&self) -> Result<(), BridgeError> { + // Start the aggregator metric publisher task + self.background_tasks + .ensure_task_looping( + crate::task::aggregator_metric_publisher::AggregatorMetricPublisher::new( + self.aggregator.clone(), + ) + .await? + .with_delay(AGGREGATOR_METRIC_PUBLISHER_POLL_DELAY), + ) + .await; + + tracing::info!("Aggregator metric publisher task started"); + + Ok(()) + } + + pub async fn shutdown(&mut self) { + self.background_tasks.graceful_shutdown().await; + } +} + +impl Deref for AggregatorServer { + type Target = Aggregator; + + fn deref(&self) -> &Self::Target { + &self.aggregator + } +} diff --git a/core/src/bin/cli.rs b/core/src/bin/cli.rs new file mode 100644 index 000000000..e49a39427 --- /dev/null +++ b/core/src/bin/cli.rs @@ -0,0 +1,1102 @@ +//! This module defines a command line interface for the RPC client. + +use std::path::PathBuf; +use std::str::FromStr; + +use bitcoin::{hashes::Hash, ScriptBuf, Txid, XOnlyPublicKey}; +use bitcoincore_rpc::json::SignRawTransactionInput; +use clap::{Parser, Subcommand}; +use clementine_core::{ + builder::transaction::TransactionType, + config::BridgeConfig, + deposit::SecurityCouncil, + rpc::clementine::{ + self, clementine_aggregator_client::ClementineAggregatorClient, deposit::DepositData, + Actors, AggregatorWithdrawalInput, BaseDeposit, Deposit, Empty, GetEntityStatusesRequest, + Outpoint, ReplacementDeposit, SendMoveTxRequest, VerifierPublicKeys, XOnlyPublicKeyRpc, + XOnlyPublicKeys, + }, + EVMAddress, +}; +use tonic::Request; + +#[derive(Parser)] +#[command(author, version, about, long_about = None)] +struct Cli { + /// The URL of the service + #[arg(short, long)] + node_url: String, + + #[command(subcommand)] + command: Commands, +} + +#[derive(Subcommand)] +enum Commands { + /// Operator service commands + Operator { + #[command(subcommand)] + command: OperatorCommands, + }, + /// Verifier service commands + Verifier { + #[command(subcommand)] + 
command: VerifierCommands, + }, + /// Aggregator service commands + Aggregator { + #[command(subcommand)] + command: AggregatorCommands, + }, + /// Commands for interacting with Bitcoin only + /// Give Bitcoin RPC URL as node-url + Bitcoin { + #[command(subcommand)] + command: BitcoinCommands, + }, +} + +#[derive(Subcommand)] +enum OperatorCommands { + /// Get deposit keys + GetDepositKeys { + #[arg(long)] + deposit_outpoint_txid: String, + #[arg(long)] + deposit_outpoint_vout: u32, + }, + /// Get operator parameters + GetParams, + /// Withdraw funds + Withdraw { + #[arg(long)] + withdrawal_id: u32, + #[arg(long)] + input_signature: String, + #[arg(long)] + input_outpoint_txid: String, + #[arg(long)] + input_outpoint_vout: u32, + #[arg(long)] + output_script_pubkey: String, + #[arg(long)] + output_amount: u64, + }, + /// Get vergen build information + Vergen, + /// Get kickoff related txs for sending kickoff manually + GetReimbursementTxs { + #[arg(long)] + deposit_outpoint_txid: String, + #[arg(long)] + deposit_outpoint_vout: u32, + }, + // Add other operator commands as needed +} + +#[derive(Subcommand)] +enum VerifierCommands { + /// Get verifier parameters + GetParams, + /// Generate nonces + NonceGen { + #[arg(long)] + num_nonces: u32, + }, + /// Get vergen build information + Vergen, + // /// Set verifier public keys + // SetVerifiers { + // #[arg(long, num_args = 1.., value_delimiter = ',')] + // public_keys: Vec, + // }, + // Add other verifier commands as needed +} + +#[derive(Subcommand)] +enum AggregatorCommands { + /// Setup the system + Setup, + /// Process new deposit + NewDeposit { + #[arg(long)] + deposit_outpoint_txid: String, + #[arg(long)] + deposit_outpoint_vout: u32, + #[arg(long)] + evm_address: Option, + #[arg(long)] + recovery_taproot_address: Option, + }, + /// Sign a replacement deposit + NewReplacementDeposit { + #[arg(long)] + deposit_outpoint_txid: String, + #[arg(long)] + deposit_outpoint_vout: u32, + #[arg(long)] + old_move_txid: 
String, + }, + /// Get the aggregated NofN x-only public key + GetNofnAggregatedKey, + /// Get deposit address + GetDepositAddress { + #[arg(long)] + evm_address: Option, + #[arg(long)] + recovery_taproot_address: Option, + #[arg(long)] + network: Option, + #[arg(long)] + user_takes_after: Option, + }, + GetReplacementDepositAddress { + #[arg(long)] + move_txid: String, + #[arg(long)] + network: Option, + #[arg(long)] + security_council: Option, + }, + /// Process a new withdrawal + NewWithdrawal { + #[arg(long)] + withdrawal_id: u32, + #[arg(long)] + input_signature: String, + #[arg(long)] + input_outpoint_txid: String, + #[arg(long)] + input_outpoint_vout: u32, + #[arg(long)] + output_script_pubkey: String, + #[arg(long)] + output_amount: u64, + #[arg(long)] + verification_signature: Option, + #[arg(long)] + operator_xonly_pks: Option>, + }, + /// Get the status of all entities (operators and verifiers) + GetEntityStatuses { + #[arg(long)] + restart_tasks: Option, + }, + /// Internal command to get the emergency stop encryption public key + InternalGetEmergencyStopTx { + #[arg(long)] + /// A comma-separated list of move txids + move_txids: String, + }, + /// Get vergen build information + Vergen, +} + +#[derive(Subcommand)] +enum BitcoinCommands { + /// Send a transaction with CPFP package + SendTxWithCpfp { + #[arg(long)] + raw_tx: String, + #[arg(long)] + fee_payer_address: Option, + #[arg(long)] + fee_rate: Option, + #[arg(long)] + bitcoin_rpc_user: String, + #[arg(long)] + bitcoin_rpc_password: String, + }, +} + +fn get_path_from_env_or_default(env_var: &str, default: &str) -> PathBuf { + let path = std::env::var(env_var); + let path = match path { + Ok(path) => { + println!( + "Using cert path from environment variable {}: {}", + env_var, path + ); + path + } + Err(_) => { + println!("Warning: {} is not set, using default cert path: {}.\nIf this path is incorrect, please set the environment variable {} to the correct path or call the binary from the correct 
directory, or any aggregator/operator/verifier command may not work.", env_var, default, env_var); + default.to_string() + } + }; + PathBuf::from(path) +} + +// Create a minimal config with default TLS paths +fn create_minimal_config() -> BridgeConfig { + // CLIENT_KEY_PATH env var will be used if it is set + // CLIENT_CERT_PATH env var will be used if it is set + // CA_CERT_PATH env var will be used if it is set + BridgeConfig { + ca_cert_path: get_path_from_env_or_default("CA_CERT_PATH", "core/certs/ca/ca.pem"), + client_cert_path: get_path_from_env_or_default( + "CLIENT_CERT_PATH", + "core/certs/client/client.pem", + ), + client_key_path: get_path_from_env_or_default( + "CLIENT_KEY_PATH", + "core/certs/client/client.key", + ), + ..Default::default() + } +} + +async fn handle_operator_call(url: String, command: OperatorCommands) { + let config = create_minimal_config(); + let mut operator = clementine_core::rpc::get_clients( + vec![url], + clementine_core::rpc::operator_client_builder(&config), + &config, + true, + ) + .await + .expect("Exists")[0] + .clone(); + + match command { + OperatorCommands::GetDepositKeys { + deposit_outpoint_txid, + deposit_outpoint_vout, + } => { + println!( + "Getting deposit keys for outpoint {}:{}", + deposit_outpoint_txid, deposit_outpoint_vout + ); + let params = clementine_core::rpc::clementine::DepositParams { + security_council: Some(clementine::SecurityCouncil { + pks: vec![], + threshold: 0, + }), + deposit: Some(Deposit { + deposit_outpoint: Some(Outpoint { + txid: Some(clementine::Txid { + txid: hex::decode(deposit_outpoint_txid) + .expect("Failed to decode txid"), + }), + vout: deposit_outpoint_vout, + }), + deposit_data: Some(DepositData::BaseDeposit(BaseDeposit { + evm_address: vec![1; 20], + recovery_taproot_address: String::new(), + })), + }), + actors: Some(Actors { + verifiers: Some(VerifierPublicKeys { + verifier_public_keys: vec![], + }), + watchtowers: Some(XOnlyPublicKeys { + xonly_public_keys: vec![], + }), + 
operators: Some(XOnlyPublicKeys { + xonly_public_keys: vec![], + }), + }), + }; + let response = operator + .get_deposit_keys(Request::new(params)) + .await + .expect("Failed to make a request to operator"); + println!("Get deposit keys response: {:?}", response); + } + OperatorCommands::GetParams => { + let params = operator + .get_params(Empty {}) + .await + .expect("Failed to make a request to operator"); + println!("Operator params: {:?}", params); + } + OperatorCommands::Withdraw { + withdrawal_id, + input_signature, + input_outpoint_txid, + input_outpoint_vout, + output_script_pubkey, + output_amount, + } => { + println!("Processing withdrawal with id {}", withdrawal_id); + + let params = clementine_core::rpc::clementine::WithdrawParams { + withdrawal_id, + input_signature: hex::decode(input_signature) + .expect("Failed to decode input signature"), + input_outpoint: Some(Outpoint { + txid: Some(clementine_core::rpc::clementine::Txid { + txid: Txid::from_str(&input_outpoint_txid) + .expect("Failed to decode txid") + .to_byte_array() + .to_vec(), + }), + vout: input_outpoint_vout, + }), + output_script_pubkey: hex::decode(output_script_pubkey) + .expect("Failed to decode output script pubkey"), + output_amount, + }; + operator + .internal_withdraw(Request::new(params)) + .await + .expect("Failed to make a request to operator"); + } + OperatorCommands::Vergen => { + let params = Empty {}; + let response = operator + .vergen(Request::new(params)) + .await + .expect("Failed to make a request to operator"); + println!("Vergen response:\n{}", response.into_inner().response); + } + OperatorCommands::GetReimbursementTxs { + deposit_outpoint_txid, + deposit_outpoint_vout, + } => { + #[cfg(feature = "automation")] + { + println!("WARNING: Automation is enabled, do not use this command unless some error happens with the automation \n + Automation should handle the reimbursement process automatically"); + } + + println!( + "Getting kickoff txs for outpoint {}:{}", + 
deposit_outpoint_txid, deposit_outpoint_vout + ); + let mut txid_bytes = hex::decode(deposit_outpoint_txid).expect("Failed to decode txid"); + txid_bytes.reverse(); + let response = operator + .get_reimbursement_txs(Request::new(Outpoint { + txid: Some(clementine_core::rpc::clementine::Txid { txid: txid_bytes }), + vout: deposit_outpoint_vout, + })) + .await + .expect("Failed to make a request to operator") + .into_inner(); + for signed_tx in &response.signed_txs { + let tx_type: TransactionType = signed_tx + .transaction_type + .expect("Tx type should not be None") + .try_into() + .expect("Failed to convert tx type"); + let transaction: bitcoin::Transaction = + bitcoin::consensus::deserialize(&signed_tx.raw_tx) + .expect("Failed to decode transaction"); + match tx_type { + TransactionType::Kickoff => { + println!("Round tx is on chain, time to send the kickoff tx. This tx is non-standard and cannot be sent by using normal Bitcoin RPC"); + } + TransactionType::BurnUnusedKickoffConnectors => { + println!("To be able to send ready to reimburse tx, all unused kickoff connectors must be burned, otherwise the operator will get slashed. + This tx is standard and requires CPFP to be sent (last output is the anchor output)"); + } + TransactionType::ReadyToReimburse => { + println!("All unused kickoff connectors are burned, and all live kickoffs kickoff finalizer utxo's are + spent, meaning it is safe to send ready to reimburse tx. This tx is standard and requires CPFP to be sent (last output is the anchor output)"); + } + TransactionType::Reimburse => { + println!("Reimburse tx is ready to be sent. This tx is standard and requires CPFP to be sent (last output is the anchor output)"); + } + TransactionType::ChallengeTimeout => { + println!("After kickoff, challenge timeout tx needs to be sent. Due to the timelock, it can only be sent after 216 blocks pass from the kickoff tx {:?}. 
+ This tx is standard and requires CPFP to be sent (last output is the anchor output)", + transaction.input[0].previous_output.txid); + } + TransactionType::Round => { + println!("Time to send the round tx either for sending the kickoff tx, or getting the reimbursement for the past kickoff by advancing the round. Round tx is a non-standard tx and cannot be sent by using normal Bitcoin RPC. + If the round is not the first round, 216 number of blocks need to pass from the previous ready to reimburse tx {:?} (If this is not collateral)", + transaction.input[0].previous_output.txid); + } + _ => {} + } + let hex_tx = hex::encode(&signed_tx.raw_tx); + println!("Tx type: {:?}, Tx hex: {:?}", tx_type, hex_tx); + } + } + } +} + +async fn handle_verifier_call(url: String, command: VerifierCommands) { + println!("Connecting to verifier at {}", url); + let config = create_minimal_config(); + let mut verifier = clementine_core::rpc::get_clients( + vec![url], + clementine_core::rpc::verifier_client_builder(&config), + &config, + true, + ) + .await + .expect("Exists")[0] + .clone(); + + match command { + VerifierCommands::GetParams => { + let params = verifier + .get_params(Empty {}) + .await + .expect("Failed to make a request"); + println!("Verifier params: {:?}", params); + } + VerifierCommands::NonceGen { num_nonces } => { + let params = clementine_core::rpc::clementine::NonceGenRequest { num_nonces }; + let response = verifier + .nonce_gen(Request::new(params)) + .await + .expect("Failed to make a request"); + println!("Noncegen response: {:?}", response); + } + VerifierCommands::Vergen => { + let params = Empty {}; + let response = verifier + .vergen(Request::new(params)) + .await + .expect("Failed to make a request"); + println!("Vergen response:\n{}", response.into_inner().response); + } + } +} + +async fn handle_aggregator_call(url: String, command: AggregatorCommands) { + println!("Connecting to aggregator at {}", url); + let config = create_minimal_config(); + let mut 
aggregator = clementine_core::rpc::get_clients( + vec![url], + ClementineAggregatorClient::new, + &config, + false, + ) + .await + .expect("Exists")[0] + .clone(); + + match command { + AggregatorCommands::Setup => { + let setup = aggregator + .setup(Empty {}) + .await + .expect("Failed to make a request"); + println!("{:?}", setup); + } + AggregatorCommands::NewDeposit { + deposit_outpoint_txid, + deposit_outpoint_vout, + evm_address, + recovery_taproot_address, + } => { + let evm_address = match evm_address { + Some(address) => EVMAddress( + hex::decode(address) + .expect("Failed to decode evm address") + .try_into() + .expect("Failed to convert evm address to array"), + ), + None => EVMAddress([1; 20]), + }; + + let recovery_taproot_address = match recovery_taproot_address { + Some(address) => bitcoin::Address::from_str(&address) + .expect("Failed to parse recovery taproot address"), + None => bitcoin::Address::from_str( + "tb1p9k6y4my6vacczcyc4ph2m5q96hnxt5qlrqd9484qd9cwgrasc54qw56tuh", + ) + .expect("Failed to parse recovery taproot address"), + }; + + let mut deposit_outpoint_txid = + hex::decode(deposit_outpoint_txid).expect("Failed to decode txid"); + deposit_outpoint_txid.reverse(); + + let move_to_vault_tx = aggregator + .new_deposit(Deposit { + deposit_outpoint: Some(Outpoint { + txid: Some(clementine_core::rpc::clementine::Txid { + txid: deposit_outpoint_txid.clone(), + }), + vout: deposit_outpoint_vout, + }), + deposit_data: Some(DepositData::BaseDeposit(BaseDeposit { + evm_address: evm_address.0.to_vec(), + recovery_taproot_address: recovery_taproot_address + .assume_checked() + .to_string(), + })), + }) + .await + .expect("Failed to make a request"); + + let move_to_vault_tx = move_to_vault_tx.into_inner(); + + let deposit = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + raw_tx: Some(move_to_vault_tx.clone()), + deposit_outpoint: Some(Outpoint { + txid: Some(clementine_core::rpc::clementine::Txid { + txid: deposit_outpoint_txid, + }), + 
vout: deposit_outpoint_vout, + }), + }) + .await; + + match deposit { + Ok(deposit) => { + let move_txid = deposit.get_ref().txid.clone(); + let txid = bitcoin::Txid::from_byte_array( + move_txid + .try_into() + .expect("Failed to convert txid to array"), + ); + println!("Move txid: {}", txid); + } + Err(e) => { + println!("Failed to send move transaction: {}", e); + println!( + "Please send manually: {}", + hex::encode(move_to_vault_tx.raw_tx) + ); + } + } + } + AggregatorCommands::GetNofnAggregatedKey => { + let response = aggregator + .get_nofn_aggregated_xonly_pk(Request::new(Empty {})) + .await + .expect("Failed to make a request"); + let xonly_pk = bitcoin::XOnlyPublicKey::from_slice(&response.get_ref().nofn_xonly_pk) + .expect("Failed to parse xonly_pk"); + println!("{:?}", xonly_pk.to_string()); + } + AggregatorCommands::GetDepositAddress { + evm_address, + recovery_taproot_address, + network, + user_takes_after, + } => { + let response = aggregator + .get_nofn_aggregated_xonly_pk(Request::new(Empty {})) + .await + .expect("Failed to make a request"); + let xonly_pk = bitcoin::XOnlyPublicKey::from_slice(&response.get_ref().nofn_xonly_pk) + .expect("Failed to parse xonly_pk"); + + let recovery_taproot_address = match recovery_taproot_address { + Some(address) => bitcoin::Address::from_str(&address) + .expect("Failed to parse recovery taproot address"), + None => bitcoin::Address::from_str( + "tb1p9k6y4my6vacczcyc4ph2m5q96hnxt5qlrqd9484qd9cwgrasc54qw56tuh", + ) + .expect("Failed to parse recovery taproot address"), + }; + + let evm_address = match evm_address { + Some(address) => EVMAddress( + hex::decode(address) + .expect("Failed to decode evm address") + .try_into() + .expect("Failed to convert evm address to array"), + ), + None => EVMAddress([1; 20]), + }; + + let network = match network { + Some(network) => { + bitcoin::Network::from_str(&network).expect("Failed to parse network") + } + None => bitcoin::Network::Regtest, + }; + + let user_takes_after = 
match user_takes_after { + Some(amount) => amount as u16, + None => 200, + }; + + let deposit_address = clementine_core::builder::address::generate_deposit_address( + xonly_pk, + &recovery_taproot_address, + evm_address, + network, + user_takes_after, + ) + .expect("Failed to generate deposit address"); + + println!("Deposit address: {}", deposit_address.0); + } + AggregatorCommands::InternalGetEmergencyStopTx { move_txids } => { + let move_txids = move_txids + .split(',') + .map(|txid| Txid::from_str(txid).expect("Failed to parse txid")) + .collect::>(); + let emergency_stop_tx = aggregator + .internal_get_emergency_stop_tx(Request::new( + clementine::GetEmergencyStopTxRequest { + txids: move_txids + .clone() + .into_iter() + .map(|txid| clementine::Txid { + txid: txid.to_byte_array().to_vec(), + }) + .collect(), + }, + )) + .await + .expect("Failed to make a request"); + println!("Emergency stop tx: {:?}", emergency_stop_tx); + for (i, tx) in emergency_stop_tx + .into_inner() + .encrypted_emergency_stop_txs + .iter() + .enumerate() + { + println!( + "Emergency stop tx {} for move tx {}: {}", + i, + move_txids[i], + hex::encode(tx) + ); + } + } + AggregatorCommands::GetReplacementDepositAddress { + move_txid, + network, + security_council, + } => { + let mut move_txid = hex::decode(move_txid).expect("Failed to decode txid"); + move_txid.reverse(); + let move_txid = bitcoin::Txid::from_byte_array( + move_txid + .try_into() + .expect("Failed to convert txid to array"), + ); + + let response = aggregator + .get_nofn_aggregated_xonly_pk(Request::new(Empty {})) + .await + .expect("Failed to make a request"); + + let nofn_xonly_pk = + bitcoin::XOnlyPublicKey::from_slice(&response.get_ref().nofn_xonly_pk) + .expect("Failed to parse xonly_pk"); + + let network = match network { + Some(network) => { + bitcoin::Network::from_str(&network).expect("Failed to parse network") + } + None => bitcoin::Network::Regtest, + }; + + let (replacement_deposit_address, _) = + 
clementine_core::builder::address::generate_replacement_deposit_address( + move_txid, + nofn_xonly_pk, + network, + security_council.expect("Security council is required"), + ) + .expect("Failed to generate replacement deposit address"); + + println!( + "Replacement deposit address: {}", + replacement_deposit_address + ); + } + AggregatorCommands::NewReplacementDeposit { + deposit_outpoint_txid, + deposit_outpoint_vout, + old_move_txid, + } => { + let mut old_move_txid = hex::decode(old_move_txid).expect("Failed to decode txid"); + old_move_txid.reverse(); + + let mut deposit_outpoint_txid = + hex::decode(deposit_outpoint_txid).expect("Failed to decode txid"); + deposit_outpoint_txid.reverse(); + + let deposit = aggregator + .new_deposit(Deposit { + deposit_outpoint: Some(Outpoint { + txid: Some(clementine_core::rpc::clementine::Txid { + txid: deposit_outpoint_txid.clone(), + }), + vout: deposit_outpoint_vout, + }), + deposit_data: Some(DepositData::ReplacementDeposit(ReplacementDeposit { + old_move_txid: Some(clementine::Txid { + txid: old_move_txid, + }), + })), + }) + .await + .expect("Failed to make a request"); + let deposit = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + raw_tx: Some(deposit.into_inner()), + deposit_outpoint: Some(Outpoint { + txid: Some(clementine_core::rpc::clementine::Txid { + txid: deposit_outpoint_txid, + }), + vout: deposit_outpoint_vout, + }), + }) + .await + .expect("Failed to make a request"); + let move_txid = deposit.get_ref().txid.clone(); + let txid = bitcoin::Txid::from_byte_array( + move_txid + .try_into() + .expect("Failed to convert txid to array"), + ); + println!("Move txid: {}", txid); + } + AggregatorCommands::NewWithdrawal { + withdrawal_id, + input_signature, + input_outpoint_txid, + input_outpoint_vout, + output_script_pubkey, + output_amount, + verification_signature, + operator_xonly_pks, + } => { + println!("Processing withdrawal with id {}", withdrawal_id); + + let mut input_outpoint_txid_bytes = + 
hex::decode(input_outpoint_txid).expect("Failed to decode input outpoint txid"); + input_outpoint_txid_bytes.reverse(); + + let input_signature_bytes = + hex::decode(input_signature).expect("Failed to decode input signature"); + + let output_script_pubkey_bytes = + hex::decode(output_script_pubkey).expect("Failed to decode output script pubkey"); + + let params = clementine_core::rpc::clementine::WithdrawParams { + withdrawal_id, + input_signature: input_signature_bytes, + input_outpoint: Some(Outpoint { + txid: Some(clementine_core::rpc::clementine::Txid { + txid: input_outpoint_txid_bytes, + }), + vout: input_outpoint_vout, + }), + output_script_pubkey: output_script_pubkey_bytes, + output_amount, + }; + + let withdraw_params_with_sig = + clementine_core::rpc::clementine::WithdrawParamsWithSig { + withdrawal: Some(params), + verification_signature, + }; + + let operator_xonly_pks = operator_xonly_pks + .map(|pks| { + pks.iter() + .map(|pk| { + XOnlyPublicKeyRpc::from( + XOnlyPublicKey::from_str(pk) + .expect("Failed to parse xonly public key"), + ) + }) + .collect::>() + }) + .unwrap_or_default(); + + let response = aggregator + .withdraw(Request::new(AggregatorWithdrawalInput { + withdrawal: Some(withdraw_params_with_sig), + operator_xonly_pks, + })) + .await + .expect("Failed to make a request"); + + let withdraw_responses = response.get_ref().withdraw_responses.clone(); + + for (i, result) in withdraw_responses.iter().enumerate() { + println!("Operator {}: {:?}", i, result); + } + } + AggregatorCommands::GetEntityStatuses { restart_tasks } => { + let restart_tasks = restart_tasks.unwrap_or(false); + let request = GetEntityStatusesRequest { restart_tasks }; + + let response = aggregator + .get_entity_statuses(Request::new(request)) + .await + .expect("Failed to make a request"); + + println!("Entities status:"); + for entity_status in &response.get_ref().entity_statuses { + match &entity_status.entity_id { + Some(entity_id) => { + println!("Entity: {:?} - {}", 
entity_id.kind, entity_id.id); + match &entity_status.status_result { + Some(clementine_core::rpc::clementine::entity_status_with_id::StatusResult::Status(status)) => { + println!(" Automation: {}", status.automation); + println!(" Wallet balance: {}", status.wallet_balance.as_ref().map_or("N/A".to_string(), |s| s.clone())); + println!(" TX sender synced height: {}", status.tx_sender_synced_height.map_or("N/A".to_string(), |h| h.to_string())); + println!(" Finalized synced height: {}", status.finalized_synced_height.map_or("N/A".to_string(), |h| h.to_string())); + println!(" HCP last proven height: {}", status.hcp_last_proven_height.map_or("N/A".to_string(), |h| h.to_string())); + println!(" RPC tip height: {}", status.rpc_tip_height.map_or("N/A".to_string(), |h| h.to_string())); + println!(" Bitcoin syncer synced height: {}", status.bitcoin_syncer_synced_height.map_or("N/A".to_string(), |h| h.to_string())); + println!(" State manager next height: {}", status.state_manager_next_height.map_or("N/A".to_string(), |h| h.to_string())); + if !status.stopped_tasks.as_ref().is_none_or(|t| t.stopped_tasks.is_empty()) { + println!(" Stopped tasks: {:?}", status.stopped_tasks.as_ref().expect("Stopped tasks are required").stopped_tasks); + } + } + Some(clementine_core::rpc::clementine::entity_status_with_id::StatusResult::Err(error)) => { + println!(" Error: {}", error.error); + } + None => { + println!(" No status available"); + } + } + } + None => { + println!("Entity: Unknown"); + } + } + println!(); + } + + if restart_tasks { + println!("Tasks restart was requested and included in the request."); + } + } + AggregatorCommands::Vergen => { + let params = Empty {}; + let response = aggregator + .vergen(Request::new(params)) + .await + .expect("Failed to make a request"); + println!("Vergen response:\n{}", response.into_inner().response); + } + } +} + +async fn handle_bitcoin_call(url: String, command: BitcoinCommands) { + match command { + BitcoinCommands::SendTxWithCpfp { + 
raw_tx, + fee_payer_address, + fee_rate, + bitcoin_rpc_user, + bitcoin_rpc_password, + } => { + let tx_hex = hex::decode(raw_tx).expect("Failed to decode transaction"); + let tx: bitcoin::Transaction = bitcoin::consensus::deserialize(&tx_hex) + .expect("Failed to deserialize transaction"); + + println!("Transaction created: {}", tx.compute_txid()); + println!("Raw transaction: {}", hex::encode(tx_hex)); + + // Find P2A anchor output (script: 51024e73) + let p2a_vout = tx + .output + .iter() + .position(|output| { + output.script_pubkey == ScriptBuf::from_hex("51024e73").expect("valid script") + }) + .expect("P2A anchor output not found in transaction"); + + let p2a_txout = tx.output[p2a_vout].clone(); + + println!("Found P2A anchor output at vout: {}", p2a_vout); + + // Connect to Bitcoin RPC + use bitcoincore_rpc::{Auth, Client, RpcApi}; + let rpc = Client::new(&url, Auth::UserPass(bitcoin_rpc_user, bitcoin_rpc_password)) + .await + .expect("Failed to connect to Bitcoin RPC"); + + if fee_payer_address.is_none() { + let temp_address = rpc + .get_new_address( + Some("fee_payer_address"), + Some(bitcoincore_rpc::json::AddressType::Bech32m), + ) + .await + .expect("Failed to get new address"); + println!( + "You haven't provided a fee payer address, so a new one was generated: {}", + temp_address.assume_checked() + ); + println!("Please use this address for the fee payer in the next command"); + return; + } + + let fee_payer_address = bitcoin::Address::from_str( + &fee_payer_address.expect("Fee payer address is required"), + ) + .expect("Failed to parse fee payer address") + .assume_checked(); + + let fee_rate_sat_vb = fee_rate.unwrap_or(10.0) as u64; + + // Calculate package fee requirements + let parent_weight = tx.weight(); + let estimated_child_weight = bitcoin::Weight::from_wu(500); + let total_weight = parent_weight + estimated_child_weight; + let required_fee_sats = + (total_weight.to_wu() as f64 * fee_rate_sat_vb as f64 / 4.0) as u64; + let required_fee = 
bitcoin::Amount::from_sat(required_fee_sats); + + println!( + "Parent weight: {}, estimated total: {}, required fee: {} sats", + parent_weight, + total_weight, + required_fee.to_sat() + ); + + let unspent = rpc + .list_unspent( + Some(1), + Some(999999999), + Some(&[&fee_payer_address.clone()]), + None, + None, + ) + .await + .expect("Failed to list unspent outputs"); + + if unspent.is_empty() { + let unspent = rpc + .list_unspent(None, None, Some(&[&fee_payer_address.clone()]), None, None) + .await + .expect("Failed to list unspent outputs"); + if unspent.is_empty() { + println!("No unspent outputs available for fee payment."); + println!("Please send some funds to the fee payer address."); + println!("Fee payer address: {}", fee_payer_address); + } else { + println!("Unspent outputs: {:?}", unspent); + println!("Please wait for them to confirm."); + } + return; + } + + let fee_payer_utxo = unspent + .iter() + .find(|utxo| utxo.amount > required_fee) + .unwrap_or_else(|| { + panic!( + "No UTXO found with enough balance for fee payment, required fee is: {}", + required_fee + ) + }); + + // Create child transaction + use bitcoin::{transaction::Version, OutPoint, Sequence, TxIn, TxOut}; + + let child_input = TxIn { + previous_output: OutPoint { + txid: tx.compute_txid(), + vout: p2a_vout as u32, + }, + script_sig: bitcoin::ScriptBuf::new(), + sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, + witness: bitcoin::Witness::new(), + }; + + let fee_payer_input = TxIn { + previous_output: OutPoint { + txid: fee_payer_utxo.txid, + vout: fee_payer_utxo.vout, + }, + script_sig: bitcoin::ScriptBuf::new(), + sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, + witness: bitcoin::Witness::new(), + }; + + let total_input_value = bitcoin::Amount::from_sat(240) + fee_payer_utxo.amount; + let change_amount = total_input_value + .checked_sub(required_fee) + .expect("Insufficient funds for required fee"); + + let child_output = TxOut { + value: change_amount, + script_pubkey: 
fee_payer_address.script_pubkey(), + }; + + let child_input_utxo = SignRawTransactionInput { + txid: child_input.previous_output.txid, + vout: child_input.previous_output.vout, + script_pub_key: p2a_txout.script_pubkey, + redeem_script: None, + amount: Some(p2a_txout.value), + }; + + let child_tx = bitcoin::Transaction { + version: Version::non_standard(3), + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![child_input, fee_payer_input], + output: vec![child_output], + }; + + let signed_tx = rpc + .sign_raw_transaction_with_wallet(&child_tx, Some(&[child_input_utxo]), None) + .await + .expect("Failed to sign child transaction"); + + let signed_child_tx = signed_tx + .transaction() + .expect("Failed to get transaction from sign_raw_transaction_with_wallet"); + + println!( + "Child transaction signed: {}", + signed_child_tx.compute_txid() + ); + + // Submit CPFP package + let package = vec![&tx, &signed_child_tx]; + println!("Submitting CPFP package"); + + match rpc + .submit_package(&package, Some(bitcoin::Amount::ZERO), None) + .await + { + Ok(result) => { + println!("CPFP package submitted successfully"); + println!("Package result: {:?}", result); + println!("Parent transaction TXID: {}", tx.compute_txid()); + println!("Child transaction TXID: {}", signed_child_tx.compute_txid()); + } + Err(e) => { + println!("Failed to submit CPFP package: {}", e); + println!("Manual submission options:"); + println!( + "Parent tx: {}", + hex::encode(bitcoin::consensus::serialize(&tx)) + ); + println!( + "Child tx: {}", + hex::encode(bitcoin::consensus::serialize(&signed_child_tx)) + ); + } + } + } + } +} + +#[tokio::main] +async fn main() { + let cli = Cli::parse(); + + rustls::crypto::ring::default_provider() + .install_default() + .expect("Failed to install rustls crypto provider"); + + match cli.command { + Commands::Operator { command } => { + handle_operator_call(cli.node_url, command).await; + } + Commands::Verifier { command } => { + 
handle_verifier_call(cli.node_url, command).await; + } + Commands::Aggregator { command } => { + handle_aggregator_call(cli.node_url, command).await; + } + Commands::Bitcoin { command } => { + handle_bitcoin_call(cli.node_url, command).await; + } + } +} diff --git a/core/src/bitcoin_syncer.rs b/core/src/bitcoin_syncer.rs new file mode 100644 index 000000000..35f243baf --- /dev/null +++ b/core/src/bitcoin_syncer.rs @@ -0,0 +1,905 @@ +//! # Bitcoin Syncer +//! +//! This module provides common utilities to fetch Bitcoin state. Other modules +//! can use this module to operate over Bitcoin. Every block starting from +//! `paramset.start_height` is fetched and stored in the database. + +use crate::{ + config::protocol::ProtocolParamset, + database::{Database, DatabaseTransaction}, + errors::BridgeError, + extended_bitcoin_rpc::ExtendedBitcoinRpc, + task::{IntoTask, Task, TaskExt, TaskVariant, WithDelay}, +}; +use bitcoin::{block::Header, BlockHash, OutPoint}; +use bitcoincore_rpc::RpcApi; +use eyre::Context; +use std::time::Duration; +use tonic::async_trait; + +pub const BTC_SYNCER_POLL_DELAY: Duration = if cfg!(test) { + Duration::from_millis(250) +} else { + Duration::from_secs(30) +}; + +/// Represents basic information of a Bitcoin block. +#[derive(Clone, Debug)] +struct BlockInfo { + hash: BlockHash, + _header: Header, + height: u32, +} + +/// Events emitted by the Bitcoin syncer. +/// It emits the block_id of the block in the db that was saved. 
+#[derive(Clone, Debug)] +pub enum BitcoinSyncerEvent { + NewBlock(u32), + ReorgedBlock(u32), +} + +impl TryFrom<(String, i32)> for BitcoinSyncerEvent { + type Error = eyre::Report; + fn try_from(value: (String, i32)) -> Result { + match value.0.as_str() { + "new_block" => Ok(BitcoinSyncerEvent::NewBlock( + u32::try_from(value.1).wrap_err(BridgeError::IntConversionError)?, + )), + "reorged_block" => Ok(BitcoinSyncerEvent::ReorgedBlock( + u32::try_from(value.1).wrap_err(BridgeError::IntConversionError)?, + )), + _ => Err(eyre::eyre!("Invalid event type: {}", value.0)), + } + } +} + +/// Trait for handling new blocks as they are finalized +#[async_trait] +pub trait BlockHandler: Send + Sync + 'static { + /// Handle a new finalized block + async fn handle_new_block( + &mut self, + dbtx: DatabaseTransaction<'_, '_>, + block_id: u32, + block: bitcoin::Block, + height: u32, + ) -> Result<(), BridgeError>; +} + +/// Fetches the [`BlockInfo`] for a given height from Bitcoin. +async fn fetch_block_info_from_height( + rpc: &ExtendedBitcoinRpc, + height: u32, +) -> Result { + let hash = rpc + .get_block_hash(height as u64) + .await + .wrap_err("Failed to get block hash")?; + let header = rpc + .get_block_header(&hash) + .await + .wrap_err("Failed to get block header")?; + + Ok(BlockInfo { + hash, + _header: header, + height, + }) +} + +/// Saves a Bitcoin block's metadata and it's transactions into the database. 
+pub(crate) async fn save_block( + db: &Database, + dbtx: DatabaseTransaction<'_, '_>, + block: &bitcoin::Block, + block_height: u32, +) -> Result { + let block_hash = block.block_hash(); + tracing::debug!( + "Saving a block with hash of {} and height of {}", + block_hash, + block_height + ); + + // update the block_info as canonical if it already exists + let block_id = db.update_block_as_canonical(Some(dbtx), block_hash).await?; + db.upsert_full_block(Some(dbtx), block, block_height) + .await?; + + if let Some(block_id) = block_id { + return Ok(block_id); + } + + let block_id = db + .insert_block_info( + Some(dbtx), + &block_hash, + &block.header.prev_blockhash, + block_height, + ) + .await?; + + tracing::debug!( + "Saving {} transactions to a block with hash {}", + block.txdata.len(), + block_hash + ); + for tx in &block.txdata { + save_transaction_spent_utxos(db, dbtx, tx, block_id).await?; + } + + Ok(block_id) +} +async fn _get_block_info_from_hash( + db: &Database, + dbtx: DatabaseTransaction<'_, '_>, + rpc: &ExtendedBitcoinRpc, + hash: BlockHash, +) -> Result<(BlockInfo, Vec>), BridgeError> { + let block = rpc.get_block(&hash).await.wrap_err("Failed to get block")?; + let block_height = db + .get_block_info_from_hash(Some(dbtx), hash) + .await? + .ok_or_else(|| eyre::eyre!("Block not found in get_block_info_from_hash"))? + .1; + + let mut block_utxos: Vec> = Vec::new(); + for tx in &block.txdata { + let txid = tx.compute_txid(); + let spent_utxos = _get_transaction_spent_utxos(db, dbtx, txid).await?; + block_utxos.push(spent_utxos); + } + + let block_info = BlockInfo { + hash, + _header: block.header, + height: block_height, + }; + + Ok((block_info, block_utxos)) +} + +/// Saves a Bitcoin transaction and its spent UTXOs to the database. 
+async fn save_transaction_spent_utxos( + db: &Database, + dbtx: DatabaseTransaction<'_, '_>, + tx: &bitcoin::Transaction, + block_id: u32, +) -> Result<(), BridgeError> { + let txid = tx.compute_txid(); + db.insert_txid_to_block(dbtx, block_id, &txid).await?; + + for input in &tx.input { + db.insert_spent_utxo( + dbtx, + block_id, + &txid, + &input.previous_output.txid, + input.previous_output.vout as i64, + ) + .await?; + } + + Ok(()) +} +async fn _get_transaction_spent_utxos( + db: &Database, + dbtx: DatabaseTransaction<'_, '_>, + txid: bitcoin::Txid, +) -> Result, BridgeError> { + let utxos = db.get_spent_utxos_for_txid(Some(dbtx), txid).await?; + let utxos = utxos.into_iter().map(|utxo| utxo.1).collect::>(); + + Ok(utxos) +} + +/// If no block info exists in the DB, fetches the current block from the RPC and initializes the DB. +pub async fn set_initial_block_info_if_not_exists( + db: &Database, + rpc: &ExtendedBitcoinRpc, + paramset: &'static ProtocolParamset, +) -> Result<(), BridgeError> { + if db.get_max_height(None).await?.is_some() { + return Ok(()); + } + + let current_height = u32::try_from( + rpc.get_block_count() + .await + .wrap_err("Failed to get block count")?, + ) + .wrap_err(BridgeError::IntConversionError)?; + + if paramset.start_height > current_height { + tracing::error!( + "Bitcoin syncer could not find enough available blocks in chain (Likely a regtest problem). 
start_height ({}) > current_height ({})", + paramset.start_height, + current_height + ); + return Ok(()); + } + + let height = paramset.start_height; + let mut dbtx = db.begin_transaction().await?; + // first collect previous needed blocks according to paramset start height + let block_info = fetch_block_info_from_height(rpc, height).await?; + let block = rpc + .get_block(&block_info.hash) + .await + .wrap_err("Failed to get block")?; + let block_id = save_block(db, &mut dbtx, &block, height).await?; + db.insert_event(Some(&mut dbtx), BitcoinSyncerEvent::NewBlock(block_id)) + .await?; + + dbtx.commit().await?; + + Ok(()) +} + +/// Fetches the next block from Bitcoin, if it exists. Will also fetch previous +/// blocks if the parent is missing, up to 100 blocks. +/// +/// # Parameters +/// +/// - `current_height`: The height of the current tip **in the database**. +/// +/// # Returns +/// +/// - [`None`] - If no new block is available. +/// - [`Vec`] - If new blocks are found. +async fn fetch_new_blocks( + db: &Database, + rpc: &ExtendedBitcoinRpc, + current_height: u32, +) -> Result>, BridgeError> { + let next_height = current_height + 1; + + // Try to fetch the block hash for the next height. + let block_hash = match rpc.get_block_hash(next_height as u64).await { + Ok(hash) => hash, + Err(_) => return Ok(None), + }; + tracing::debug!( + "Fetching block with hash of {:?} and height of {}...", + block_hash, + next_height + ); + + // Fetch its header. + let mut block_header = rpc + .get_block_header(&block_hash) + .await + .wrap_err("Failed to get block header")?; + let mut new_blocks = vec![BlockInfo { + hash: block_hash, + _header: block_header, + height: next_height, + }]; + + // Walk backwards until the parent is found in the database. + while db + .get_block_info_from_hash(None, block_header.prev_blockhash) + .await? 
+ .is_none() + { + let prev_block_hash = block_header.prev_blockhash; + block_header = rpc + .get_block_header(&prev_block_hash) + .await + .wrap_err("Failed to get block header")?; + let new_height = new_blocks.last().expect("new_blocks is empty").height - 1; + new_blocks.push(BlockInfo { + hash: prev_block_hash, + _header: block_header, + height: new_height, + }); + + if new_blocks.len() >= 100 { + return Err(eyre::eyre!( + "Bitcoin syncer can't synchronize database with active blockchain: Too deep to continue (last saved block was at height {})", + new_height as u64 + ) + .into()); + } + } + + // The chain was built from tip to fork; reverse it to be in ascending order. + new_blocks.reverse(); + + Ok(Some(new_blocks)) +} + +/// Marks blocks above the common ancestor as non-canonical and emits reorg events. +#[tracing::instrument(skip(db, dbtx), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] +async fn handle_reorg_events( + db: &Database, + dbtx: DatabaseTransaction<'_, '_>, + common_ancestor_height: u32, +) -> Result<(), BridgeError> { + let reorg_blocks = db + .update_non_canonical_block_hashes(Some(dbtx), common_ancestor_height) + .await?; + if !reorg_blocks.is_empty() { + tracing::debug!("Reorg occurred! Block ids: {:?}", reorg_blocks); + } + + for reorg_block_id in reorg_blocks { + db.insert_event(Some(dbtx), BitcoinSyncerEvent::ReorgedBlock(reorg_block_id)) + .await?; + } + + Ok(()) +} + +/// Processes and inserts new blocks into the database, emitting a new block event for each. 
+async fn process_new_blocks(
+    db: &Database,
+    rpc: &ExtendedBitcoinRpc,
+    dbtx: DatabaseTransaction<'_, '_>,
+    new_blocks: &[BlockInfo],
+) -> Result<(), BridgeError> {
+    for block_info in new_blocks {
+        let block = rpc
+            .get_block(&block_info.hash)
+            .await
+            .wrap_err("Failed to get block")?;
+
+        let block_id = save_block(db, dbtx, &block, block_info.height).await?;
+        db.insert_event(Some(dbtx), BitcoinSyncerEvent::NewBlock(block_id))
+            .await?;
+    }
+
+    Ok(())
+}
+
+/// A task that syncs Bitcoin blocks from the Bitcoin node to the local database.
+#[derive(Debug)]
+pub struct BitcoinSyncerTask {
+    /// The database to store blocks in
+    db: Database,
+    /// The RPC client to fetch blocks from
+    rpc: ExtendedBitcoinRpc,
+    /// The current block height that has been synced
+    current_height: u32,
+}
+
+#[derive(Debug)]
+pub struct BitcoinSyncer {
+    /// The database to store blocks in
+    db: Database,
+    /// The RPC client to fetch blocks from
+    rpc: ExtendedBitcoinRpc,
+    /// The current block height that has been synced
+    current_height: u32,
+}
+
+impl BitcoinSyncer {
+    /// Creates a new Bitcoin syncer task.
+    ///
+    /// This function initializes the database with the first block if it's empty.
+    pub async fn new(
+        db: Database,
+        rpc: ExtendedBitcoinRpc,
+        paramset: &'static ProtocolParamset,
+    ) -> Result<Self, BridgeError> {
+        // Initialize the database if needed
+        set_initial_block_info_if_not_exists(&db, &rpc, paramset).await?;
+
+        // Get the current height from the database
+        let current_height = db
+            .get_max_height(None)
+            .await? 
+ .ok_or_else(|| eyre::eyre!("Block not found in BitcoinSyncer::new"))?; + + Ok(Self { + db, + rpc, + current_height, + }) + } +} +impl IntoTask for BitcoinSyncer { + type Task = WithDelay; + + fn into_task(self) -> Self::Task { + BitcoinSyncerTask { + db: self.db, + rpc: self.rpc, + current_height: self.current_height, + } + .with_delay(BTC_SYNCER_POLL_DELAY) + } +} + +#[async_trait] +impl Task for BitcoinSyncerTask { + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::BitcoinSyncer; + + #[tracing::instrument(skip(self))] + async fn run_once(&mut self) -> Result { + let new_blocks = match fetch_new_blocks(&self.db, &self.rpc, self.current_height).await? { + Some(blocks) if !blocks.is_empty() => { + tracing::debug!( + "{} new blocks found after current height {}", + blocks.len(), + self.current_height + ); + + blocks + } + _ => { + tracing::debug!( + "No new blocks found after current height: {}", + self.current_height + ); + + return Ok(false); + } + }; + + // The common ancestor is the block preceding the first new block. + // Please note that this won't always be the `self.current_height`. + // Because `fetch_next_block` can fetch older blocks, if db is missing + // them. + let common_ancestor_height = new_blocks[0].height - 1; + tracing::debug!("Common ancestor height: {:?}", common_ancestor_height); + let mut dbtx = self.db.begin_transaction().await?; + + // Mark reorg blocks (if any) as non-canonical. + handle_reorg_events(&self.db, &mut dbtx, common_ancestor_height).await?; + tracing::debug!("BitcoinSyncer: Marked reorg blocks as non-canonical"); + + // Process and insert the new blocks. + tracing::debug!("BitcoinSyncer: Processing new blocks"); + tracing::debug!("BitcoinSyncer: New blocks: {:?}", new_blocks.len()); + process_new_blocks(&self.db, &self.rpc, &mut dbtx, &new_blocks).await?; + + dbtx.commit().await?; + + // Update the current height to the tip of the new chain. 
+ tracing::debug!("BitcoinSyncer: Updating current height"); + self.current_height = new_blocks.last().expect("new_blocks is not empty").height; + tracing::debug!("BitcoinSyncer: Current height: {:?}", self.current_height); + + // Return true to indicate work was done + Ok(true) + } +} + +#[derive(Debug)] +pub struct FinalizedBlockFetcherTask { + db: Database, + btc_syncer_consumer_id: String, + paramset: &'static ProtocolParamset, + next_height: u32, + handler: H, +} + +impl FinalizedBlockFetcherTask { + pub fn new( + db: Database, + btc_syncer_consumer_id: String, + paramset: &'static ProtocolParamset, + next_height: u32, + handler: H, + ) -> Self { + Self { + db, + btc_syncer_consumer_id, + paramset, + next_height, + handler, + } + } +} + +#[async_trait] +impl Task for FinalizedBlockFetcherTask { + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::FinalizedBlockFetcher; + + async fn run_once(&mut self) -> Result { + let mut dbtx = self.db.begin_transaction().await?; + + // Poll for the next bitcoin syncer event + let Some(event) = self + .db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, &self.btc_syncer_consumer_id) + .await? + else { + // No event found, we can safely commit the transaction and return + dbtx.commit().await?; + return Ok(false); + }; + + // Process the event + let did_find_new_block = match event { + BitcoinSyncerEvent::NewBlock(block_id) => { + let current_tip_height = self + .db + .get_block_info_from_id(Some(&mut dbtx), block_id) + .await? + .ok_or(eyre::eyre!("Block not found in BlockFetcherTask",))? + .1; + let mut new_tip = false; + + // Update states to catch up to finalized chain + while current_tip_height >= self.paramset.finality_depth + && self.next_height <= current_tip_height - self.paramset.finality_depth + { + new_tip = true; + + let block = self + .db + .get_full_block(Some(&mut dbtx), self.next_height) + .await? 
+ .ok_or(eyre::eyre!( + "Block at height {} not found in BlockFetcherTask, current tip height is {}", + self.next_height, current_tip_height + ))?; + + let new_block_id = self + .db + .get_canonical_block_id_from_height(Some(&mut dbtx), self.next_height) + .await?; + + let Some(new_block_id) = new_block_id else { + tracing::error!("Block at height {} not found in BlockFetcherTask, current tip height is {}", self.next_height, current_tip_height); + return Err(eyre::eyre!( + "Block at height {} not found in BlockFetcherTask, current tip height is {}", + self.next_height, current_tip_height + ).into()); + }; + + self.handler + .handle_new_block(&mut dbtx, new_block_id, block, self.next_height) + .await?; + + self.next_height += 1; + } + + new_tip + } + BitcoinSyncerEvent::ReorgedBlock(_) => false, + }; + + dbtx.commit().await?; + // Return whether we found new blocks + Ok(did_find_new_block) + } +} + +#[cfg(test)] +mod tests { + use crate::bitcoin_syncer::BitcoinSyncer; + use crate::builder::transaction::DEFAULT_SEQUENCE; + use crate::task::{IntoTask, TaskExt}; + use crate::{database::Database, test::common::*}; + use bitcoin::absolute::Height; + use bitcoin::hashes::Hash; + use bitcoin::transaction::Version; + use bitcoin::{OutPoint, ScriptBuf, Transaction, TxIn, Witness}; + use bitcoincore_rpc::RpcApi; + + #[tokio::test] + async fn get_block_info_from_height() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + rpc.mine_blocks(1).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let hash = rpc.get_block_hash(height as u64).await.unwrap(); + let header = rpc.get_block_header(&hash).await.unwrap(); + + let block_info = super::fetch_block_info_from_height(&rpc, height) + .await + .unwrap(); + assert_eq!(block_info._header, header); + assert_eq!(block_info.hash, hash); + assert_eq!(block_info.height, height); + + 
rpc.mine_blocks(1).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + + let block_info = super::fetch_block_info_from_height(&rpc, height) + .await + .unwrap(); + assert_ne!(block_info._header, header); + assert_ne!(block_info.hash, hash); + assert_eq!(block_info.height, height); + } + + #[tokio::test] + async fn save_get_transaction_spent_utxos() { + let mut config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let mut dbtx = db.begin_transaction().await.unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let hash = rpc.get_block_hash(height as u64).await.unwrap(); + let block = rpc.get_block(&hash).await.unwrap(); + let block_id = super::save_block(&db, &mut dbtx, &block, height) + .await + .unwrap(); + + let inputs = vec![ + TxIn { + previous_output: OutPoint { + txid: bitcoin::Txid::all_zeros(), + vout: 0, + }, + script_sig: ScriptBuf::default(), + sequence: DEFAULT_SEQUENCE, + witness: Witness::default(), + }, + TxIn { + previous_output: OutPoint { + txid: bitcoin::Txid::all_zeros(), + vout: 1, + }, + script_sig: ScriptBuf::default(), + sequence: DEFAULT_SEQUENCE, + witness: Witness::default(), + }, + ]; + let tx = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: inputs.clone(), + output: vec![], + }; + super::save_transaction_spent_utxos(&db, &mut dbtx, &tx, block_id) + .await + .unwrap(); + + let utxos = super::_get_transaction_spent_utxos(&db, &mut dbtx, tx.compute_txid()) + .await + .unwrap(); + + for (index, input) in inputs.iter().enumerate() { + assert_eq!(input.previous_output, utxos[index]); + } + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn save_get_block() { + let mut config = 
create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let mut dbtx = db.begin_transaction().await.unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let hash = rpc.get_block_hash(height as u64).await.unwrap(); + let block = rpc.get_block(&hash).await.unwrap(); + + super::save_block(&db, &mut dbtx, &block, height) + .await + .unwrap(); + + let (block_info, utxos) = super::_get_block_info_from_hash(&db, &mut dbtx, &rpc, hash) + .await + .unwrap(); + assert_eq!(block_info._header, block.header); + assert_eq!(block_info.hash, hash); + assert_eq!(block_info.height, height); + for (tx_index, tx) in block.txdata.iter().enumerate() { + for (txin_index, txin) in tx.input.iter().enumerate() { + assert_eq!(txin.previous_output, utxos[tx_index][txin_index]); + } + } + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn set_initial_block_info_if_not_exists() { + let mut config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let mut dbtx = db.begin_transaction().await.unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + // let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let hash = rpc + .get_block_hash(config.protocol_paramset().start_height as u64) + .await + .unwrap(); + let block = rpc.get_block(&hash).await.unwrap(); + + assert!(super::_get_block_info_from_hash(&db, &mut dbtx, &rpc, hash) + .await + .is_err()); + + super::set_initial_block_info_if_not_exists(&db, &rpc, config.protocol_paramset()) + .await + .unwrap(); + + let (block_info, utxos) = super::_get_block_info_from_hash(&db, &mut dbtx, &rpc, hash) + .await + .unwrap(); + assert_eq!(block_info.hash, hash); + 
assert_eq!(block_info.height, config.protocol_paramset().start_height); + + for (tx_index, tx) in block.txdata.iter().enumerate() { + for (txin_index, txin) in tx.input.iter().enumerate() { + assert_eq!(txin.previous_output, utxos[tx_index][txin_index]); + } + } + } + + #[tokio::test] + async fn fetch_new_blocks_forward() { + let mut config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let mut dbtx = db.begin_transaction().await.unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let hash = rpc.get_block_hash(height as u64).await.unwrap(); + let block = rpc.get_block(&hash).await.unwrap(); + super::save_block(&db, &mut dbtx, &block, height) + .await + .unwrap(); + dbtx.commit().await.unwrap(); + + let new_blocks = super::fetch_new_blocks(&db, &rpc, height).await.unwrap(); + assert!(new_blocks.is_none()); + + let new_block_hashes = rpc.mine_blocks(1).await.unwrap(); + let new_height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let new_blocks = super::fetch_new_blocks(&db, &rpc, height) + .await + .unwrap() + .unwrap(); + assert_eq!(new_blocks.len(), 1); + assert_eq!(new_blocks.first().unwrap().height, new_height); + assert_eq!( + new_blocks.first().unwrap().hash, + *new_block_hashes.first().unwrap() + ); + } + + #[tokio::test] + async fn fetch_new_blocks_backwards() { + let mut config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + // Prepare chain. 
+ rpc.mine_blocks(1).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let hash = rpc.get_block_hash(height as u64).await.unwrap(); + let block = rpc.get_block(&hash).await.unwrap(); + + // Save the tip. + let mut dbtx = db.begin_transaction().await.unwrap(); + super::save_block(&db, &mut dbtx, &block, height) + .await + .unwrap(); + dbtx.commit().await.unwrap(); + + let new_blocks = super::fetch_new_blocks(&db, &rpc, height).await.unwrap(); + assert!(new_blocks.is_none()); + + // Mine new blocks without saving them. + let mine_count: u32 = 12; + let new_block_hashes = rpc.mine_blocks(mine_count as u64).await.unwrap(); + let new_height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + + let new_blocks = super::fetch_new_blocks(&db, &rpc, new_height - 1) + .await + .unwrap() + .unwrap(); + assert_eq!(new_blocks.len(), mine_count as usize); + for (index, block) in new_blocks.iter().enumerate() { + assert_eq!(block.height, new_height - mine_count + index as u32 + 1); + assert_eq!(block.hash, new_block_hashes[index]); + } + + // Mine too many blocks. 
+ let mine_count: u32 = 101; + rpc.mine_blocks(mine_count as u64).await.unwrap(); + let new_height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + + assert!(super::fetch_new_blocks(&db, &rpc, new_height - 1) + .await + .is_err()); + } + #[ignore] + #[tokio::test] + async fn set_non_canonical_block_hashes() { + let mut config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let hashes = rpc.mine_blocks(4).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + + super::set_initial_block_info_if_not_exists(&db, &rpc, config.protocol_paramset()) + .await + .unwrap(); + + rpc.invalidate_block(hashes.get(3).unwrap()).await.unwrap(); + rpc.invalidate_block(hashes.get(2).unwrap()).await.unwrap(); + + let mut dbtx = db.begin_transaction().await.unwrap(); + + let last_db_block = + super::_get_block_info_from_hash(&db, &mut dbtx, &rpc, *hashes.get(3).unwrap()) + .await + .unwrap(); + assert_eq!(last_db_block.0.height, height); + assert_eq!(last_db_block.0.hash, *hashes.get(3).unwrap()); + + super::handle_reorg_events(&db, &mut dbtx, height - 2) + .await + .unwrap(); + + assert!( + super::_get_block_info_from_hash(&db, &mut dbtx, &rpc, *hashes.get(3).unwrap()) + .await + .is_err() + ); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn start_bitcoin_syncer_new_block_mined() { + let mut config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + rpc.mine_blocks(1).await.unwrap(); + let height = u32::try_from(rpc.get_block_count().await.unwrap()).unwrap(); + let hash = rpc.get_block_hash(height as u64).await.unwrap(); + + let (looping_task, _cancel_tx) = + BitcoinSyncer::new(db.clone(), rpc.clone(), config.protocol_paramset()) + 
.await + .unwrap() + .into_task() + .cancelable_loop(); + + looping_task.into_bg(); + + loop { + let mut dbtx = db.begin_transaction().await.unwrap(); + + let last_db_block = + match super::_get_block_info_from_hash(&db, &mut dbtx, &rpc, hash).await { + Ok(block) => block, + Err(_) => { + dbtx.commit().await.unwrap(); + continue; + } + }; + + assert_eq!(last_db_block.0.height, height); + assert_eq!(last_db_block.0.hash, hash); + + dbtx.commit().await.unwrap(); + break; + } + } +} diff --git a/core/src/bitvm_client.rs b/core/src/bitvm_client.rs new file mode 100644 index 000000000..8c513166b --- /dev/null +++ b/core/src/bitvm_client.rs @@ -0,0 +1,771 @@ +use crate::actor::WinternitzDerivationPath; +use crate::builder::address::taproot_builder_with_scripts; +use crate::builder::script::{SpendableScript, WinternitzCommit}; + +use crate::config::protocol::ProtocolParamset; +use crate::constants::MAX_SCRIPT_REPLACEMENT_OPERATIONS; +use crate::errors::BridgeError; +use bitcoin::{self}; +use bitcoin::{ScriptBuf, XOnlyPublicKey}; + +use bitvm::chunk::api::{ + api_generate_full_tapscripts, api_generate_partial_script, Assertions, NUM_HASH, NUM_PUBS, + NUM_U256, +}; + +use bitvm::signatures::{Wots, Wots20}; +use borsh::{BorshDeserialize, BorshSerialize}; +use bridge_circuit_host::utils::{get_verifying_key, is_dev_mode}; +use std::fs; +use tokio::sync::Mutex; + +use std::str::FromStr; +use std::sync::{Arc, LazyLock, OnceLock}; +use std::time::Instant; + +/// Replacing bitvm scripts require cloning the scripts, which can be ~4GB. And this operation is done every deposit. +/// So we ensure only 1 thread is doing this at a time to avoid OOM. +pub static REPLACE_SCRIPTS_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); + +lazy_static::lazy_static! { + /// Global secp context. + pub static ref SECP: bitcoin::secp256k1::Secp256k1 = bitcoin::secp256k1::Secp256k1::new(); +} + +lazy_static::lazy_static! { + /// This is an unspendable pubkey. 
+ /// + /// See https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#constructing-and-spending-taproot-outputs + /// + /// It is used to create a taproot address where the internal key is not spendable. + /// Here are the other protocols that use this key: + /// - Babylon:https://github.com/babylonlabs-io/btc-staking-ts/blob/v0.4.0-rc.2/src/constants/internalPubkey.ts + /// - Ark: https://github.com/ark-network/ark/blob/cba48925bcc836cc55f9bb482f2cd1b76d78953e/common/tree/validation.go#L47 + /// - BitVM: https://github.com/BitVM/BitVM/blob/2dd2e0e799d2b9236dd894da3fee8c4c4893dcf1/bridge/src/scripts.rs#L16 + /// - Best in Slot: https://github.com/bestinslot-xyz/brc20-programmable-module/blob/2113bdd73430a8c3757e537cb63124a6cb33dfab/src/evm/precompiles/get_locked_pkscript_precompile.rs#L53 + /// - https://github.com/BlockstreamResearch/options/blob/36a77175919101393b49f1211732db762cc7dfc1/src/options_lib/src/contract.rs#L132 + /// + pub static ref UNSPENDABLE_XONLY_PUBKEY: bitcoin::secp256k1::XOnlyPublicKey = + XOnlyPublicKey::from_str("50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").expect("this key is valid"); +} + +/// Global BitVM cache wrapped in a OnceLock. +/// +/// # Usage +/// Use with `BITVM_CACHE.get_or_init(load_or_generate_bitvm_cache)` to get the cache or optionally load it. +/// The cache will be initialized from a file, and if that fails, the fresh data will be generated. 
+pub static BITVM_CACHE: OnceLock = OnceLock::new(); + +pub fn load_or_generate_bitvm_cache() -> BitvmCache { + let start = Instant::now(); + + let cache_path = if is_dev_mode() { + "bitvm_cache_dev.bin".to_string() + } else { + std::env::var("BITVM_CACHE_PATH").unwrap_or_else(|_| "bitvm_cache.bin".to_string()) + }; + + let bitvm_cache = { + tracing::debug!("Attempting to load BitVM cache from file: {}", cache_path); + + match BitvmCache::load_from_file(&cache_path) { + Ok(cache) => { + tracing::debug!("Loaded BitVM cache from file"); + + cache + } + Err(_) => { + tracing::info!("No BitVM cache found, generating fresh data"); + + let fresh_data = generate_fresh_data(); + + if let Err(e) = fresh_data.save_to_file(&cache_path) { + tracing::error!( + "Failed to save freshly generated BitVM cache to file: {}", + e + ); + } + fresh_data + } + } + }; + + tracing::debug!("BitVM initialization took: {:?}", start.elapsed()); + bitvm_cache +} + +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct BitvmCache { + pub disprove_scripts: Vec>, + pub replacement_places: Box, +} + +impl BitvmCache { + fn save_to_file(&self, path: &str) -> Result<(), BridgeError> { + let serialized = borsh::to_vec(self).map_err(|e| { + tracing::error!("Failed to serialize BitVM cache: {}", e); + BridgeError::ConfigError("Failed to serialize BitVM cache".to_string()) + })?; + + fs::write(path, serialized).map_err(|e| { + tracing::error!("Failed to save BitVM cache: {}", e); + BridgeError::ConfigError("Failed to save BitVM cache".to_string()) + }) + } + + fn load_from_file(path: &str) -> Result { + let bytes = fs::read(path).map_err(|e| { + tracing::error!("Failed to read BitVM cache: {}", e); + BridgeError::ConfigError("No BitVM cache found".to_string()) + })?; + + tracing::info!("Loaded BitVM cache from file, read {} bytes", bytes.len()); + + Self::try_from_slice(&bytes).map_err(|e| { + tracing::error!("Failed to deserialize BitVM cache: {}", e); + BridgeError::ConfigError("Failed to 
deserialize BitVM cache".to_string()) + }) + } +} + +fn generate_fresh_data() -> BitvmCache { + let vk = get_verifying_key(); + + let dummy_pks = ClementineBitVMPublicKeys::create_replacable(); + + let partial_scripts = api_generate_partial_script(&vk); + + let scripts = partial_scripts + .iter() + .map(|s| s.clone().to_bytes()) + .collect::>(); + + for (script_idx, script) in scripts.iter().enumerate() { + let mut pos = 0; + while pos + 20 <= script.len() { + // Check if this window matches our pattern (255u8 in the end) + if script[pos + 4..pos + 20] == [255u8; 16] { + panic!("Dummy value found in script {}", script_idx); + } + pos += 1; + } + } + + let disprove_scripts = api_generate_full_tapscripts(dummy_pks.bitvm_pks, &partial_scripts); + + let scripts: Vec> = disprove_scripts + .iter() + .map(|s| s.clone().to_bytes()) + .collect(); + + // Build mapping of dummy keys to their positions + let mut replacement_places: ClementineBitVMReplacementData = Default::default(); + // For each script + for (script_idx, script) in scripts.iter().enumerate() { + let mut pos = 0; + while pos + 20 <= script.len() { + // Check if this window matches our pattern (255u8 in the end) + if script[pos + 4..pos + 20] == [255u8; 16] { + let pk_type_idx = script[pos]; + let pk_idx = u16::from_be_bytes([script[pos + 1], script[pos + 2]]); + let digit_idx = script[pos + 3]; + + match pk_type_idx { + 0 => { + replacement_places.payout_tx_blockhash_pk[digit_idx as usize] + .push((script_idx, pos)); + } + 1 => { + replacement_places.latest_blockhash_pk[digit_idx as usize] + .push((script_idx, pos)); + } + 2 => { + replacement_places.challenge_sending_watchtowers_pk[digit_idx as usize] + .push((script_idx, pos)); + } + 3 => { + replacement_places.bitvm_pks.0[pk_idx as usize][digit_idx as usize] + .push((script_idx, pos)); + } + 4 => { + replacement_places.bitvm_pks.1[pk_idx as usize][digit_idx as usize] + .push((script_idx, pos)); + } + 5 => { + replacement_places.bitvm_pks.2[pk_idx as 
usize][digit_idx as usize] + .push((script_idx, pos)); + } + _ => { + panic!("Invalid pk type index: {}", pk_type_idx); + } + } + pos += 20; + } else { + pos += 1; + } + } + } + + BitvmCache { + disprove_scripts: scripts, + replacement_places: Box::new(replacement_places), + } +} + +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize, PartialEq, Eq)] +pub struct ClementineBitVMPublicKeys { + pub combined_method_id_constant: [u8; 32], + pub deposit_constant: [u8; 32], + pub payout_tx_blockhash_pk: ::PublicKey, + pub latest_blockhash_pk: ::PublicKey, + pub challenge_sending_watchtowers_pk: ::PublicKey, + pub bitvm_pks: bitvm::chunk::api::PublicKeys, +} + +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +#[allow(clippy::type_complexity)] +pub struct ClementineBitVMReplacementData { + pub payout_tx_blockhash_pk: [Vec<(usize, usize)>; 44], + pub latest_blockhash_pk: [Vec<(usize, usize)>; 44], + pub challenge_sending_watchtowers_pk: [Vec<(usize, usize)>; 44], + pub bitvm_pks: ( + [[Vec<(usize, usize)>; 68]; NUM_PUBS], + [[Vec<(usize, usize)>; 68]; NUM_U256], + [[Vec<(usize, usize)>; 36]; NUM_HASH], + ), +} + +impl Default for ClementineBitVMReplacementData { + fn default() -> Self { + Self { + payout_tx_blockhash_pk: std::array::from_fn(|_| Vec::new()), + latest_blockhash_pk: std::array::from_fn(|_| Vec::new()), + challenge_sending_watchtowers_pk: std::array::from_fn(|_| Vec::new()), + bitvm_pks: ( + std::array::from_fn(|_| std::array::from_fn(|_| Vec::new())), + std::array::from_fn(|_| std::array::from_fn(|_| Vec::new())), + std::array::from_fn(|_| std::array::from_fn(|_| Vec::new())), + ), + } + } +} + +impl ClementineBitVMPublicKeys { + pub fn get_replacable_value(pk_type_idx: u8, pk_idx: u16, digit_idx: u8) -> [u8; 20] { + let mut dummy_value = [255u8; 20]; + dummy_value[0] = pk_type_idx; + dummy_value[1] = pk_idx.to_be_bytes()[0]; + dummy_value[2] = pk_idx.to_be_bytes()[1]; + dummy_value[3] = digit_idx; + dummy_value + } + + pub fn get_replacable_wpks( 
+ pk_type_idx: u8, + pk_idx: u16, + ) -> [[u8; 20]; DIGIT_LEN] { + (0..DIGIT_LEN as u8) + .map(|digit_idx| Self::get_replacable_value(pk_type_idx, pk_idx, digit_idx)) + .collect::>() + .try_into() + .expect("Should be able to convert to array") + } + + pub fn get_multiple_replacable_wpks( + pk_type_idx: u8, + ) -> [[[u8; 20]; DIGIT_LEN]; PK_LEN] { + (0..PK_LEN as u16) + .map(|pk_idx| Self::get_replacable_wpks(pk_type_idx, pk_idx)) + .collect::>() + .try_into() + .expect("Should be able to convert to array") + } + + pub fn create_replacable() -> Self { + let combined_method_id_constant = [255u8; 32]; + let deposit_constant = [255u8; 32]; + let payout_tx_blockhash_pk = Self::get_replacable_wpks(0, 0); + let latest_blockhash_pk = Self::get_replacable_wpks(1, 0); + let challenge_sending_watchtowers_pk = Self::get_replacable_wpks(2, 0); + let bitvm_part_1 = Self::get_multiple_replacable_wpks(3); + let bitvm_part_2 = Self::get_multiple_replacable_wpks(4); + let bitvm_part_3 = Self::get_multiple_replacable_wpks(5); + let bitvm_pks = (bitvm_part_1, bitvm_part_2, bitvm_part_3); + Self { + combined_method_id_constant, + deposit_constant, + payout_tx_blockhash_pk, + latest_blockhash_pk, + challenge_sending_watchtowers_pk, + bitvm_pks, + } + } + + pub fn get_number_of_32_bytes_wpks() -> usize { + NUM_PUBS + NUM_U256 + } + + pub fn get_number_of_160_bytes_wpks() -> usize { + NUM_HASH + 2 + } + + pub fn from_flattened_vec(flattened_wpks: &[Vec<[u8; 20]>]) -> Self { + // These are dummy since they are coming from another source + let combined_method_id_constant = [255u8; 32]; + let deposit_constant = [255u8; 32]; + + // Use the first element for payout_tx_blockhash_pk + let payout_tx_blockhash_pk = Self::vec_to_array::<44>(&flattened_wpks[0]); + + // Use the second element for latest_blockhash_pk + let latest_blockhash_pk = Self::vec_to_array::<44>(&flattened_wpks[1]); + + // Use the third element for challenge_sending_watchtowers_pk + let challenge_sending_watchtowers_pk = 
Self::vec_to_array::<44>(&flattened_wpks[2]); + + // Create the nested arrays for bitvm_pks, starting from the fourth element + let bitvm_pks_1 = + Self::vec_slice_to_nested_array::<68, NUM_PUBS>(&flattened_wpks[3..3 + NUM_PUBS]); + + let bitvm_pks_2 = Self::vec_slice_to_nested_array::<68, NUM_U256>( + &flattened_wpks[3 + NUM_PUBS..3 + NUM_PUBS + NUM_U256], + ); + + let bitvm_pks_3 = Self::vec_slice_to_nested_array::<36, NUM_HASH>( + &flattened_wpks[3 + NUM_PUBS + NUM_U256..3 + NUM_PUBS + NUM_U256 + NUM_HASH], + ); + + Self { + combined_method_id_constant, + deposit_constant, + payout_tx_blockhash_pk, + latest_blockhash_pk, + challenge_sending_watchtowers_pk, + bitvm_pks: (bitvm_pks_1, bitvm_pks_2, bitvm_pks_3), + } + } + + pub fn to_flattened_vec(&self) -> Vec> { + let mut flattened_wpks = Vec::new(); + + // Convert each array to Vec<[u8; 20]> + flattened_wpks.push(self.payout_tx_blockhash_pk.to_vec()); + flattened_wpks.push(self.latest_blockhash_pk.to_vec()); + flattened_wpks.push(self.challenge_sending_watchtowers_pk.to_vec()); + + // Convert and add each nested array from bitvm_pks + for arr in &self.bitvm_pks.0 { + flattened_wpks.push(arr.to_vec()); + } + + for arr in &self.bitvm_pks.1 { + flattened_wpks.push(arr.to_vec()); + } + + for arr in &self.bitvm_pks.2 { + flattened_wpks.push(arr.to_vec()); + } + + flattened_wpks + } + + // Helper to convert Vec<[u8; 20]> to [[u8; 20]; N] + pub fn vec_to_array(vec: &[[u8; 20]]) -> [[u8; 20]; N] { + let mut result = [[255u8; 20]; N]; + for (i, item) in vec.iter().enumerate() { + if i < N { + result[i] = *item; + } + } + result + } + + // Helper to convert &[Vec<[u8; 20]>] to [[[u8; 20]; INNER_LEN]; OUTER_LEN] + pub fn vec_slice_to_nested_array( + slice: &[Vec<[u8; 20]>], + ) -> [[[u8; 20]; INNER_LEN]; OUTER_LEN] { + let mut result = [[[255u8; 20]; INNER_LEN]; OUTER_LEN]; + for (i, vec) in slice.iter().enumerate() { + if i < OUTER_LEN { + result[i] = Self::vec_to_array::(vec); + } + } + result + } + + pub const fn 
number_of_assert_txs() -> usize { + 33 + } + + pub const fn number_of_flattened_wpks() -> usize { + 381 + } + + pub fn get_assert_scripts( + &self, + xonly_public_key: XOnlyPublicKey, + ) -> Vec> { + let mut scripts = Vec::new(); + let first_script: Arc = Arc::new(WinternitzCommit::new( + vec![ + (self.challenge_sending_watchtowers_pk.to_vec(), 40), + (self.bitvm_pks.0[0].to_vec(), 64), + (self.bitvm_pks.1[NUM_U256 - 2].to_vec(), 64), + (self.bitvm_pks.1[NUM_U256 - 1].to_vec(), 64), + (self.bitvm_pks.2[NUM_HASH - 3].to_vec(), 32), + (self.bitvm_pks.2[NUM_HASH - 2].to_vec(), 32), + (self.bitvm_pks.2[NUM_HASH - 1].to_vec(), 32), + ], + xonly_public_key, + 4, + )); + scripts.push(first_script); + // iterate NUM_U256 6 by 6 + for i in (0..NUM_U256 - 2).step_by(6) { + let last_idx = std::cmp::min(i + 6, NUM_U256); + let script: Arc = Arc::new(WinternitzCommit::new( + self.bitvm_pks.1[i..last_idx] + .iter() + .map(|x| (x.to_vec(), 64)) + .collect::>(), + xonly_public_key, + 4, + )); + scripts.push(script); + } + // iterate NUM_HASH 12 by 12 + for i in (0..NUM_HASH - 3).step_by(12) { + let last_idx = std::cmp::min(i + 12, NUM_HASH); + let script: Arc = Arc::new(WinternitzCommit::new( + self.bitvm_pks.2[i..last_idx] + .iter() + .map(|x| (x.to_vec(), 32)) + .collect::>(), + xonly_public_key, + 4, + )); + scripts.push(script); + } + scripts + } + + pub fn get_assert_commit_data( + asserts: Assertions, + challenge_sending_watchtowers: &[u8; 20], + ) -> Vec>> { + let mut commit_data = Vec::new(); + tracing::info!( + "Getting assert commit data, challenge_sending_watchtowers: {:?}", + challenge_sending_watchtowers + ); + commit_data.push(vec![ + challenge_sending_watchtowers.to_vec(), + asserts.0[0].to_vec(), + asserts.1[NUM_U256 - 2].to_vec(), + asserts.1[NUM_U256 - 1].to_vec(), + asserts.2[NUM_HASH - 3].to_vec(), + asserts.2[NUM_HASH - 2].to_vec(), + asserts.2[NUM_HASH - 1].to_vec(), + ]); + for i in (0..NUM_U256 - 2).step_by(6) { + let last_idx = std::cmp::min(i + 6, 
NUM_U256); + commit_data.push( + asserts.1[i..last_idx] + .iter() + .map(|x| x.to_vec()) + .collect::>(), + ); + } + for i in (0..NUM_HASH - 3).step_by(12) { + let last_idx = std::cmp::min(i + 12, NUM_HASH); + commit_data.push( + asserts.2[i..last_idx] + .iter() + .map(|x| x.to_vec()) + .collect::>(), + ); + } + commit_data + } + + pub fn get_latest_blockhash_derivation( + deposit_outpoint: bitcoin::OutPoint, + paramset: &'static ProtocolParamset, + ) -> WinternitzDerivationPath { + WinternitzDerivationPath::BitvmAssert(20 * 2, 1, 0, deposit_outpoint, paramset) + } + + pub fn get_payout_tx_blockhash_derivation( + deposit_outpoint: bitcoin::OutPoint, + paramset: &'static ProtocolParamset, + ) -> WinternitzDerivationPath { + WinternitzDerivationPath::BitvmAssert(20 * 2, 0, 0, deposit_outpoint, paramset) + } + + pub fn get_challenge_sending_watchtowers_derivation( + deposit_outpoint: bitcoin::OutPoint, + paramset: &'static ProtocolParamset, + ) -> WinternitzDerivationPath { + WinternitzDerivationPath::BitvmAssert(20 * 2, 2, 0, deposit_outpoint, paramset) + } + + pub fn mini_assert_derivations_0( + deposit_outpoint: bitcoin::OutPoint, + paramset: &'static ProtocolParamset, + ) -> Vec { + vec![ + Self::get_challenge_sending_watchtowers_derivation(deposit_outpoint, paramset), // Will not go into BitVM disprove scripts + WinternitzDerivationPath::BitvmAssert(32 * 2, 3, 0, deposit_outpoint, paramset), // This is the Groth16 public output + WinternitzDerivationPath::BitvmAssert(32 * 2, 4, 12, deposit_outpoint, paramset), // This is the extra 13th NUM_U256, after chunking by 6 for the first 2 asserts + WinternitzDerivationPath::BitvmAssert(32 * 2, 4, 13, deposit_outpoint, paramset), // This is the extra 14th NUM_U256, after chunking by 6 for the first 2 asserts + WinternitzDerivationPath::BitvmAssert(16 * 2, 5, 360, deposit_outpoint, paramset), + WinternitzDerivationPath::BitvmAssert(16 * 2, 5, 361, deposit_outpoint, paramset), + WinternitzDerivationPath::BitvmAssert(16 * 2, 
5, 362, deposit_outpoint, paramset), + ] + } + + pub fn get_assert_derivations( + mini_assert_idx: usize, + deposit_outpoint: bitcoin::OutPoint, + paramset: &'static ProtocolParamset, + ) -> Vec { + if mini_assert_idx == 0 { + Self::mini_assert_derivations_0(deposit_outpoint, paramset) + } else if (1..=2).contains(&mini_assert_idx) { + // for 1, we will have 6 derivations index starting from 0 to 5 + // for 2, we will have 6 derivations index starting from 6 to 11 + let derivations: u32 = (mini_assert_idx as u32 - 1) * 6; + + let mut derivations_vec = vec![]; + for i in 0..6 { + if derivations + i < NUM_U256 as u32 - 2 { + derivations_vec.push(WinternitzDerivationPath::BitvmAssert( + 32 * 2, + 4, + derivations + i, + deposit_outpoint, + paramset, + )); + } + } + derivations_vec + } else { + let derivations: u32 = (mini_assert_idx as u32 - 3) * 12; + let mut derivations_vec = vec![]; + for i in 0..12 { + if derivations + i < NUM_HASH as u32 - 3 { + derivations_vec.push(WinternitzDerivationPath::BitvmAssert( + 16 * 2, + 5, + derivations + i, + deposit_outpoint, + paramset, + )); + } + } + derivations_vec + } + } + pub fn get_assert_taproot_leaf_hashes( + &self, + xonly_public_key: XOnlyPublicKey, + ) -> Vec { + let assert_scripts = self.get_assert_scripts(xonly_public_key); + assert_scripts + .into_iter() + .map(|script| { + let taproot_builder = taproot_builder_with_scripts(&[script.to_script_buf()]); + taproot_builder + .try_into_taptree() + .expect("taproot builder always builds a full taptree") + .root_hash() + }) + .collect::>() + } + + pub fn get_g16_verifier_disprove_scripts(&self) -> Result, BridgeError> { + if cfg!(debug_assertions) { + Ok(vec![ScriptBuf::from_bytes(vec![0x51])]) // OP_TRUE + } else { + Ok(replace_disprove_scripts(self)?) 
+ } + } +} + +pub fn replace_disprove_scripts( + pks: &ClementineBitVMPublicKeys, +) -> Result, BridgeError> { + let start = Instant::now(); + tracing::info!("Starting script replacement"); + + let cache = BITVM_CACHE.get_or_init(load_or_generate_bitvm_cache); + let replacement_places = &cache.replacement_places; + + // Calculate estimated operations to prevent DoS attacks + let estimated_operations = calculate_replacement_operations(replacement_places); + tracing::info!( + "Estimated operations for script replacement: {}", + estimated_operations + ); + if estimated_operations > MAX_SCRIPT_REPLACEMENT_OPERATIONS { + tracing::warn!( + "Rejecting script replacement: estimated {} operations exceeds limit of {}", + estimated_operations, + MAX_SCRIPT_REPLACEMENT_OPERATIONS + ); + return Err(BridgeError::BitvmReplacementResourceExhaustion( + estimated_operations, + )); + } + + tracing::info!("Estimated operations: {}", estimated_operations); + + let mut result: Vec> = cache.disprove_scripts.clone(); + + for (digit, places) in replacement_places.payout_tx_blockhash_pk.iter().enumerate() { + for (script_idx, pos) in places.iter() { + result[*script_idx][*pos..*pos + 20] + .copy_from_slice(&pks.payout_tx_blockhash_pk[digit]); + } + } + + for (digit, places) in replacement_places.latest_blockhash_pk.iter().enumerate() { + for (script_idx, pos) in places.iter() { + result[*script_idx][*pos..*pos + 20].copy_from_slice(&pks.latest_blockhash_pk[digit]); + } + } + + for (digit, places) in replacement_places + .challenge_sending_watchtowers_pk + .iter() + .enumerate() + { + for (script_idx, pos) in places.iter() { + result[*script_idx][*pos..*pos + 20] + .copy_from_slice(&pks.challenge_sending_watchtowers_pk[digit]); + } + } + + for (digit, places) in replacement_places.bitvm_pks.0.iter().enumerate() { + for (pk_idx, places) in places.iter().enumerate() { + for (script_idx, pos) in places.iter() { + result[*script_idx][*pos..*pos + 20] + 
.copy_from_slice(&pks.bitvm_pks.0[digit][pk_idx]); + } + } + } + + for (digit, places) in replacement_places.bitvm_pks.1.iter().enumerate() { + for (pk_idx, places) in places.iter().enumerate() { + for (script_idx, pos) in places.iter() { + result[*script_idx][*pos..*pos + 20] + .copy_from_slice(&pks.bitvm_pks.1[digit][pk_idx]); + } + } + } + + for (digit, places) in replacement_places.bitvm_pks.2.iter().enumerate() { + for (pk_idx, places) in places.iter().enumerate() { + for (script_idx, pos) in places.iter() { + result[*script_idx][*pos..*pos + 20] + .copy_from_slice(&pks.bitvm_pks.2[digit][pk_idx]); + } + } + } + + let result: Vec = result.into_iter().map(ScriptBuf::from_bytes).collect(); + + let elapsed = start.elapsed(); + tracing::info!( + "Script replacement completed in {:?} with {} operations", + elapsed, + estimated_operations + ); + + Ok(result) +} + +/// Helper function to calculate the total number of replacement operations +fn calculate_replacement_operations(replacement_places: &ClementineBitVMReplacementData) -> usize { + let mut total_operations = 0; + + // Count payout_tx_blockhash_pk operations + for places in &replacement_places.payout_tx_blockhash_pk { + total_operations += places.len(); + } + + // Count latest_blockhash_pk operations + for places in &replacement_places.latest_blockhash_pk { + total_operations += places.len(); + } + + // Count challenge_sending_watchtowers_pk operations + for places in &replacement_places.challenge_sending_watchtowers_pk { + total_operations += places.len(); + } + + // Count bitvm_pks operations (this is typically the largest contributor) + for digit_places in &replacement_places.bitvm_pks.0 { + for places in digit_places { + total_operations += places.len(); + } + } + + for digit_places in &replacement_places.bitvm_pks.1 { + for places in digit_places { + total_operations += places.len(); + } + } + + for digit_places in &replacement_places.bitvm_pks.2 { + for places in digit_places { + total_operations += 
places.len(); + } + } + + total_operations +} + +#[cfg(test)] +mod tests { + use bitcoin::secp256k1::rand::thread_rng; + use bitcoin::{hashes::Hash, Txid}; + + use super::*; + use crate::{actor::Actor, test::common::create_test_config_with_thread_name}; + #[test] + fn test_to_flattened_vec() { + let bitvm_pks = ClementineBitVMPublicKeys::create_replacable(); + let flattened_vec = bitvm_pks.to_flattened_vec(); + let from_vec_to_array = ClementineBitVMPublicKeys::from_flattened_vec(&flattened_vec); + assert_eq!(bitvm_pks, from_vec_to_array); + } + + #[tokio::test] + async fn test_vec_to_array_with_actor_values() { + let config = create_test_config_with_thread_name().await; + + let sk = bitcoin::secp256k1::SecretKey::new(&mut thread_rng()); + let signer = Actor::new(sk, Some(sk), config.protocol_paramset().network); + let deposit_outpoint = bitcoin::OutPoint { + txid: Txid::all_zeros(), + vout: 0, + }; + let bitvm_pks = signer + .generate_bitvm_pks_for_deposit(deposit_outpoint, config.protocol_paramset()) + .unwrap(); + + let flattened_vec = bitvm_pks.to_flattened_vec(); + let from_vec_to_array = ClementineBitVMPublicKeys::from_flattened_vec(&flattened_vec); + assert_eq!(bitvm_pks, from_vec_to_array); + } + + #[tokio::test] + #[ignore = "This test is too slow to run on every commit"] + async fn test_generate_fresh_data() { + let bitvm_cache = generate_fresh_data(); + bitvm_cache + .save_to_file("bitvm_cache_new.bin") + .expect("Failed to save BitVM cache"); + } +} diff --git a/core/src/builder/address.rs b/core/src/builder/address.rs new file mode 100644 index 000000000..7c5ac1008 --- /dev/null +++ b/core/src/builder/address.rs @@ -0,0 +1,356 @@ +//! # Bitcoin Address Construction +//! +//! Contains helper functions to create taproot addresses with given scripts and internal key. +//! Contains helper functions to create correct deposit addresses. Addresses need to be of a specific format to be +//! valid deposit addresses. 
+ +use super::script::{ + BaseDepositScript, CheckSig, Multisig, ReplacementDepositScript, SpendableScript, + TimelockScript, +}; +use crate::bitvm_client::SECP; +use crate::deposit::SecurityCouncil; +use crate::errors::BridgeError; +use crate::utils::ScriptBufExt; +use crate::{bitvm_client, EVMAddress}; +use bitcoin::address::NetworkUnchecked; +use bitcoin::{ + secp256k1::XOnlyPublicKey, + taproot::{TaprootBuilder, TaprootSpendInfo}, + Address, ScriptBuf, +}; + +use eyre::Context; + +/// A helper to construct a `TaprootBuilder` from a slice of script buffers, forming the script tree. +/// Finds the needed depth the script tree needs to be to fit all the scripts and inserts the scripts. +pub fn taproot_builder_with_scripts(scripts: impl Into>) -> TaprootBuilder { + // doesn't clone if its already an owned Vec + let mut scripts: Vec = scripts.into(); + let builder = TaprootBuilder::new(); + let num_scripts = scripts.len(); + + // Special return cases for n = 0 or n = 1 + match num_scripts { + 0 => return builder, + 1 => { + return builder + .add_leaf(0, scripts.remove(0)) + .expect("one root leaf added on empty builder") + } + _ => {} + } + + let deepest_layer_depth: u8 = ((num_scripts - 1).ilog2() + 1) as u8; + + let num_empty_nodes_in_final_depth = 2_usize.pow(deepest_layer_depth.into()) - num_scripts; + let num_nodes_in_final_depth = num_scripts - num_empty_nodes_in_final_depth; + + scripts + .into_iter() + .enumerate() + .fold(builder, |acc, (i, script)| { + let is_node_in_last_minus_one_depth = (i >= num_nodes_in_final_depth) as u8; + + acc.add_leaf( + deepest_layer_depth - is_node_in_last_minus_one_depth, + script, + ) + .expect("algorithm tested to be correct") + }) +} + +/// Calculates the depth of each leaf in a balanced Taproot tree structure. +/// The returned Vec contains the depth for each script at the corresponding index. 
+pub fn calculate_taproot_leaf_depths(num_scripts: usize) -> Vec { + match num_scripts { + 0 => return vec![], + 1 => return vec![0], + _ => {} + } + + let deepest_layer_depth: u8 = ((num_scripts - 1).ilog2() + 1) as u8; + + let num_empty_nodes_in_final_depth = 2_usize.pow(deepest_layer_depth.into()) - num_scripts; + let num_nodes_in_final_depth = num_scripts - num_empty_nodes_in_final_depth; + + (0..num_scripts) + .map(|i| { + let is_node_in_last_minus_one_depth = (i >= num_nodes_in_final_depth) as u8; + deepest_layer_depth - is_node_in_last_minus_one_depth + }) + .collect() +} + +/// Creates a taproot address with given scripts and internal key. +/// +/// # Arguments +/// +/// - `scripts`: If empty, it is most likely a key path spend address +/// - `internal_key`: If not given, will be defaulted to an unspendable x-only public key +/// - `network`: Bitcoin network +/// - If both `scripts` and `internal_key` are given, it means one can spend using both script and key path. +/// - If none given, it is an unspendable address. +/// +/// # Returns +/// +/// - [`Address`]: Generated taproot address +/// - [`TaprootSpendInfo`]: Taproot spending information +/// +/// # Panics +/// +/// Will panic if some of the operations have invalid parameters. +pub fn create_taproot_address( + scripts: &[ScriptBuf], + internal_key: Option, + network: bitcoin::Network, +) -> (Address, TaprootSpendInfo) { + // Build script tree + let taproot_builder = taproot_builder_with_scripts(scripts); + // Finalize the tree + let tree_info = match internal_key { + Some(xonly_pk) => taproot_builder + .finalize(&SECP, xonly_pk) + .expect("builder return is finalizable"), + None => taproot_builder + .finalize(&SECP, *bitvm_client::UNSPENDABLE_XONLY_PUBKEY) + .expect("builder return is finalizable"), + }; + + // Create the address + let taproot_address: Address = Address::p2tr_tweaked(tree_info.output_key(), network); + + (taproot_address, tree_info) +} + +/// Generates a deposit address for the user. 
Funds can be spent by N-of-N or +/// user can take after specified time should the deposit fail. +/// +/// # Parameters +/// +/// - `nofn_xonly_pk`: N-of-N x-only public key of the depositor +/// - `recovery_taproot_address`: User's x-only public key that can be used to +/// take funds after some time +/// - `user_evm_address`: User's EVM address. +/// - `amount`: Amount to deposit +/// - `network`: Bitcoin network to work on +/// - `user_takes_after`: User can take the funds back, after this amounts of +/// blocks have passed +/// +/// # Returns +/// +/// - [`Address`]: Deposit taproot Bitcoin address +/// - [`TaprootSpendInfo`]: Deposit address's taproot spending information +/// +/// # Panics +/// +/// Panics if given parameters are malformed. +pub fn generate_deposit_address( + nofn_xonly_pk: XOnlyPublicKey, + recovery_taproot_address: &Address, + user_evm_address: EVMAddress, + network: bitcoin::Network, + user_takes_after: u16, +) -> Result<(Address, TaprootSpendInfo), BridgeError> { + let deposit_script = BaseDepositScript::new(nofn_xonly_pk, user_evm_address).to_script_buf(); + + let recovery_script_pubkey = recovery_taproot_address + .clone() + .assume_checked() + .script_pubkey(); + + let recovery_extracted_xonly_pk = recovery_script_pubkey + .try_get_taproot_pk() + .wrap_err("Recovery taproot address is not a valid taproot address")?; + + let script_timelock = + TimelockScript::new(Some(recovery_extracted_xonly_pk), user_takes_after).to_script_buf(); + + let (addr, spend) = create_taproot_address(&[deposit_script, script_timelock], None, network); + Ok((addr, spend)) +} + +/// Builds a Taproot address specifically for replacement deposits. +/// Replacement deposits are to replace old move_to_vault transactions in case any issue is found on the bridge. +/// This address incorporates a script committing to an old move transaction ID +/// and a multisig script for the security council. 
+/// This replacement deposit address will be used to create a new deposit transaction, which will then be used to +/// sign the new related bridge deposit tx's. +/// +/// # Parameters +/// +/// - `old_move_txid`: The `Txid` of the old move_to_vault transaction that is being replaced. +/// - `nofn_xonly_pk`: The N-of-N XOnlyPublicKey for the deposit. +/// - `network`: The Bitcoin network on which the address will be used. +/// - `security_council`: The `SecurityCouncil` configuration for the multisig script. +/// +/// # Returns +/// +/// - `Ok((Address, TaprootSpendInfo))` containing the new replacement deposit address +/// and its associated `TaprootSpendInfo` if successful. +/// - `Err(BridgeError)` if any error occurs during address generation. +pub fn generate_replacement_deposit_address( + old_move_txid: bitcoin::Txid, + nofn_xonly_pk: XOnlyPublicKey, + network: bitcoin::Network, + security_council: SecurityCouncil, +) -> Result<(Address, TaprootSpendInfo), BridgeError> { + let deposit_script = + ReplacementDepositScript::new(nofn_xonly_pk, old_move_txid).to_script_buf(); + + let security_council_script = Multisig::from_security_council(security_council).to_script_buf(); + + let (addr, spend) = + create_taproot_address(&[deposit_script, security_council_script], None, network); + Ok((addr, spend)) +} + +/// Shorthand function for creating a checksig taproot address: A single checksig script with the given xonly PK and no internal key. +/// +/// # Returns +/// +/// See [`create_taproot_address`]. 
+/// +/// - [`Address`]: Checksig taproot Bitcoin address +/// - [`TaprootSpendInfo`]: Checksig address's taproot spending information +pub fn create_checksig_address( + xonly_pk: XOnlyPublicKey, + network: bitcoin::Network, +) -> (Address, TaprootSpendInfo) { + let script = CheckSig::new(xonly_pk); + create_taproot_address(&[script.to_script_buf()], None, network) +} + +#[cfg(test)] +mod tests { + use crate::{ + bitvm_client::{self, SECP}, + builder::{self, address::calculate_taproot_leaf_depths}, + }; + use bitcoin::secp256k1::rand; + use bitcoin::{ + key::{Keypair, TapTweak}, + secp256k1::SecretKey, + AddressType, ScriptBuf, XOnlyPublicKey, + }; + + #[test] + fn create_taproot_address() { + let secret_key = SecretKey::new(&mut rand::thread_rng()); + let internal_key = + XOnlyPublicKey::from_keypair(&Keypair::from_secret_key(&SECP, &secret_key)).0; + + // No internal key or scripts (key path spend). + let (address, spend_info) = + builder::address::create_taproot_address(&[], None, bitcoin::Network::Regtest); + assert_eq!(address.address_type().unwrap(), AddressType::P2tr); + assert!(address.is_related_to_xonly_pubkey( + &bitvm_client::UNSPENDABLE_XONLY_PUBKEY + .tap_tweak(&SECP, spend_info.merkle_root()) + .0 + .to_x_only_public_key() + )); + assert_eq!( + spend_info.internal_key(), + *bitvm_client::UNSPENDABLE_XONLY_PUBKEY + ); + assert!(spend_info.merkle_root().is_none()); + + // Key path spend. 
+ let (address, spend_info) = builder::address::create_taproot_address( + &[], + Some(internal_key), + bitcoin::Network::Regtest, + ); + assert_eq!(address.address_type().unwrap(), AddressType::P2tr); + assert!(address.is_related_to_xonly_pubkey( + &internal_key + .tap_tweak(&SECP, spend_info.merkle_root()) + .0 + .to_x_only_public_key() + )); + assert_eq!(spend_info.internal_key(), internal_key); + assert!(spend_info.merkle_root().is_none()); + + let scripts = [ScriptBuf::new()]; + let (address, spend_info) = builder::address::create_taproot_address( + &scripts, + Some(internal_key), + bitcoin::Network::Regtest, + ); + assert_eq!(address.address_type().unwrap(), AddressType::P2tr); + assert!(address.is_related_to_xonly_pubkey( + &internal_key + .tap_tweak(&SECP, spend_info.merkle_root()) + .0 + .to_x_only_public_key() + )); + assert_eq!(spend_info.internal_key(), internal_key); + assert!(spend_info.merkle_root().is_some()); + + let scripts = [ScriptBuf::new(), ScriptBuf::new()]; + let (address, spend_info) = builder::address::create_taproot_address( + &scripts, + Some(internal_key), + bitcoin::Network::Regtest, + ); + assert_eq!(address.address_type().unwrap(), AddressType::P2tr); + assert!(address.is_related_to_xonly_pubkey( + &internal_key + .tap_tweak(&SECP, spend_info.merkle_root()) + .0 + .to_x_only_public_key() + )); + assert_eq!(spend_info.internal_key(), internal_key); + assert!(spend_info.merkle_root().is_some()); + } + + #[test] + pub fn test_taproot_builder_with_scripts() { + for i in [0, 1, 10, 50, 100, 1000].into_iter() { + let scripts = (0..i) + .map(|k| ScriptBuf::builder().push_int(k).into_script()) + .collect::>(); + let builder = super::taproot_builder_with_scripts(scripts); + let tree_info = builder + .finalize(&SECP, *bitvm_client::UNSPENDABLE_XONLY_PUBKEY) + .unwrap(); + + assert_eq!(tree_info.script_map().len(), i as usize); + } + } + + #[test] + fn test_calculate_taproot_leaf_depths() { + // Test case 1: 0 scripts + let expected: Vec = 
vec![]; + assert_eq!(calculate_taproot_leaf_depths(0), expected); + + // Test case 2: 1 script + assert_eq!(calculate_taproot_leaf_depths(1), vec![0]); + + // Test case 3: 2 scripts (balanced tree, depth 1 for both) + assert_eq!(calculate_taproot_leaf_depths(2), vec![1, 1]); + + // Test case 4: 3 scripts (unbalanced) + // The first two scripts are at depth 2, the last is promoted to depth 1. + assert_eq!(calculate_taproot_leaf_depths(3), vec![2, 2, 1]); + + // Test case 5: 4 scripts (perfectly balanced tree, all at depth 2) + assert_eq!(calculate_taproot_leaf_depths(4), vec![2, 2, 2, 2]); + + // Test case 6: 5 scripts (unbalanced) + // num_nodes_in_final_depth is 2, so first two are at depth 3, rest are at depth 2. + // deepest_layer_depth = ilog2(4) + 1 = 3 + // num_empty_nodes = 2^3 - 5 = 3 + // num_nodes_in_final_depth = 5 - 3 = 2 + // Depths: (3, 3, 2, 2, 2) + assert_eq!(calculate_taproot_leaf_depths(5), vec![3, 3, 2, 2, 2]); + + // Test case 7: 8 scripts (perfectly balanced tree, all at depth 3) + assert_eq!( + calculate_taproot_leaf_depths(8), + vec![3, 3, 3, 3, 3, 3, 3, 3] + ); + } +} diff --git a/core/src/builder/block_cache.rs b/core/src/builder/block_cache.rs new file mode 100644 index 000000000..b269ca17c --- /dev/null +++ b/core/src/builder/block_cache.rs @@ -0,0 +1,74 @@ +use bitcoin::{Block, OutPoint, Transaction, Txid, Witness}; +use std::collections::HashMap; + +/// Block cache to optimize Txid and UTXO lookups for a block +#[derive(Debug, Clone, Default)] +pub struct BlockCache { + pub(crate) txids: HashMap, + pub(crate) spent_utxos: HashMap, + pub(crate) block_height: u32, + pub(crate) block: Option, +} + +impl BlockCache { + pub fn new() -> Self { + Self { + txids: HashMap::new(), + spent_utxos: HashMap::new(), + block_height: 0, + block: None, + } + } + + pub fn from_block(block: &Block, block_height: u32) -> Self { + let mut block_cache = Self::new(); + block_cache.update_with_block(block, block_height); + block_cache + } + + /// Updates the 
block cache with the new block + /// Creates HashMap's of txids and spent utxos for efficient lookups + pub fn update_with_block(&mut self, block: &Block, block_height: u32) { + self.block_height = block_height; + for (idx, tx) in block.txdata.iter().enumerate() { + self.txids.insert(tx.compute_txid(), idx); + + // Mark UTXOs as spent + for input in &tx.input { + self.spent_utxos.insert(input.previous_output, idx); + } + } + self.block = Some(block.clone()); + } + + pub fn get_tx_with_index(&self, idx: usize) -> Option<&Transaction> { + self.block.as_ref().map(|block| &block.txdata[idx]) + } + + pub fn get_tx_of_utxo(&self, utxo: &OutPoint) -> Option<&Transaction> { + self.spent_utxos + .get(utxo) + .and_then(|idx| self.get_tx_with_index(*idx)) + } + + pub fn get_txid_of_utxo(&self, utxo: &OutPoint) -> Option { + self.get_tx_of_utxo(utxo).map(|tx| tx.compute_txid()) + } + + pub fn get_witness_of_utxo(&self, utxo: &OutPoint) -> Option { + self.get_tx_of_utxo(utxo).and_then(|tx| { + tx.input + .iter() + .find(|input| input.previous_output == *utxo) + .map(|input| input.witness.clone()) + }) + } + + pub fn contains_txid(&self, txid: &Txid) -> bool { + self.txids.contains_key(txid) + } + + pub fn is_utxo_spent(&self, outpoint: &OutPoint) -> bool { + self.spent_utxos.contains_key(outpoint) + } +} diff --git a/core/src/builder/mod.rs b/core/src/builder/mod.rs new file mode 100644 index 000000000..afa767a64 --- /dev/null +++ b/core/src/builder/mod.rs @@ -0,0 +1,21 @@ +//! # Bitcoin Transaction Builder for Clementine Bridge +//! +//! This module provides a helper struct TxHandler for constructing +//! the tx's needed for the bridge. TxHandler's purpose is to store additional +//! data compared to a normal Bitcoin transaction to facilitate easier signing as the +//! scripts used in bridge can be quite complex. +//! +//! Modules: +//! - address: Contains helper functions to create taproot addresses and deposit addresses. +//! 
- script: Contains all kinds of scripts that are used in the bridge. There is a struct for each kind of script to +//! facilitate both easier script creation and easier signing. +//! - sighash: As its possible more than 100000 tx's can be signed in a single deposit (depends on number of round tx's, number of +//! kickoff utxo's, and number of operators), the sighash functions create a stream that verifiers and operators consume to sign the tx's +//! during a deposit. +//! - transaction: Contains the functions that create TxHandler's of every single tx needed for the bridge. For detailed information +//! about the tx's see the [clementine whitepaper](https://citrea.xyz/clementine_whitepaper.pdf). +pub mod address; +pub mod block_cache; +pub mod script; +pub mod sighash; +pub mod transaction; diff --git a/core/src/builder/script.rs b/core/src/builder/script.rs new file mode 100644 index 000000000..f991385aa --- /dev/null +++ b/core/src/builder/script.rs @@ -0,0 +1,1182 @@ +//! # Bitcoin Script Construction +//! +//! This module provides a collection of builders for creating various Bitcoin +//! scripts utilized within the Clementine bridge. It defines a `SpendableScript` +//! trait, implemented by specific script structures (e.g., `CheckSig`, +//! `WinternitzCommit`, `TimelockScript`, `BaseDepositScript`) to standardize +//! script generation and witness creation. +//! +//! Each script builder offers: +//! - A constructor to initialize the script with its specific parameters. +//! - A method to convert the script structure into a `bitcoin::ScriptBuf`. +//! - A method to generate the corresponding `bitcoin::Witness` required to spend +//! an output locked with this script. +//! +//! The module also includes `ScriptKind`, an enum to differentiate between various +//! spendable script types, facilitating dynamic dispatch and script management. +//! Helper functions like `extract_winternitz_commits` are provided for parsing +//! 
specific data committed using witnernitz keys from witness. + +#![allow(dead_code)] + +use crate::actor::WinternitzDerivationPath; +use crate::config::protocol::ProtocolParamset; +use crate::deposit::SecurityCouncil; +use crate::EVMAddress; +use bitcoin::hashes::Hash; +use bitcoin::opcodes::OP_TRUE; +use bitcoin::{ + opcodes::{all::*, OP_FALSE}, + script::Builder, + ScriptBuf, XOnlyPublicKey, +}; +use bitcoin::{taproot, Txid, Witness}; +use bitvm::signatures::winternitz::{Parameters, PublicKey, SecretKey}; +use eyre::{Context, Result}; +use std::any::Any; +use std::fmt::Debug; + +#[derive(Debug, Copy, Clone, serde::Serialize, serde::Deserialize)] +pub enum SpendPath { + ScriptSpend(usize), + KeySpend, + Unknown, +} + +/// Converts a minimal serialized u32 (trailing zeros removed) to full 4 byte representation +fn from_minimal_to_u32_le_bytes(minimal: &[u8]) -> Result<[u8; 4]> { + if minimal.len() > 4 { + return Err(eyre::eyre!("u32 bytes length is greater than 4")); + } + let mut bytes = [0u8; 4]; + bytes[..minimal.len()].copy_from_slice(minimal); + Ok(bytes) +} + +/// Extracts the committed data from the witness. +/// Note: The function is hardcoded for winternitz_log_d = 4 currently, will not work for others. 
+pub fn extract_winternitz_commits( + witness: Witness, + wt_derive_paths: &[WinternitzDerivationPath], + paramset: &'static ProtocolParamset, +) -> Result>> { + if paramset.winternitz_log_d != 4 { + return Err(eyre::eyre!("Only winternitz_log_d = 4 is supported")); + } + let mut commits: Vec> = Vec::new(); + let mut cur_witness_iter = witness.into_iter().skip(1); + + for wt_path in wt_derive_paths.iter().rev() { + let wt_params = wt_path.get_params(); + let message_digits = + (wt_params.message_byte_len() * 8).div_ceil(paramset.winternitz_log_d) as usize; + let checksum_digits = wt_params.total_digit_len() as usize - message_digits; + + let mut elements: Vec<&[u8]> = cur_witness_iter + .by_ref() + .skip(1) + .step_by(2) + .take(message_digits) + .collect(); + elements.reverse(); + + // advance iterator to skip checksum digits at the end + cur_witness_iter.by_ref().nth(checksum_digits * 2 - 1); + + commits.push( + elements + .chunks_exact(2) + .map(|digits| { + let first_digit = u32::from_le_bytes(from_minimal_to_u32_le_bytes(digits[0])?); + let second_digit = u32::from_le_bytes(from_minimal_to_u32_le_bytes(digits[1])?); + + let first_u8 = u8::try_from(first_digit) + .wrap_err("Failed to convert first digit to u8")?; + let second_u8 = u8::try_from(second_digit) + .wrap_err("Failed to convert second digit to u8")?; + + Ok(second_u8 * (1 << paramset.winternitz_log_d) + first_u8) + }) + .collect::>>()?, + ); + } + commits.reverse(); + Ok(commits) +} + +/// Extracts the committed data from the witness. +/// Note: The function is hardcoded for winternitz_log_d = 4 currently, will not work for others. 
+pub fn extract_winternitz_commits_with_sigs(
+    witness: Witness,
+    wt_derive_paths: &[WinternitzDerivationPath],
+    paramset: &'static ProtocolParamset,
+) -> Result<Vec<Vec<Vec<u8>>>> {
+    if paramset.winternitz_log_d != 4 {
+        return Err(eyre::eyre!("Only winternitz_log_d = 4 is supported"));
+    }
+    // Structure: [commit][signature_sequence][element]
+    // - commit: one signed message
+    // - signature_sequence: alternating signature elements and signed characters, ending with a checksum
+    // - element: raw bytes of either a signature part, signed character, or checksum
+    let mut commits_with_sig: Vec<Vec<Vec<u8>>> = Vec::new();
+    let mut cur_witness_iter = witness.into_iter().skip(1);
+
+    for wt_path in wt_derive_paths.iter().rev() {
+        let wt_params = wt_path.get_params();
+        let message_digits =
+            (wt_params.message_byte_len() * 8).div_ceil(paramset.winternitz_log_d) as usize;
+        let checksum_digits = wt_params.total_digit_len() as usize - message_digits;
+
+        let elements: Vec<Vec<u8>> = cur_witness_iter
+            .by_ref()
+            .take((message_digits + checksum_digits) * 2)
+            .map(|x| x.to_vec())
+            .collect();
+
+        commits_with_sig.push(elements);
+    }
+
+    Ok(commits_with_sig)
+}
+
+/// A trait that marks all script types. Each script has a `generate_script_inputs` (eg. [`WinternitzCommit::generate_script_inputs`]) function that
+/// generates the witness for the script using various arguments. A `dyn SpendableScript` is cast into a concrete [`ScriptKind`] to
+/// generate a witness, the trait object can be used to generate the script_buf.
+///
+/// We store `Arc<dyn SpendableScript>`s inside a [`super::transaction::TxHandler`] input, and we cast them into a [`ScriptKind`] when signing.
+///
+/// When creating a new Script, make sure you add it to the [`ScriptKind`] enum and add a test for it below.
+/// Otherwise, it will not be spendable.
+pub trait SpendableScript: Send + Sync + 'static + std::any::Any {
+    fn as_any(&self) -> &dyn Any;
+
+    fn kind(&self) -> ScriptKind;
+
+    fn to_script_buf(&self) -> ScriptBuf;
+}
+
+impl Debug for dyn SpendableScript {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "SpendableScript")
+    }
+}
+
+/// Struct for scripts that do not conform to any other type of SpendableScripts
+#[derive(Debug, Clone)]
+pub struct OtherSpendable(ScriptBuf);
+
+impl From<ScriptBuf> for OtherSpendable {
+    fn from(script: ScriptBuf) -> Self {
+        Self(script)
+    }
+}
+
+impl SpendableScript for OtherSpendable {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn kind(&self) -> ScriptKind {
+        ScriptKind::Other(self)
+    }
+
+    fn to_script_buf(&self) -> ScriptBuf {
+        self.0.clone()
+    }
+}
+
+impl OtherSpendable {
+    fn as_script(&self) -> &ScriptBuf {
+        &self.0
+    }
+
+    fn generate_script_inputs(&self, witness: Witness) -> Witness {
+        witness
+    }
+
+    pub fn new(script: ScriptBuf) -> Self {
+        Self(script)
+    }
+}
+
+/// Struct for scripts that only includes a CHECKSIG
+#[derive(Debug, Clone)]
+pub struct CheckSig(pub(crate) XOnlyPublicKey);
+impl SpendableScript for CheckSig {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn kind(&self) -> ScriptKind {
+        ScriptKind::CheckSig(self)
+    }
+
+    fn to_script_buf(&self) -> ScriptBuf {
+        Builder::new()
+            .push_x_only_key(&self.0)
+            .push_opcode(OP_CHECKSIG)
+            .into_script()
+    }
+}
+
+impl CheckSig {
+    pub fn generate_script_inputs(&self, signature: &taproot::Signature) -> Witness {
+        Witness::from_slice(&[signature.serialize()])
+    }
+
+    pub fn new(xonly_pk: XOnlyPublicKey) -> Self {
+        Self(xonly_pk)
+    }
+}
+
+#[derive(Clone)]
+pub struct Multisig {
+    pubkeys: Vec<XOnlyPublicKey>,
+    threshold: u32,
+}
+
+impl SpendableScript for Multisig {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn kind(&self) -> ScriptKind {
+        ScriptKind::ManualSpend(self)
+    }
+
+    fn to_script_buf(&self) -> ScriptBuf {
+        let mut script_builder = Builder::new()
+            .push_x_only_key(&self.pubkeys[0])
+            .push_opcode(OP_CHECKSIG);
+        for pubkey in self.pubkeys.iter().skip(1) {
+            script_builder = script_builder.push_x_only_key(pubkey);
+            script_builder = script_builder.push_opcode(OP_CHECKSIGADD);
+        }
+        script_builder = script_builder.push_int(self.threshold as i64);
+        script_builder = script_builder.push_opcode(OP_NUMEQUAL);
+        script_builder.into_script()
+    }
+}
+
+impl Multisig {
+    pub fn new(pubkeys: Vec<XOnlyPublicKey>, threshold: u32) -> Self {
+        Self { pubkeys, threshold }
+    }
+
+    pub fn from_security_council(security_council: SecurityCouncil) -> Self {
+        Self {
+            pubkeys: security_council.pks,
+            threshold: security_council.threshold,
+        }
+    }
+
+    pub fn generate_script_inputs(
+        &self,
+        signatures: &[Option<taproot::Signature>],
+    ) -> eyre::Result<Witness> {
+        let mut witness = Witness::new();
+
+        for signature in signatures.iter().rev() {
+            match signature {
+                Some(sig) => witness.push(sig.serialize()),
+                None => witness.push([]),
+            }
+        }
+        Ok(witness)
+    }
+}
+
+/// Struct for scripts that commit to a message using Winternitz keys
+/// Contains the Winternitz PK, CheckSig PK, message length respectively
+/// can contain multiple different Winternitz public keys for different messages
+#[derive(Clone)]
+pub struct WinternitzCommit {
+    commitments: Vec<(PublicKey, u32)>,
+    pub(crate) checksig_pubkey: XOnlyPublicKey,
+    log_d: u32,
+}
+
+impl SpendableScript for WinternitzCommit {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn kind(&self) -> ScriptKind {
+        ScriptKind::WinternitzCommit(self)
+    }
+
+    fn to_script_buf(&self) -> ScriptBuf {
+        let mut total_script = ScriptBuf::new();
+        for (index, (pubkey, _size)) in self.commitments.iter().enumerate() {
+            let params = self.get_params(index);
+            let a = bitvm::signatures::signing_winternitz::WINTERNITZ_MESSAGE_VERIFIER
+                .checksig_verify_and_clear_stack(&params, pubkey);
+            total_script.extend(a.compile().instructions().map(|x| x.expect("just created")));
+        }
+
+        total_script.push_slice(self.checksig_pubkey.serialize());
+        total_script.push_opcode(OP_CHECKSIG);
+        total_script
+    }
+}
+
+impl WinternitzCommit {
+    pub fn get_params(&self, index: usize) -> Parameters {
+        Parameters::new(self.commitments[index].1, self.log_d)
+    }
+
+    pub fn generate_script_inputs(
+        &self,
+        commit_data: &[(Vec<u8>, SecretKey)],
+        signature: &taproot::Signature,
+    ) -> Witness {
+        let mut witness = Witness::new();
+        witness.push(signature.serialize());
+        for (index, (data, secret_key)) in commit_data.iter().enumerate().rev() {
+            #[cfg(debug_assertions)]
+            {
+                let pk = bitvm::signatures::winternitz::generate_public_key(
+                    &self.get_params(index),
+                    secret_key,
+                );
+                if pk != self.commitments[index].0 {
+                    tracing::error!(
+                        "Winternitz public key mismatch len: {}",
+                        self.commitments[index].1
+                    );
+                }
+            }
+            bitvm::signatures::signing_winternitz::WINTERNITZ_MESSAGE_VERIFIER
+                .sign(&self.get_params(index), secret_key, data)
+                .into_iter()
+                .for_each(|x| witness.push(x));
+        }
+        witness
+    }
+
+    /// commitments is a Vec of winternitz public key and message length tuple
+    pub fn new(
+        commitments: Vec<(PublicKey, u32)>,
+        checksig_pubkey: XOnlyPublicKey,
+        log_d: u32,
+    ) -> Self {
+        Self {
+            commitments,
+            checksig_pubkey,
+            log_d,
+        }
+    }
+}
+
+/// Struct for scripts that include a relative timelock (by block count) and optionally a CHECKSIG if a pubkey is provided.
+/// Generates a relative timelock script with a given [`XOnlyPublicKey`] that CHECKSIG checks the signature against.
+///
+/// ATTENTION: If you want to spend a UTXO using timelock script, the
+/// condition is that (`# in the script`) ≤ (`# in the sequence of the tx`)
+/// ≤ (`# of blocks mined after UTXO appears on the chain`). However, this is not mandatory.
+/// One can spend an output delayed for some number of blocks just by using the nSequence field
+/// of the input inside the transaction. For more, see:
+///
+/// - [BIP-0068](https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki)
+/// - [BIP-0112](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki)
+#[derive(Debug, Clone)]
+pub struct TimelockScript(pub(crate) Option<XOnlyPublicKey>, u16);
+
+impl SpendableScript for TimelockScript {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn kind(&self) -> ScriptKind {
+        ScriptKind::TimelockScript(self)
+    }
+
+    fn to_script_buf(&self) -> ScriptBuf {
+        let script_builder = Builder::new()
+            .push_int(self.1 as i64)
+            .push_opcode(OP_CSV)
+            .push_opcode(OP_DROP);
+
+        if let Some(xonly_pk) = self.0 {
+            script_builder
+                .push_x_only_key(&xonly_pk)
+                .push_opcode(OP_CHECKSIG)
+        } else {
+            script_builder.push_opcode(OP_TRUE)
+        }
+        .into_script()
+    }
+}
+
+impl TimelockScript {
+    pub fn generate_script_inputs(&self, signature: Option<&taproot::Signature>) -> Witness {
+        match signature {
+            Some(sig) => Witness::from_slice(&[sig.serialize()]),
+            None => Witness::default(),
+        }
+    }
+
+    pub fn new(xonly_pk: Option<XOnlyPublicKey>, block_count: u16) -> Self {
+        Self(xonly_pk, block_count)
+    }
+}
+
+/// Struct for scripts that reveal a preimage of a OP_HASH160 and verify it against the given hash in the script.
+pub struct PreimageRevealScript(pub(crate) XOnlyPublicKey, [u8; 20]); + +impl SpendableScript for PreimageRevealScript { + fn as_any(&self) -> &dyn Any { + self + } + + fn kind(&self) -> ScriptKind { + ScriptKind::PreimageRevealScript(self) + } + + fn to_script_buf(&self) -> ScriptBuf { + Builder::new() + .push_opcode(OP_HASH160) + .push_slice(self.1) + .push_opcode(OP_EQUALVERIFY) + .push_x_only_key(&self.0) + .push_opcode(OP_CHECKSIG) + .into_script() + } +} + +impl PreimageRevealScript { + pub fn generate_script_inputs( + &self, + preimage: impl AsRef<[u8]>, + signature: &taproot::Signature, + ) -> Witness { + let mut witness = Witness::new(); + #[cfg(debug_assertions)] + assert_eq!( + bitcoin::hashes::hash160::Hash::hash(preimage.as_ref()), + bitcoin::hashes::hash160::Hash::from_byte_array(self.1), + "Preimage does not match" + ); + + witness.push(signature.serialize()); + witness.push(preimage.as_ref()); + witness + } + + pub fn new(xonly_pk: XOnlyPublicKey, hash: [u8; 20]) -> Self { + Self(xonly_pk, hash) + } +} + +/// Struct for deposit script that commits Citrea address to be deposited into onchain. 
+#[derive(Debug, Clone)] +pub struct BaseDepositScript(pub(crate) XOnlyPublicKey, EVMAddress); + +impl SpendableScript for BaseDepositScript { + fn as_any(&self) -> &dyn Any { + self + } + + fn kind(&self) -> ScriptKind { + ScriptKind::BaseDepositScript(self) + } + + fn to_script_buf(&self) -> ScriptBuf { + let citrea: [u8; 6] = "citrea".as_bytes().try_into().expect("length == 6"); + + Builder::new() + .push_x_only_key(&self.0) + .push_opcode(OP_CHECKSIG) + .push_opcode(OP_FALSE) + .push_opcode(OP_IF) + .push_slice(citrea) + .push_slice(self.1 .0) + .push_opcode(OP_ENDIF) + .into_script() + } +} + +impl BaseDepositScript { + pub fn generate_script_inputs(&self, signature: &taproot::Signature) -> Witness { + Witness::from_slice(&[signature.serialize()]) + } + + pub fn new(nofn_xonly_pk: XOnlyPublicKey, evm_address: EVMAddress) -> Self { + Self(nofn_xonly_pk, evm_address) + } +} + +/// Struct for deposit script that replaces an old move tx with a replacement deposit (to update bridge design on chain) +/// It commits to the old move txid inside the script. 
+#[derive(Debug, Clone)] +pub struct ReplacementDepositScript(pub(crate) XOnlyPublicKey, Txid); + +impl SpendableScript for ReplacementDepositScript { + fn as_any(&self) -> &dyn Any { + self + } + + fn kind(&self) -> ScriptKind { + ScriptKind::ReplacementDepositScript(self) + } + + fn to_script_buf(&self) -> ScriptBuf { + let citrea_replace: [u8; 13] = "citreaReplace".as_bytes().try_into().expect("length == 13"); + + Builder::new() + .push_x_only_key(&self.0) + .push_opcode(OP_CHECKSIG) + .push_opcode(OP_FALSE) + .push_opcode(OP_IF) + .push_slice(citrea_replace) + .push_slice(self.1.as_byte_array()) + .push_opcode(OP_ENDIF) + .into_script() + } +} + +impl ReplacementDepositScript { + pub fn generate_script_inputs(&self, signature: &taproot::Signature) -> Witness { + Witness::from_slice(&[signature.serialize()]) + } + + pub fn new(nofn_xonly_pk: XOnlyPublicKey, old_move_txid: Txid) -> Self { + Self(nofn_xonly_pk, old_move_txid) + } +} + +#[derive(Clone)] +pub enum ScriptKind<'a> { + CheckSig(&'a CheckSig), + WinternitzCommit(&'a WinternitzCommit), + TimelockScript(&'a TimelockScript), + PreimageRevealScript(&'a PreimageRevealScript), + BaseDepositScript(&'a BaseDepositScript), + ReplacementDepositScript(&'a ReplacementDepositScript), + Other(&'a OtherSpendable), + ManualSpend(&'a Multisig), +} + +#[cfg(test)] +fn get_script_from_arr( + arr: &Vec>, +) -> Option<(usize, &T)> { + arr.iter() + .enumerate() + .find_map(|(i, x)| x.as_any().downcast_ref::().map(|x| (i, x))) +} +#[cfg(test)] +mod tests { + use super::*; + use crate::actor::{Actor, WinternitzDerivationPath}; + use crate::bitvm_client::{self, UNSPENDABLE_XONLY_PUBKEY}; + use crate::builder::address::create_taproot_address; + use crate::config::protocol::ProtocolParamsetName; + use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; + use crate::operator::RoundIndex; + use bitcoin::hashes::Hash; + use bitcoin::secp256k1::rand::{self, Rng}; + use bitcoin::secp256k1::{PublicKey, SecretKey}; + use 
bitcoincore_rpc::RpcApi; + use std::sync::Arc; + + // Create some dummy values for testing. + // Note: These values are not cryptographically secure and are only used for tests. + fn dummy_xonly() -> XOnlyPublicKey { + // 32 bytes array filled with 0x03. + *bitvm_client::UNSPENDABLE_XONLY_PUBKEY + } + + fn dummy_scriptbuf() -> ScriptBuf { + ScriptBuf::from_hex("51").expect("valid hex") + } + + fn dummy_pubkey() -> PublicKey { + bitvm_client::UNSPENDABLE_XONLY_PUBKEY.public_key(bitcoin::secp256k1::Parity::Even) + } + + fn dummy_params() -> Parameters { + Parameters::new(32, 4) + } + + fn dummy_evm_address() -> EVMAddress { + // For testing purposes, we use a dummy 20-byte array. + EVMAddress([0u8; 20]) + } + + #[test] + fn test_dynamic_casting_extended() { + // Build a collection of SpendableScript implementations. + let scripts: Vec> = vec![ + Box::new(OtherSpendable::new(dummy_scriptbuf())), + Box::new(CheckSig::new(dummy_xonly())), + Box::new(WinternitzCommit::new( + vec![(vec![[0u8; 20]; 32], 32)], + dummy_xonly(), + 4, + )), + Box::new(TimelockScript::new(Some(dummy_xonly()), 10)), + Box::new(PreimageRevealScript::new(dummy_xonly(), [0; 20])), + Box::new(BaseDepositScript::new(dummy_xonly(), dummy_evm_address())), + ]; + + // helper closures that return Option<(usize, &T)> using get_script_from_arr. 
+ let checksig = get_script_from_arr::(&scripts); + let winternitz = get_script_from_arr::(&scripts); + let timelock = get_script_from_arr::(&scripts); + let preimage = get_script_from_arr::(&scripts); + let deposit = get_script_from_arr::(&scripts); + let others = get_script_from_arr::(&scripts); + + assert!(checksig.is_some(), "CheckSig not found"); + assert!(winternitz.is_some(), "WinternitzCommit not found"); + assert!(timelock.is_some(), "TimelockScript not found"); + assert!(preimage.is_some(), "PreimageRevealScript not found"); + assert!(deposit.is_some(), "DepositScript not found"); + assert!(others.is_some(), "OtherSpendable not found"); + + // Print found items. + println!("CheckSig: {:?}", checksig.unwrap().1); + // println!("WinternitzCommit: {:?}", winternitz.unwrap().1); + println!("TimelockScript: {:?}", timelock.unwrap().1); + // println!("PreimageRevealScript: {:?}", preimage.unwrap().1); + // println!("DepositScript: {:?}", deposit.unwrap().1); + println!("OtherSpendable: {:?}", others.unwrap().1); + } + + #[test] + fn test_dynamic_casting() { + use crate::bitvm_client; + let scripts: Vec> = vec![ + Box::new(OtherSpendable(ScriptBuf::from_hex("51").expect(""))), + Box::new(CheckSig(*bitvm_client::UNSPENDABLE_XONLY_PUBKEY)), + ]; + + let otherspendable = scripts + .first() + .expect("") + .as_any() + .downcast_ref::() + .expect(""); + + let checksig = get_script_from_arr::(&scripts).expect(""); + println!("{:?}", otherspendable); + println!("{:?}", checksig); + } + + #[test] + fn test_scriptkind_completeness() { + let script_variants: Vec<(&str, Arc)> = vec![ + ("CheckSig", Arc::new(CheckSig::new(dummy_xonly()))), + ( + "WinternitzCommit", + Arc::new(WinternitzCommit::new( + vec![(vec![[0u8; 20]; 32], 32)], + dummy_xonly(), + 4, + )), + ), + ( + "TimelockScript", + Arc::new(TimelockScript::new(Some(dummy_xonly()), 15)), + ), + ( + "PreimageRevealScript", + Arc::new(PreimageRevealScript::new(dummy_xonly(), [1; 20])), + ), + ( + "BaseDepositScript", 
+ Arc::new(BaseDepositScript::new(dummy_xonly(), dummy_evm_address())), + ), + ( + "ReplacementDepositScript", + Arc::new(ReplacementDepositScript::new( + dummy_xonly(), + Txid::all_zeros(), + )), + ), + ("Other", Arc::new(OtherSpendable::new(dummy_scriptbuf()))), + ]; + + for (expected, script) in script_variants { + let kind = script.kind(); + match (expected, kind) { + ("CheckSig", ScriptKind::CheckSig(_)) => (), + ("WinternitzCommit", ScriptKind::WinternitzCommit(_)) => (), + ("TimelockScript", ScriptKind::TimelockScript(_)) => (), + ("PreimageRevealScript", ScriptKind::PreimageRevealScript(_)) => (), + ("BaseDepositScript", ScriptKind::BaseDepositScript(_)) => (), + ("ReplacementDepositScript", ScriptKind::ReplacementDepositScript(_)) => (), + ("Other", ScriptKind::Other(_)) => (), + (s, _) => panic!("ScriptKind conversion not comprehensive for variant: {}", s), + } + } + } + // Tests for the spendability of all scripts + use crate::bitvm_client::SECP; + use crate::builder; + use crate::builder::transaction::input::SpendableTxIn; + use crate::builder::transaction::output::UnspentTxOut; + use crate::builder::transaction::{TransactionType, TxHandlerBuilder, DEFAULT_SEQUENCE}; + use bitcoin::{Amount, OutPoint, Sequence, TxOut, Txid}; + + async fn create_taproot_test_tx( + rpc: &ExtendedBitcoinRpc, + scripts: Vec>, + spend_path: SpendPath, + amount: Amount, + ) -> (TxHandlerBuilder, bitcoin::Address) { + let (address, taproot_spend_info) = builder::address::create_taproot_address( + &scripts + .iter() + .map(|s| s.to_script_buf()) + .collect::>(), + None, + bitcoin::Network::Regtest, + ); + + let outpoint = rpc.send_to_address(&address, amount).await.unwrap(); + let sequence = if let SpendPath::ScriptSpend(idx) = spend_path { + if let Some(script) = scripts.get(idx) { + match script.kind() { + ScriptKind::TimelockScript(&TimelockScript(_, seq)) => { + Sequence::from_height(seq) + } + _ => DEFAULT_SEQUENCE, + } + } else { + DEFAULT_SEQUENCE + } + } else { + 
DEFAULT_SEQUENCE + }; + let mut builder = TxHandlerBuilder::new(TransactionType::Dummy); + builder = builder.add_input( + crate::rpc::clementine::NormalSignatureKind::OperatorSighashDefault, + SpendableTxIn::new( + outpoint, + TxOut { + value: amount, + script_pubkey: address.script_pubkey(), + }, + scripts.clone(), + Some(taproot_spend_info.clone()), + ), + spend_path, + sequence, + ); + + builder = builder.add_output(UnspentTxOut::new( + TxOut { + value: amount - Amount::from_sat(5000), // Subtract fee + script_pubkey: address.script_pubkey(), + }, + scripts, + Some(taproot_spend_info), + )); + + (builder, address) + } + + use crate::test::common::*; + + #[tokio::test] + + async fn test_checksig_spendable() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let kp = bitcoin::secp256k1::Keypair::new(&SECP, &mut rand::thread_rng()); + let xonly_pk = kp.public_key().x_only_public_key().0; + + let scripts: Vec> = vec![Arc::new(CheckSig::new(xonly_pk))]; + let (builder, _) = create_taproot_test_tx( + &rpc, + scripts, + SpendPath::ScriptSpend(0), + Amount::from_sat(10_000), + ) + .await; + let mut tx = builder.finalize(); + + // Should be able to sign with the key + let signer = Actor::new( + kp.secret_key(), + Some(bitcoin::secp256k1::SecretKey::new(&mut rand::thread_rng())), + bitcoin::Network::Regtest, + ); + + signer + .tx_sign_and_fill_sigs(&mut tx, &[], None) + .expect("should be able to sign checksig"); + let tx = tx + .promote() + .expect("the transaction should be fully signed"); + + rpc.send_raw_transaction(tx.get_cached_tx()) + .await + .expect("bitcoin RPC did not accept transaction"); + } + + #[tokio::test] + async fn test_winternitz_commit_spendable() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + let kp = bitcoin::secp256k1::Keypair::new(&SECP, 
&mut rand::thread_rng()); + let xonly_pk = kp.public_key().x_only_public_key().0; + + let deposit_outpoint = OutPoint { + txid: Txid::all_zeros(), + vout: 0, + }; + + let derivation = WinternitzDerivationPath::BitvmAssert( + 64, + 3, + 0, + deposit_outpoint, + ProtocolParamsetName::Regtest.into(), + ); + + let derivation2 = WinternitzDerivationPath::BitvmAssert( + 64, + 2, + 0, + deposit_outpoint, + ProtocolParamsetName::Regtest.into(), + ); + + let signer = Actor::new( + kp.secret_key(), + Some(kp.secret_key()), + bitcoin::Network::Regtest, + ); + + let script: Arc = Arc::new(WinternitzCommit::new( + vec![ + ( + signer + .derive_winternitz_pk(derivation.clone()) + .expect("failed to derive Winternitz public key"), + 64, + ), + ( + signer + .derive_winternitz_pk(derivation2.clone()) + .expect("failed to derive Winternitz public key"), + 64, + ), + ], + xonly_pk, + 4, + )); + + let scripts = vec![script]; + let (builder, _) = create_taproot_test_tx( + rpc, + scripts, + SpendPath::ScriptSpend(0), + Amount::from_sat(10_000), + ) + .await; + let mut tx = builder.finalize(); + + signer + .tx_sign_winternitz( + &mut tx, + &[ + (vec![0; 32], derivation.clone()), + (vec![0; 32], derivation2.clone()), + ], + ) + .expect("failed to partially sign commitments"); + + let tx = tx + .promote() + .expect("the transaction should be fully signed"); + + rpc.send_raw_transaction(tx.get_cached_tx()) + .await + .expect("bitcoin RPC did not accept transaction"); + } + + #[tokio::test] + async fn test_timelock_script_spendable() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + let kp = bitcoin::secp256k1::Keypair::new(&SECP, &mut rand::thread_rng()); + let xonly_pk = kp.public_key().x_only_public_key().0; + + let scripts: Vec> = + vec![Arc::new(TimelockScript::new(Some(xonly_pk), 15))]; + let (builder, _) = create_taproot_test_tx( + rpc, + scripts, + SpendPath::ScriptSpend(0), + 
Amount::from_sat(10_000), + ) + .await; + + let mut tx = builder.finalize(); + + let signer = Actor::new( + kp.secret_key(), + Some(bitcoin::secp256k1::SecretKey::new(&mut rand::thread_rng())), + bitcoin::Network::Regtest, + ); + + signer + .tx_sign_and_fill_sigs(&mut tx, &[], None) + .expect("should be able to sign timelock"); + + rpc.send_raw_transaction(tx.get_cached_tx()) + .await + .expect_err("should not pass without 15 blocks"); + + rpc.mine_blocks(15).await.expect("failed to mine blocks"); + + rpc.send_raw_transaction(tx.get_cached_tx()) + .await + .expect("should pass after 15 blocks"); + } + + #[tokio::test] + async fn test_preimage_reveal_script_spendable() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let kp = bitcoin::secp256k1::Keypair::new(&SECP, &mut rand::thread_rng()); + let xonly_pk = kp.public_key().x_only_public_key().0; + + let preimage = [1; 20]; + let hash = bitcoin::hashes::hash160::Hash::hash(&preimage); + let script: Arc = + Arc::new(PreimageRevealScript::new(xonly_pk, hash.to_byte_array())); + let scripts = vec![script]; + let (builder, _) = create_taproot_test_tx( + &rpc, + scripts, + SpendPath::ScriptSpend(0), + Amount::from_sat(10_000), + ) + .await; + let mut tx = builder.finalize(); + + let signer = Actor::new( + kp.secret_key(), + Some(bitcoin::secp256k1::SecretKey::new(&mut rand::thread_rng())), + bitcoin::Network::Regtest, + ); + + signer + .tx_sign_preimage(&mut tx, preimage) + .expect("failed to sign preimage reveal"); + + let final_tx = tx + .promote() + .expect("the transaction should be fully signed"); + + rpc.send_raw_transaction(final_tx.get_cached_tx()) + .await + .expect("bitcoin RPC did not accept transaction"); + } + + #[tokio::test] + async fn test_base_deposit_script_spendable() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc 
= regtest.rpc().clone(); + + let kp = bitcoin::secp256k1::Keypair::new(&SECP, &mut rand::thread_rng()); + let xonly_pk = kp.public_key().x_only_public_key().0; + + let script: Arc = + Arc::new(BaseDepositScript::new(xonly_pk, EVMAddress([2; 20]))); + let scripts = vec![script]; + let (builder, _) = create_taproot_test_tx( + &rpc, + scripts, + SpendPath::ScriptSpend(0), + Amount::from_sat(10_000), + ) + .await; + let mut tx = builder.finalize(); + + let signer = Actor::new( + kp.secret_key(), + Some(bitcoin::secp256k1::SecretKey::new(&mut rand::thread_rng())), + bitcoin::Network::Regtest, + ); + + signer + .tx_sign_and_fill_sigs(&mut tx, &[], None) + .expect("should be able to sign base deposit"); + + rpc.send_raw_transaction(tx.get_cached_tx()) + .await + .expect("bitcoin RPC did not accept transaction"); + } + + #[tokio::test] + async fn test_replacement_deposit_script_spendable() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let kp = bitcoin::secp256k1::Keypair::new(&SECP, &mut rand::thread_rng()); + let xonly_pk = kp.public_key().x_only_public_key().0; + + let script: Arc = + Arc::new(ReplacementDepositScript::new(xonly_pk, Txid::all_zeros())); + let scripts = vec![script]; + let (builder, _) = create_taproot_test_tx( + &rpc, + scripts, + SpendPath::ScriptSpend(0), + Amount::from_sat(10_000), + ) + .await; + let mut tx = builder.finalize(); + + let signer = Actor::new( + kp.secret_key(), + Some(bitcoin::secp256k1::SecretKey::new(&mut rand::thread_rng())), + bitcoin::Network::Regtest, + ); + + signer + .tx_sign_and_fill_sigs(&mut tx, &[], None) + .expect("should be able to sign replacement deposit"); + + rpc.send_raw_transaction(tx.get_cached_tx()) + .await + .expect("bitcoin RPC did not accept transaction"); + } + + #[tokio::test] + async fn test_extract_commit_data() { + let config = create_test_config_with_thread_name().await; + let kp = 
bitcoin::secp256k1::Keypair::new(&SECP, &mut rand::thread_rng()); + + let signer = Actor::new( + kp.secret_key(), + Some(kp.secret_key()), + bitcoin::Network::Regtest, + ); + + let kickoff = + WinternitzDerivationPath::Kickoff(RoundIndex::Round(0), 0, config.protocol_paramset()); + let bitvm_assert = WinternitzDerivationPath::BitvmAssert( + 64, + 3, + 0, + OutPoint { + txid: Txid::all_zeros(), + vout: 0, + }, + config.protocol_paramset(), + ); + let commit_script = WinternitzCommit::new( + vec![ + ( + signer + .derive_winternitz_pk(kickoff.clone()) + .expect("failed to derive Winternitz public key"), + 40, + ), + ( + signer + .derive_winternitz_pk(bitvm_assert.clone()) + .expect("failed to derive Winternitz public key"), + 64, + ), + ], + signer.xonly_public_key, + 4, + ); + let signature = taproot::Signature::from_slice(&[1u8; 64]).expect("valid signature"); + let kickoff_blockhash: Vec = (0..20u8).collect(); + let assert_commit_data: Vec = (0..32u8).collect(); + let witness = commit_script.generate_script_inputs( + &[ + ( + kickoff_blockhash.clone(), + signer.get_derived_winternitz_sk(kickoff.clone()).unwrap(), + ), + ( + assert_commit_data.clone(), + signer + .get_derived_winternitz_sk(bitvm_assert.clone()) + .unwrap(), + ), + ], + &signature, + ); + let extracted = extract_winternitz_commits( + witness, + &[kickoff, bitvm_assert], + config.protocol_paramset(), + ) + .unwrap(); + tracing::info!("{:?}", extracted); + assert_eq!(extracted[0], kickoff_blockhash); + assert_eq!(extracted[1], assert_commit_data); + } + + #[tokio::test] + async fn test_multisig_matches_descriptor() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + // select a random number of public keys + let num_pks = rand::thread_rng().gen_range(1..=10); + let threshold = rand::thread_rng().gen_range(1..=num_pks); + + let mut pks = Vec::new(); + for _ in 0..num_pks { + let secret_key = 
SecretKey::new(&mut rand::thread_rng()); + let kp = bitcoin::secp256k1::Keypair::from_secret_key(&*SECP, &secret_key); + pks.push(kp.public_key().x_only_public_key().0); + } + + let unspendable_xonly_pk_str = (*UNSPENDABLE_XONLY_PUBKEY).to_string(); + let descriptor = format!( + "tr({},multi_a({},{}))", + unspendable_xonly_pk_str, + threshold, + pks.iter() + .map(|pk| pk.to_string()) + .collect::>() + .join(",") + ); + + let descriptor_info = rpc.get_descriptor_info(&descriptor).await.expect(""); + + let descriptor = descriptor_info.descriptor; + + let addresses = rpc.derive_addresses(&descriptor, None).await.expect(""); + + tracing::info!("{:?}", addresses); + + let multisig_address = addresses[0].clone().assume_checked(); + + let multisig = Multisig::new(pks, threshold); + + let (addr, _) = create_taproot_address( + &[multisig.to_script_buf()], + None, + config.protocol_paramset().network, + ); + + // println!("addr: {:?}", addr); + // println!("multisig_address: {:?}", multisig_address); + + assert_eq!(addr, multisig_address); + } +} diff --git a/core/src/builder/sighash.rs b/core/src/builder/sighash.rs new file mode 100644 index 000000000..b9c3d2a0b --- /dev/null +++ b/core/src/builder/sighash.rs @@ -0,0 +1,762 @@ +//! # Sighash Builder +//! +//! This module provides functions and types for constructing signature hashes (sighashes) for the transactions in the Clementine bridge protocol. +//! Sighash is the message that is signed by the private key of the signer. +//! +//! The module supports generating sighash streams for both N-of-N (verifier) and operator signatures, as well as utilities for signature identification and protocol-specific signature requirements. +//! As the number of transactions can reach around 100_000 depending on number of entities in the protocol, we generate the sighashes in a stream to avoid memory issues. +//! +//! ## Responsibilities +//! +//! 
- Calculate the number of required signatures for various protocol roles and transaction types. +//! - Generate sighash streams for all protocol-required signatures for a deposit, for both verifiers and operators. +//! - Provide types for tracking signature requirements and spend paths. +//! +//! ## Key Types for Signatures +//! +//! - [`PartialSignatureInfo`] - Identifies a signature by operator, round, and kickoff index. +//! - [`SignatureInfo`] - Uniquely identifies a signature, including spend path of the signature. +//! - [`TapTweakData`] - Describes the spend path (key or script) and any required tweak data. +//! +//! For more on sighash types, see: + +use crate::bitvm_client; +use crate::builder::transaction::deposit_signature_owner::EntityType; +use crate::builder::transaction::sign::get_kickoff_utxos_to_sign; +use crate::builder::transaction::{ + create_txhandlers, ContractContext, ReimburseDbCache, TransactionType, TxHandlerCache, +}; +use crate::config::BridgeConfig; +use crate::database::Database; +use crate::deposit::{DepositData, KickoffData}; +use crate::errors::BridgeError; +use crate::operator::RoundIndex; +use crate::rpc::clementine::tagged_signature::SignatureId; +use crate::rpc::clementine::NormalSignatureKind; +use async_stream::try_stream; +use bitcoin::hashes::Hash; +use bitcoin::{TapNodeHash, TapSighash, XOnlyPublicKey}; +use futures_core::stream::Stream; + +impl BridgeConfig { + /// Returns the number of required signatures for N-of-N signing session. + /// + /// # Arguments + /// * `deposit_data` - The deposit data for which to calculate required signatures. + /// + /// # Returns + /// The number of required N-of-N signatures for the deposit. 
+ pub fn get_num_required_nofn_sigs(&self, deposit_data: &DepositData) -> usize { + deposit_data.get_num_operators() + * self.protocol_paramset().num_round_txs + * self.protocol_paramset().num_signed_kickoffs + * self.get_num_required_nofn_sigs_per_kickoff(deposit_data) + } + + /// Returns the number of required operator signatures for a deposit. + /// + /// # Arguments + /// * `deposit_data` - The deposit data for which to calculate required signatures. + /// + /// # Returns + /// The number of required operator signatures for the deposit. + pub fn get_num_required_operator_sigs(&self, deposit_data: &DepositData) -> usize { + self.protocol_paramset().num_round_txs + * self.protocol_paramset().num_signed_kickoffs + * self.get_num_required_operator_sigs_per_kickoff(deposit_data) + } + + /// Returns the number of required N-of-N signatures per kickoff for a deposit. + /// + /// # Arguments + /// * `deposit_data` - The deposit data for which to calculate required signatures per kickoff. + /// + /// # Returns + /// The number of required N-of-N signatures per kickoff. + pub fn get_num_required_nofn_sigs_per_kickoff(&self, deposit_data: &DepositData) -> usize { + 7 + 4 * deposit_data.get_num_verifiers() + + bitvm_client::ClementineBitVMPublicKeys::number_of_assert_txs() * 2 + } + + /// Returns the number of required operator signatures per kickoff for a deposit. + /// + /// # Arguments + /// * `deposit_data` - The deposit data for which to calculate required signatures per kickoff. + /// + /// # Returns + /// The number of required operator signatures per kickoff. + pub fn get_num_required_operator_sigs_per_kickoff(&self, deposit_data: &DepositData) -> usize { + 4 + bitvm_client::ClementineBitVMPublicKeys::number_of_assert_txs() + + deposit_data.get_num_verifiers() + } + + /// Returns the total number of Winternitz public keys used in kickoff UTXOs for blockhash commits. 
+ /// + /// # Returns + /// The number of Winternitz public keys required for all rounds and kickoffs. + pub fn get_num_kickoff_winternitz_pks(&self) -> usize { + self.protocol_paramset().num_kickoffs_per_round + * (self.protocol_paramset().num_round_txs + 1) // we need num_round_txs + 1 because we need one extra round tx to generate the reimburse connectors of the actual last round + } + + /// Returns the total number of unspent kickoff signatures needed from each operator. + /// + /// # Returns + /// The number of unspent kickoff signatures required for all rounds from one operator. + pub fn get_num_unspent_kickoff_sigs(&self) -> usize { + self.protocol_paramset().num_round_txs * self.protocol_paramset().num_kickoffs_per_round * 2 + } + + /// Returns the number of challenge ack hashes needed for a single operator for each round. + /// + /// # Arguments + /// * `deposit_data` - The deposit data for which to calculate required challenge ack hashes. + /// + /// # Returns + /// The number of challenge ack hashes required for the deposit. + pub fn get_num_challenge_ack_hashes(&self, deposit_data: &DepositData) -> usize { + deposit_data.get_num_watchtowers() + } + + // /// Returns the number of winternitz pks needed for a single operator for each round + // pub fn get_num_assert_winternitz_pks(&self) -> usize { + // crate::utils::BITVM_CACHE.num_intermediate_variables + // } +} + +/// Identifies a signature by operator, round, and kickoff index. +#[derive(Copy, Clone, Debug)] +pub struct PartialSignatureInfo { + pub operator_idx: usize, + pub round_idx: RoundIndex, + pub kickoff_utxo_idx: usize, +} + +/// Contains information about the spend path that is needed to sign the utxo. +/// If it is KeyPath, it also includes the merkle root hash of the scripts as +/// the root hash is needed to tweak the key before signing. For ScriptPath nothing is needed. 
+#[derive(Copy, Clone, Debug)]
+pub enum TapTweakData {
+    KeyPath(Option<TapNodeHash>),
+    ScriptPath,
+    Unknown,
+}
+
+/// Contains information to uniquely identify a single signature in the deposit.
+/// operator_idx, round_idx, and kickoff_utxo_idx uniquely identify a kickoff.
+/// signature_id uniquely identifies a signature in that specific kickoff.
+/// tweak_data contains information about the spend path that is needed to sign the utxo.
+/// kickoff_txid is the txid of the kickoff tx the signature belongs to. This is not actually needed for the signature, it is only used to
+/// pass the kickoff txid to the caller of the sighash streams in this module.
+#[derive(Copy, Clone, Debug)]
+pub struct SignatureInfo {
+    pub operator_idx: usize,
+    pub round_idx: RoundIndex,
+    pub kickoff_utxo_idx: usize,
+    pub signature_id: SignatureId,
+    pub tweak_data: TapTweakData,
+    pub kickoff_txid: Option<bitcoin::Txid>,
+}
+
+impl PartialSignatureInfo {
+    pub fn new(
+        operator_idx: usize,
+        round_idx: RoundIndex,
+        kickoff_utxo_idx: usize,
+    ) -> PartialSignatureInfo {
+        PartialSignatureInfo {
+            operator_idx,
+            round_idx,
+            kickoff_utxo_idx,
+        }
+    }
+    /// Completes the partial info with a signature id and spend path data.
+    pub fn complete(&self, signature_id: SignatureId, spend_data: TapTweakData) -> SignatureInfo {
+        SignatureInfo {
+            operator_idx: self.operator_idx,
+            round_idx: self.round_idx,
+            kickoff_utxo_idx: self.kickoff_utxo_idx,
+            signature_id,
+            tweak_data: spend_data,
+            kickoff_txid: None,
+        }
+    }
+    /// Completes the partial info with a kickoff txid (for yielding kickoff txid in sighash streams).
+    pub fn complete_with_kickoff_txid(&self, kickoff_txid: bitcoin::Txid) -> SignatureInfo {
+        SignatureInfo {
+            operator_idx: self.operator_idx,
+            round_idx: self.round_idx,
+            kickoff_utxo_idx: self.kickoff_utxo_idx,
+            signature_id: NormalSignatureKind::YieldKickoffTxid.into(),
+            tweak_data: TapTweakData::ScriptPath,
+            kickoff_txid: Some(kickoff_txid),
+        }
+    }
+}
+
+/// Generates the sighash stream for all N-of-N (verifier) signatures required for a deposit. See [clementine whitepaper](https://citrea.xyz/clementine_whitepaper.pdf) for details on the transactions.
+///
+/// For a given deposit, for each operator and round, generates the sighash stream for all protocol-required transactions.
+/// If `yield_kickoff_txid` is true, yields the kickoff txid as a special entry.
+///
+/// # Arguments
+/// * `db` - Database handle.
+/// * `config` - Bridge configuration.
+/// * `deposit_data` - Deposit data for which to generate sighashes.
+/// * `deposit_blockhash` - Block hash of the deposit.
+/// * `yield_kickoff_txid` - Whether to yield the kickoff txid as a special entry.
+///
+/// # Returns
+///
+/// An async stream of ([`TapSighash`], [`SignatureInfo`]) pairs, or [`BridgeError`] on failure.
+pub fn create_nofn_sighash_stream(
+    db: Database,
+    config: BridgeConfig,
+    deposit_data: DepositData,
+    deposit_blockhash: bitcoin::BlockHash,
+    yield_kickoff_txid: bool,
+) -> impl Stream<Item = Result<(TapSighash, SignatureInfo), BridgeError>> {
+    try_stream! 
{ + let paramset = config.protocol_paramset(); + + let operators = deposit_data.get_operators(); + + for (operator_idx, op_xonly_pk) in + operators.iter().enumerate() + { + + let utxo_idxs = get_kickoff_utxos_to_sign( + config.protocol_paramset(), + *op_xonly_pk, + deposit_blockhash, + deposit_data.get_deposit_outpoint(), + ); + // need to create new TxHandlerDbData for each operator + let mut tx_db_data = ReimburseDbCache::new_for_deposit(db.clone(), *op_xonly_pk, deposit_data.get_deposit_outpoint(), config.protocol_paramset(), None); + + let mut txhandler_cache = TxHandlerCache::new(); + + for round_idx in RoundIndex::iter_rounds(paramset.num_round_txs) { + // For each round, we have multiple kickoff_utxos to sign for the deposit. + for &kickoff_idx in &utxo_idxs { + let partial = PartialSignatureInfo::new(operator_idx, round_idx, kickoff_idx); + + let context = ContractContext::new_context_for_kickoff( + KickoffData { + operator_xonly_pk: *op_xonly_pk, + round_idx, + kickoff_idx: kickoff_idx as u32, + }, + deposit_data.clone(), + config.protocol_paramset(), + ); + + let mut txhandlers = create_txhandlers( + TransactionType::AllNeededForDeposit, + context, + &mut txhandler_cache, + &mut tx_db_data, + ).await?; + + let mut sum = 0; + let mut kickoff_txid = None; + for (tx_type, txhandler) in txhandlers.iter() { + let sighashes = txhandler.calculate_shared_txins_sighash(EntityType::VerifierDeposit, partial)?; + sum += sighashes.len(); + for sighash in sighashes { + yield sighash; + } + if tx_type == &TransactionType::Kickoff { + kickoff_txid = Some(txhandler.get_txid()); + } + } + + match (yield_kickoff_txid, kickoff_txid) { + (true, Some(kickoff_txid)) => { + yield (TapSighash::all_zeros(), partial.complete_with_kickoff_txid(*kickoff_txid)); + } + (true, None) => { + Err(eyre::eyre!("Kickoff txid not found in sighash stream"))?; + } + _ => {} + } + + + if sum != config.get_num_required_nofn_sigs_per_kickoff(&deposit_data) { + Err(eyre::eyre!("NofN sighash count 
does not match: expected {0}, got {1}", config.get_num_required_nofn_sigs_per_kickoff(&deposit_data), sum))?;
+                    }
+                    // recollect round_tx, ready_to_reimburse_tx, and move_to_vault_tx for the next kickoff_utxo
+                    txhandler_cache.store_for_next_kickoff(&mut txhandlers)?;
+                }
+                // collect the last ready_to_reimburse txhandler for the next round
+                txhandler_cache.store_for_next_round()?;
+            }
+        }
+    }
+}
+
+/// Generates the sighash stream for all operator signatures required for a deposit. These signatures required by the operators are
+/// the signatures needed to burn the collateral of the operators, only able to be burned if the operator is malicious.
+/// See [clementine whitepaper](https://citrea.xyz/clementine_whitepaper.pdf) for details on the transactions.
+///
+/// # Arguments
+/// * `db` - Database handle.
+/// * `operator_xonly_pk` - X-only public key of the operator.
+/// * `config` - Bridge configuration.
+/// * `deposit_data` - Deposit data for which to generate sighashes.
+/// * `deposit_blockhash` - Block hash of the deposit.
+///
+/// # Returns
+///
+/// An async stream of (sighash, [`SignatureInfo`]) pairs, or [`BridgeError`] on failure.
+// Possible future optimization: Each verifier already generates some of these TX's in create_nofn_sighash_stream()
+// It is possible to for verifiers somehow return the required sighashes for operator signatures there too. But operators only needs to use sighashes included in this function.
+pub fn create_operator_sighash_stream(
+    db: Database,
+    operator_xonly_pk: XOnlyPublicKey,
+    config: BridgeConfig,
+    deposit_data: DepositData,
+    deposit_blockhash: bitcoin::BlockHash,
+) -> impl Stream<Item = Result<(TapSighash, SignatureInfo), BridgeError>> {
+    try_stream! 
{ + let mut tx_db_data = ReimburseDbCache::new_for_deposit(db.clone(), operator_xonly_pk, deposit_data.get_deposit_outpoint(), config.protocol_paramset(), None); + + let operator = db.get_operator(None, operator_xonly_pk).await?; + + let operator = match operator { + Some(operator) => operator, + None => Err(BridgeError::OperatorNotFound(operator_xonly_pk))?, + }; + + let utxo_idxs = get_kickoff_utxos_to_sign( + config.protocol_paramset(), + operator.xonly_pk, + deposit_blockhash, + deposit_data.get_deposit_outpoint(), + ); + + let paramset = config.protocol_paramset(); + let mut txhandler_cache = TxHandlerCache::new(); + let operator_idx = deposit_data.get_operator_index(operator_xonly_pk)?; + + // For each round_tx, we have multiple kickoff_utxos as the connectors. + for round_idx in RoundIndex::iter_rounds(paramset.num_round_txs) { + for &kickoff_idx in &utxo_idxs { + let partial = PartialSignatureInfo::new(operator_idx, round_idx, kickoff_idx); + + let context = ContractContext::new_context_for_kickoff( + KickoffData { + operator_xonly_pk, + round_idx, + kickoff_idx: kickoff_idx as u32, + }, + deposit_data.clone(), + config.protocol_paramset(), + ); + + let mut txhandlers = create_txhandlers( + TransactionType::AllNeededForDeposit, + context, + &mut txhandler_cache, + &mut tx_db_data, + ).await?; + + let mut sum = 0; + for (_, txhandler) in txhandlers.iter() { + let sighashes = txhandler.calculate_shared_txins_sighash(EntityType::OperatorDeposit, partial)?; + sum += sighashes.len(); + for sighash in sighashes { + yield sighash; + } + } + if sum != config.get_num_required_operator_sigs_per_kickoff(&deposit_data) { + Err(eyre::eyre!("Operator sighash count does not match: expected {0}, got {1}", config.get_num_required_operator_sigs_per_kickoff(&deposit_data), sum))?; + } + // recollect round_tx, ready_to_reimburse_tx, and move_to_vault_tx for the next kickoff_utxo + txhandler_cache.store_for_next_kickoff(&mut txhandlers)?; + } + // collect the last 
ready_to_reimburse txhandler for the next round + txhandler_cache.store_for_next_round()?; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + bitvm_client::SECP, + builder::transaction::sign::TransactionRequestData, + config::protocol::ProtocolParamset, + deposit::{Actors, DepositInfo, OperatorData}, + extended_bitcoin_rpc::ExtendedBitcoinRpc, + rpc::clementine::{ + clementine_operator_client::ClementineOperatorClient, TransactionRequest, + }, + test::common::{ + citrea::MockCitreaClient, create_regtest_rpc, create_test_config_with_thread_name, + tx_utils::get_tx_from_signed_txs_with_type, + }, + }; + use bincode; + use bitcoin::hashes::sha256; + use bitcoin::secp256k1::PublicKey; + use bitcoin::{Block, BlockHash, OutPoint, Txid}; + use bitcoincore_rpc::RpcApi; + use futures_util::stream::TryStreamExt; + use std::fs::File; + + #[cfg(debug_assertions)] + pub const DEPOSIT_STATE_FILE_PATH_DEBUG: &str = "src/test/data/deposit_state_debug.bincode"; + #[cfg(not(debug_assertions))] + pub const DEPOSIT_STATE_FILE_PATH_RELEASE: &str = "src/test/data/deposit_state_release.bincode"; + + /// State of the chain and the deposit generated in generate_deposit_state() test. + /// Contains: + /// - Blocks: All blocks from height 1 until the chain tip. + /// - Deposit info: Deposit info of the deposit that were signed. + /// - Deposit blockhash: Block hash of the deposit outpoint. + /// - Move txid: Move to vault txid of the deposit. + /// - Operator data: Operator data of the single operator that were used in the deposit. + /// - Round tx txid hash: Hash of all round tx txids of the operator. + /// - Nofn sighash hash: Hash of all nofn sighashes of the deposit. + /// - Operator sighash hash: Hash of all operator sighashes of the deposit. 
+    #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
+    struct DepositChainState {
+        blocks: Vec<Block>,
+        deposit_info: DepositInfo,
+        deposit_blockhash: BlockHash,
+        move_txid: Txid,
+        operator_data: OperatorData,
+        round_tx_txid_hash: sha256::Hash,
+        nofn_sighash_hash: sha256::Hash,
+        operator_sighash_hash: sha256::Hash,
+    }
+
+    /// To make the [`test_bridge_contract_change`] test work if breaking changes are expected, run this test again
+    /// (with both debug and release), the states will get updated with the current values.
+    /// Read [`test_bridge_contract_change`] test doc for more details.
+    #[cfg(feature = "automation")]
+    #[tokio::test]
+    #[ignore = "Run this to generate fresh deposit state data, in case any breaking change occurs to deposits"]
+    async fn generate_deposit_state() {
+        use crate::test::common::run_single_deposit;
+
+        let mut config = create_test_config_with_thread_name().await;
+        // only run with one operator
+        config.test_params.all_operators_secret_keys.truncate(1);
+
+        let regtest = create_regtest_rpc(&mut config).await;
+        let rpc = regtest.rpc().clone();
+
+        let (actors, deposit_info, move_txid, deposit_blockhash, verifiers_public_keys) =
+            run_single_deposit::<MockCitreaClient>(&mut config, rpc.clone(), None, None, None)
+                .await
+                .unwrap();
+
+        // get generated blocks
+        let height = rpc.get_current_chain_height().await.unwrap();
+        let mut blocks = Vec::new();
+        for i in 1..=height {
+            let (blockhash, _) = rpc.get_block_info_by_height(i as u64).await.unwrap();
+            let block = rpc.get_block(&blockhash).await.unwrap();
+            blocks.push(block);
+        }
+
+        let op0_config = BridgeConfig {
+            secret_key: config.test_params.all_verifiers_secret_keys[0],
+            db_name: config.db_name + "0",
+            ..config
+        };
+
+        let operators_xonly_pks = op0_config
+            .test_params
+            .all_operators_secret_keys
+            .iter()
+            .map(|sk| sk.x_only_public_key(&SECP).0)
+            .collect::<Vec<_>>();
+
+        let op0_xonly_pk = operators_xonly_pks[0];
+
+        let db = Database::new(&op0_config).await.unwrap();
+        let 
operator_data = db.get_operator(None, op0_xonly_pk).await.unwrap().unwrap(); + + let (nofn_sighash_hash, operator_sighash_hash) = calculate_hash_of_sighashes( + deposit_info.clone(), + verifiers_public_keys, + operators_xonly_pks.clone(), + op0_config.clone(), + deposit_blockhash, + ) + .await; + + let operator = actors.get_operator_client_by_index(0); + + let round_tx_txid_hash = compute_hash_of_round_txs( + operator, + deposit_info.deposit_outpoint, + operators_xonly_pks[0], + deposit_blockhash, + op0_config.protocol_paramset(), + ) + .await; + + let deposit_state = DepositChainState { + blocks, + deposit_blockhash, + move_txid, + deposit_info, + operator_data, + round_tx_txid_hash, + nofn_sighash_hash, + operator_sighash_hash, + }; + + #[cfg(debug_assertions)] + let file_path = DEPOSIT_STATE_FILE_PATH_DEBUG; + #[cfg(not(debug_assertions))] + let file_path = DEPOSIT_STATE_FILE_PATH_RELEASE; + + // save to file + let file = File::create(file_path).unwrap(); + bincode::serialize_into(file, &deposit_state).unwrap(); + } + + async fn load_deposit_state(rpc: &ExtendedBitcoinRpc) -> DepositChainState { + tracing::debug!( + "Current chain height: {}", + rpc.get_current_chain_height().await.unwrap() + ); + #[cfg(debug_assertions)] + let file_path = DEPOSIT_STATE_FILE_PATH_DEBUG; + #[cfg(not(debug_assertions))] + let file_path = DEPOSIT_STATE_FILE_PATH_RELEASE; + + let file = File::open(file_path).unwrap(); + let deposit_state: DepositChainState = bincode::deserialize_from(file).unwrap(); + + // submit blocks to current rpc + for block in &deposit_state.blocks { + rpc.submit_block(block).await.unwrap(); + } + deposit_state + } + + /// Returns the hash of all round txs txids for a given operator. 
+    async fn compute_hash_of_round_txs(
+        mut operator: ClementineOperatorClient<tonic::transport::Channel>,
+        deposit_outpoint: OutPoint,
+        operator_xonly_pk: XOnlyPublicKey,
+        deposit_blockhash: bitcoin::BlockHash,
+        paramset: &'static ProtocolParamset,
+    ) -> sha256::Hash {
+        let kickoff_utxo = get_kickoff_utxos_to_sign(
+            paramset,
+            operator_xonly_pk,
+            deposit_blockhash,
+            deposit_outpoint,
+        )[0];
+
+        let mut all_round_txids = Vec::new();
+        for i in 0..paramset.num_round_txs {
+            let tx_req = TransactionRequestData {
+                deposit_outpoint,
+                kickoff_data: KickoffData {
+                    operator_xonly_pk,
+                    round_idx: RoundIndex::Round(i),
+                    kickoff_idx: kickoff_utxo as u32,
+                },
+            };
+            let signed_txs = operator
+                .internal_create_signed_txs(TransactionRequest::from(tx_req))
+                .await
+                .unwrap()
+                .into_inner();
+            let round_tx =
+                get_tx_from_signed_txs_with_type(&signed_txs, TransactionType::Round).unwrap();
+            all_round_txids.push(round_tx.compute_txid());
+        }
+
+        sha256::Hash::hash(&all_round_txids.concat())
+    }
+
+    /// Calculates the hash of all nofn and operator sighashes for a given deposit.
+    async fn calculate_hash_of_sighashes(
+        deposit_info: DepositInfo,
+        verifiers_public_keys: Vec<PublicKey>,
+        operators_xonly_pks: Vec<XOnlyPublicKey>,
+        op0_config: BridgeConfig,
+        deposit_blockhash: bitcoin::BlockHash,
+    ) -> (sha256::Hash, sha256::Hash) {
+        let deposit_data = DepositData {
+            nofn_xonly_pk: None,
+            deposit: deposit_info,
+            actors: Actors {
+                verifiers: verifiers_public_keys,
+                watchtowers: vec![],
+                operators: operators_xonly_pks.clone(),
+            },
+            security_council: op0_config.security_council.clone(),
+        };
+
+        let db = Database::new(&op0_config).await.unwrap();
+
+        let sighash_stream = create_nofn_sighash_stream(
+            db.clone(),
+            op0_config.clone(),
+            deposit_data.clone(),
+            deposit_blockhash,
+            true,
+        );
+
+        let nofn_sighashes: Vec<_> = sighash_stream.try_collect().await.unwrap();
+        let nofn_sighashes = nofn_sighashes
+            .into_iter()
+            .map(|(sighash, _info)| sighash.to_byte_array())
+            .collect::<Vec<_>>();
+
+        let operator_streams = create_operator_sighash_stream(
+            db.clone(),
+            operators_xonly_pks[0],
+            op0_config.clone(),
+            deposit_data.clone(),
+            deposit_blockhash,
+        );
+
+        let operator_sighashes: Vec<_> = operator_streams.try_collect().await.unwrap();
+        let operator_sighashes = operator_sighashes
+            .into_iter()
+            .map(|(sighash, _info)| sighash.to_byte_array())
+            .collect::<Vec<_>>();
+
+        // Hash the vectors
+        let nofn_hash = sha256::Hash::hash(&nofn_sighashes.concat());
+        let operator_hash = sha256::Hash::hash(&operator_sighashes.concat());
+
+        (nofn_hash, operator_hash)
+    }
+
+    /// Test for checking if the sighash stream is changed due to changes in code.
+    /// If this test fails, the code contains breaking changes that need replacement deposits on deployment.
+    /// It is also possible that round tx's are changed, which is a bigger issue. In addition to replacement deposits,
+    /// the collaterals of operators that created at least round 1 are unusable.
+    ///
+    /// It's also possible for this test to fail if the default config is changed (for example num_verifiers, operators, etc).
+ /// + /// This test only uses one operator, because it is hard (too much code duplication) with + /// current test setup fn's to generate operators with different configs (config has the + /// reimburse address and collateral funding outpoint, which should be loaded from the saved + /// deposit state) + /// + /// To make the test work if breaking changes are expected, run generate_deposit_state() test again + /// (with both debug and release), it will get updated with the current values. Run following commands: + /// debug: cargo test --all-features generate_deposit_state -- --ignored + /// release: cargo test --all-features --release generate_deposit_state -- --ignored + /// If test_bridge_contract_change failed on github CI, CI also uploads the deposit state file as an artifact, so it can be downloaded + /// and committed to the repo. + #[cfg(feature = "automation")] + #[tokio::test] + async fn test_bridge_contract_change() { + use crate::test::common::run_single_deposit; + + let mut config = create_test_config_with_thread_name().await; + // only run with one operator + config.test_params.all_operators_secret_keys.truncate(1); + + // do not generate to address + config.test_params.generate_to_address = false; + + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let deposit_state = load_deposit_state(&rpc).await; + + // set operator reimbursement address and collateral funding outpoint to the ones from the saved deposit state + config.operator_reimbursement_address = Some( + deposit_state + .operator_data + .reimburse_addr + .as_unchecked() + .to_owned(), + ); + config.operator_collateral_funding_outpoint = + Some(deposit_state.operator_data.collateral_funding_outpoint); + + // after loading generate some funds to rpc wallet + // needed so that the deposit doesn't crash (I don't know why) due to insufficient funds + let address = rpc + .get_new_address(None, None) + .await + .expect("Failed to get new address"); + + 
rpc.generate_to_address(105, address.assume_checked_ref())
+            .await
+            .expect("Failed to generate blocks");
+
+        let (actors, deposit_info, move_txid, deposit_blockhash, verifiers_public_keys) =
+            run_single_deposit::<MockCitreaClient>(
+                &mut config,
+                rpc.clone(),
+                None,
+                None,
+                Some(deposit_state.deposit_info.deposit_outpoint),
+            )
+            .await
+            .unwrap();
+
+        // sanity checks, these should be equal if the deposit state saved is still valid
+        // if not a new deposit state needs to be generated
+        assert_eq!(move_txid, deposit_state.move_txid);
+        assert_eq!(deposit_blockhash, deposit_state.deposit_blockhash);
+        assert_eq!(deposit_info, deposit_state.deposit_info);
+
+        let op0_config = BridgeConfig {
+            secret_key: config.test_params.all_verifiers_secret_keys[0],
+            db_name: config.db_name.clone() + "0",
+            ..config.clone()
+        };
+
+        let operators_xonly_pks = op0_config
+            .test_params
+            .all_operators_secret_keys
+            .iter()
+            .map(|sk| sk.x_only_public_key(&SECP).0)
+            .collect::<Vec<_>>();
+
+        let operator = actors.get_operator_client_by_index(0);
+
+        let round_tx_hash = compute_hash_of_round_txs(
+            operator,
+            deposit_info.deposit_outpoint,
+            operators_xonly_pks[0],
+            deposit_blockhash,
+            op0_config.protocol_paramset(),
+        )
+        .await;
+
+        // If this fails, the round txs are changed.
+        assert_eq!(
+            round_tx_hash, deposit_state.round_tx_txid_hash,
+            "Round tx hash does not match the previous values, round txs are changed"
+        );
+
+        let (nofn_hash, operator_hash) = calculate_hash_of_sighashes(
+            deposit_info,
+            verifiers_public_keys,
+            operators_xonly_pks,
+            op0_config,
+            deposit_blockhash,
+        )
+        .await;
+
+        // If these fail, the bridge contract is changed.
+        assert_eq!(
+            nofn_hash, deposit_state.nofn_sighash_hash,
+            "NofN sighashes do not match the previous values"
+        );
+        assert_eq!(
+            operator_hash, deposit_state.operator_sighash_hash,
+            "Operator sighashes do not match the previous values"
+        );
+    }
+}
diff --git a/core/src/builder/transaction/challenge.rs b/core/src/builder/transaction/challenge.rs
new file mode 100644
index 000000000..038b5ecab
--- /dev/null
+++ b/core/src/builder/transaction/challenge.rs
@@ -0,0 +1,406 @@
+//! # Challenge Transaction Logic
+//!
+//! This module provides functions for constructing challenge-related transactions in the protocol.
+//! The transactions are: Challenge, ChallengeTimeout, OperatorChallengeNack, OperatorChallengeAck, Disprove.
+
+use crate::builder::script::SpendPath;
+use crate::builder::transaction::output::UnspentTxOut;
+use crate::builder::transaction::txhandler::{TxHandler, DEFAULT_SEQUENCE};
+use crate::builder::transaction::*;
+use crate::config::protocol::ProtocolParamset;
+use crate::constants::{MIN_TAPROOT_AMOUNT, NON_STANDARD_V3};
+use crate::errors::BridgeError;
+use crate::rpc::clementine::{NormalSignatureKind, NumberedSignatureKind};
+use crate::{builder, EVMAddress};
+use bitcoin::script::PushBytesBuf;
+use bitcoin::{Sequence, TxOut, WitnessVersion};
+use eyre::Context;
+
+use self::input::UtxoVout;
+
+/// Creates a [`TxHandler`] for the `watchtower_challenge_tx`.
+///
+/// This transaction is sent by a watchtower to submit a challenge proof (e.g., a Groth16 proof with public inputs).
+/// The proof data is encoded as a series of Taproot outputs and a final OP_RETURN output.
+/// Currently a watchtower challenge is in total 144 bytes, 32 + 32 + 80 bytes.
+///
+/// # Inputs
+/// 1. KickoffTx: WatchtowerChallenge utxo (for the given watchtower)
+///
+/// # Outputs
+/// 1. First output, first 32 bytes of challenge data encoded directly in scriptpubkey.
+/// 2. Second output, next 32 bytes of challenge data encoded directly in scriptpubkey.
+/// 3. 
OP_RETURN output, containing the last 80 bytes of challenge data.
+///
+/// # Arguments
+///
+/// * `kickoff_txhandler` - The kickoff transaction handler the watchtower challenge belongs to.
+/// * `watchtower_idx` - The index of the watchtower in the deposit submitting the challenge.
+/// * `commit_data` - The challenge proof data to be included in the transaction.
+/// * `paramset` - Protocol parameter set.
+///
+/// # Returns
+///
+/// A [`TxHandler`] for the watchtower challenge transaction, or a [`BridgeError`] if construction fails.
+pub fn create_watchtower_challenge_txhandler(
+    kickoff_txhandler: &TxHandler,
+    watchtower_idx: usize,
+    commit_data: &[u8],
+    paramset: &'static ProtocolParamset,
+    #[cfg(test)] test_params: &crate::config::TestParams,
+) -> Result<TxHandler, BridgeError> {
+    if commit_data.len() != paramset.watchtower_challenge_bytes {
+        return Err(TxError::IncorrectWatchtowerChallengeDataLength.into());
+    }
+    let mut builder = TxHandlerBuilder::new(TransactionType::WatchtowerChallenge(watchtower_idx))
+        .with_version(Version::TWO)
+        .add_input(
+            (
+                NumberedSignatureKind::WatchtowerChallenge,
+                watchtower_idx as i32,
+            ),
+            kickoff_txhandler
+                .get_spendable_output(UtxoVout::WatchtowerChallenge(watchtower_idx))?,
+            SpendPath::KeySpend,
+            DEFAULT_SEQUENCE,
+        );
+    let mut current_idx = 0;
+    while current_idx + 80 < paramset.watchtower_challenge_bytes {
+        // encode next 32 bytes of data as script pubkey of taproot utxo
+        let data = PushBytesBuf::try_from(commit_data[current_idx..current_idx + 32].to_vec())
+            .wrap_err("Failed to create pushbytesbuf for watchtower challenge data output")?;
+
+        let data_encoded_scriptbuf = Builder::new()
+            .push_opcode(WitnessVersion::V1.into())
+            .push_slice(data)
+            .into_script();
+
+        builder = builder.add_output(UnspentTxOut::from_partial(TxOut {
+            value: MIN_TAPROOT_AMOUNT,
+            script_pubkey: data_encoded_scriptbuf,
+        }));
+        current_idx += 32;
+    }
+
+    // add the remaining data as an op_return output
+    if current_idx < 
paramset.watchtower_challenge_bytes { + let remaining_data = PushBytesBuf::try_from(commit_data[current_idx..].to_vec()) + .wrap_err("Failed to create pushbytesbuf for watchtower challenge op_return")?; + builder = builder.add_output(UnspentTxOut::from_partial(op_return_txout(remaining_data))); + } + + #[cfg(test)] + { + builder = test_params.maybe_add_large_test_outputs(builder)?; + } + + Ok(builder.finalize()) +} + +/// Creates a [`TxHandler`] for the `watchtower_challenge_timeout_tx`. +/// +/// This transaction is sent by an operator if a watchtower does not submit a challenge in time, allowing the operator to claim a timeout. +/// This way, operators do not need to reveal their preimage, and do not need to use the watchtowers longest chain proof in their +/// bridge proof. +/// +/// # Inputs +/// 1. KickoffTx: WatchtowerChallenge utxo (for the given watchtower) +/// 2. KickoffTx: WatchtowerChallengeAck utxo (for the given watchtower) +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// +/// # Arguments +/// +/// * `kickoff_txhandler` - The kickoff transaction handler the watchtower challenge timeout belongs to. +/// * `watchtower_idx` - The index of the watchtower in the deposit submitting the challenge. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// +/// A [`TxHandler`] for the watchtower challenge timeout transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_watchtower_challenge_timeout_txhandler(
+    kickoff_txhandler: &TxHandler,
+    watchtower_idx: usize,
+    paramset: &'static ProtocolParamset,
+) -> Result<TxHandler, BridgeError> {
+    let watchtower_challenge_vout = UtxoVout::WatchtowerChallenge(watchtower_idx);
+    let challenge_ack_vout = UtxoVout::WatchtowerChallengeAck(watchtower_idx);
+    Ok(
+        TxHandlerBuilder::new(TransactionType::WatchtowerChallengeTimeout(watchtower_idx))
+            .with_version(NON_STANDARD_V3)
+            .add_input(
+                (
+                    NumberedSignatureKind::WatchtowerChallengeTimeout1,
+                    watchtower_idx as i32,
+                ),
+                kickoff_txhandler.get_spendable_output(watchtower_challenge_vout)?,
+                SpendPath::ScriptSpend(0),
+                Sequence::from_height(paramset.watchtower_challenge_timeout_timelock),
+            )
+            .add_input(
+                (
+                    NumberedSignatureKind::WatchtowerChallengeTimeout2,
+                    watchtower_idx as i32,
+                ),
+                kickoff_txhandler.get_spendable_output(challenge_ack_vout)?,
+                SpendPath::ScriptSpend(1),
+                Sequence::from_height(paramset.watchtower_challenge_timeout_timelock),
+            )
+            .add_output(UnspentTxOut::from_partial(
+                builder::transaction::anchor_output(paramset.anchor_amount()),
+            ))
+            .finalize(),
+    )
+}
+
+/// Creates a [`TxHandler`] for the `OperatorChallengeNack` transaction.
+///
+/// This transaction is used to force an operator to reveal a preimage for a watchtower challenge. If a watchtower sends a watchtower challenge,
+/// but the operator does not reveal the preimage by sending an OperatorChallengeAck, after a specified amount of time (defined in paramset),
+/// the N-of-N can spend the output, burning the operator's collateral.
+///
+/// # Inputs
+/// 1. KickoffTx: WatchtowerChallengeAck utxo (for the given watchtower)
+/// 2. KickoffTx: KickoffFinalizer utxo
+/// 3. RoundTx: BurnConnector utxo
+///
+/// # Outputs
+/// 1. Anchor output for CPFP
+///
+/// # Arguments
+///
+/// * `kickoff_txhandler` - The kickoff transaction handler the operator challenge nack belongs to.
+/// * `watchtower_idx` - The index of the watchtower in the deposit corresponding to the watchtower challenge related to the operator challenge nack.
+/// * `round_txhandler` - The round transaction handler for the current round the kickoff belongs to.
+/// * `paramset` - Protocol parameter set.
+///
+/// # Returns
+///
+/// A [`TxHandler`] for the operator challenge NACK transaction, or a [`BridgeError`] if construction fails.
+pub fn create_operator_challenge_nack_txhandler(
+    kickoff_txhandler: &TxHandler,
+    watchtower_idx: usize,
+    round_txhandler: &TxHandler,
+    paramset: &'static ProtocolParamset,
+) -> Result<TxHandler, BridgeError> {
+    Ok(
+        TxHandlerBuilder::new(TransactionType::OperatorChallengeNack(watchtower_idx))
+            .with_version(NON_STANDARD_V3)
+            .add_input(
+                (
+                    NumberedSignatureKind::OperatorChallengeNack1,
+                    watchtower_idx as i32,
+                ),
+                kickoff_txhandler
+                    .get_spendable_output(UtxoVout::WatchtowerChallengeAck(watchtower_idx))?,
+                SpendPath::ScriptSpend(0),
+                Sequence::from_height(paramset.operator_challenge_nack_timelock),
+            )
+            .add_input(
+                (
+                    NumberedSignatureKind::OperatorChallengeNack2,
+                    watchtower_idx as i32,
+                ),
+                kickoff_txhandler.get_spendable_output(UtxoVout::KickoffFinalizer)?,
+                SpendPath::ScriptSpend(0),
+                DEFAULT_SEQUENCE,
+            )
+            .add_input(
+                (
+                    NumberedSignatureKind::OperatorChallengeNack3,
+                    watchtower_idx as i32,
+                ),
+                round_txhandler.get_spendable_output(UtxoVout::CollateralInRound)?,
+                SpendPath::KeySpend,
+                DEFAULT_SEQUENCE,
+            )
+            .add_output(UnspentTxOut::from_partial(
+                builder::transaction::anchor_output(paramset.anchor_amount()),
+            ))
+            .finalize(),
+    )
+}
+
+/// Creates a [`TxHandler`] for the OperatorChallengeAck transaction.
+///
+/// This transaction is used by an operator to acknowledge a watchtower challenge and reveal the required preimage, if a watchtower challenge is sent.
+///
+/// # Inputs
+/// 1. KickoffTx: WatchtowerChallengeAck utxo (for the given watchtower)
+///
+/// # Outputs
+/// 1. Anchor output for CPFP
+/// 2. 
Dummy OP_RETURN output (to pad the size of the transaction, as it is too small otherwise)
+///
+/// # Arguments
+///
+/// * `kickoff_txhandler` - The kickoff transaction handler the operator challenge ack belongs to.
+/// * `watchtower_idx` - The index of the watchtower that sent the challenge.
+/// * `paramset` - Protocol parameter set.
+///
+/// # Returns
+///
+/// A [`TxHandler`] for the operator challenge ACK transaction, or a [`BridgeError`] if construction fails.
+pub fn create_operator_challenge_ack_txhandler(
+    kickoff_txhandler: &TxHandler,
+    watchtower_idx: usize,
+    paramset: &'static ProtocolParamset,
+) -> Result<TxHandler, BridgeError> {
+    Ok(
+        TxHandlerBuilder::new(TransactionType::OperatorChallengeAck(watchtower_idx))
+            .with_version(NON_STANDARD_V3)
+            .add_input(
+                NormalSignatureKind::OperatorChallengeAck1,
+                kickoff_txhandler
+                    .get_spendable_output(UtxoVout::WatchtowerChallengeAck(watchtower_idx))?,
+                SpendPath::ScriptSpend(2),
+                DEFAULT_SEQUENCE,
+            )
+            .add_output(UnspentTxOut::from_partial(
+                builder::transaction::anchor_output(paramset.anchor_amount()),
+            ))
+            .add_output(UnspentTxOut::from_partial(op_return_txout(b"PADDING")))
+            .finalize(),
+    )
+}
+
+/// Creates a [`TxHandler`] for the `disprove_tx`.
+///
+/// This transaction is sent by N-of-N to penalize a malicious operator by burning their collateral (burn connector).
+/// This is done either with the additional disprove script created by BitVM, in case the public inputs of the bridge proof the operator
+/// sent are not correct/do not match previous data, or if the Groth16 verification of the proof is incorrect using BitVM disprove scripts.
+///
+/// # Inputs
+/// 1. KickoffTx: Disprove utxo
+/// 2. RoundTx: BurnConnector utxo
+///
+/// # Outputs
+/// 1. Anchor output for CPFP
+///
+/// # Arguments
+///
+/// * `kickoff_txhandler` - The kickoff transaction handler the disprove belongs to.
+/// * `round_txhandler` - The round transaction handler for the current round the kickoff belongs to.
+///
+/// # Returns
+///
+/// A [`TxHandler`] for the disprove transaction, or a [`BridgeError`] if construction fails.
+pub fn create_disprove_txhandler(
+    kickoff_txhandler: &TxHandler,
+    round_txhandler: &TxHandler,
+) -> Result<TxHandler, BridgeError> {
+    Ok(TxHandlerBuilder::new(TransactionType::Disprove)
+        .with_version(Version::TWO)
+        .add_input(
+            NormalSignatureKind::NoSignature,
+            kickoff_txhandler.get_spendable_output(UtxoVout::Disprove)?,
+            SpendPath::Unknown,
+            DEFAULT_SEQUENCE,
+        )
+        .add_input(
+            NormalSignatureKind::Disprove2,
+            round_txhandler.get_spendable_output(UtxoVout::CollateralInRound)?,
+            SpendPath::KeySpend,
+            DEFAULT_SEQUENCE,
+        )
+        .add_output(UnspentTxOut::from_partial(
+            builder::transaction::non_ephemeral_anchor_output(), // must be non-ephemeral, because tx is v2
+        ))
+        .finalize())
+}
+
+/// Creates a [`TxHandler`] for the `challenge` transaction.
+///
+/// This transaction is used to reimburse an operator for a valid challenge, intended to cover their costs for sending asserts transactions,
+/// and potentially cover their opportunity cost as their reimbursements are delayed due to the challenge. This cost of a challenge is also
+/// used to disincentivize sending challenges for kickoffs that are correct. In case the challenge is correct and operator is proved to be
+/// malicious, the challenge cost will be reimbursed using the operator's collateral that's locked in Citrea.
+///
+/// # Inputs
+/// 1. KickoffTx: Challenge utxo
+///
+/// # Outputs
+/// 1. Operator reimbursement output
+/// 2. OP_RETURN output (containing EVM address of the challenger, for reimbursement if the challenge is correct)
+///
+/// # Arguments
+///
+/// * `kickoff_txhandler` - The kickoff transaction handler that the challenge belongs to.
+/// * `operator_reimbursement_address` - The address to reimburse the operator to cover their costs.
+/// * `challenger_evm_address` - The EVM address of the challenger, for reimbursement if the challenge is correct.
+/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// +/// A [`TxHandler`] for the challenge transaction, or a [`BridgeError`] if construction fails. +pub fn create_challenge_txhandler( + kickoff_txhandler: &TxHandler, + operator_reimbursement_address: &bitcoin::Address, + challenger_evm_address: Option, + paramset: &'static ProtocolParamset, +) -> Result { + let mut builder = TxHandlerBuilder::new(TransactionType::Challenge) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::Challenge, + kickoff_txhandler.get_spendable_output(UtxoVout::Challenge)?, + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: paramset.operator_challenge_amount, + script_pubkey: operator_reimbursement_address.script_pubkey(), + })); + + if let Some(challenger_evm_address) = challenger_evm_address { + builder = builder.add_output(UnspentTxOut::from_partial(op_return_txout( + challenger_evm_address.0, + ))); + } + + Ok(builder.finalize()) +} + +/// Creates a [`TxHandler`] for the `challenge_timeout` transaction. +/// +/// This transaction is used to finalize a kickoff if no challenge is submitted in time, allowing the operator to proceed faster to the next round, thus getting their reimbursement, as the next round will generate the reimbursement connectors of the current round. +/// +/// # Inputs +/// 1. KickoffTx: Challenge utxo +/// 2. KickoffTx: KickoffFinalizer utxo +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// +/// # Arguments +/// +/// * `kickoff_txhandler` - The kickoff transaction handler the challenge timeout belongs to. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// +/// A [`TxHandler`] for the challenge timeout transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_challenge_timeout_txhandler( + kickoff_txhandler: &TxHandler, + paramset: &'static ProtocolParamset, +) -> Result { + Ok(TxHandlerBuilder::new(TransactionType::ChallengeTimeout) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::OperatorSighashDefault, + kickoff_txhandler.get_spendable_output(UtxoVout::Challenge)?, + SpendPath::ScriptSpend(1), + Sequence::from_height(paramset.operator_challenge_timeout_timelock), + ) + .add_input( + NormalSignatureKind::ChallengeTimeout2, + kickoff_txhandler.get_spendable_output(UtxoVout::KickoffFinalizer)?, + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize()) +} diff --git a/core/src/builder/transaction/creator.rs b/core/src/builder/transaction/creator.rs new file mode 100644 index 000000000..2d06c35dd --- /dev/null +++ b/core/src/builder/transaction/creator.rs @@ -0,0 +1,1454 @@ +//! # Transaction Handler Creation Logic +//! +//! This module provides the logic for constructing, caching, and managing transaction handlers (`TxHandler`) for all transaction types in the Clementine bridge. +//! +//! It is responsible for orchestrating the creation of all transaction flows for a given operator, round, and deposit, including collateral, kickoff, challenge, reimbursement, and assertion transactions. It also manages context and database-backed caching to support efficient and correct transaction construction. +//! +//! ## Key Types +//! +//! - [`KickoffWinternitzKeys`] - Helper for managing Winternitz keys for kickoff transactions, to retrieve the correct keys for a given round. +//! - [`ReimburseDbCache`] - Retrieves and caches relevant data from the database for transaction handler creation. +//! - [`ContractContext`] - Holds context for a specific operator, round, and optionally deposit, in short all the information needed to create the relevant transactions. +//! 
- [`TxHandlerCache`] - Stores and manages cached transaction handlers for efficient flow construction. This is important during the deposit, as the functions create all transactions for a single operator, kickoff utxo, and deposit tuple, which has common transactions between them. (Mainly round tx and move to vault tx) +//! +//! ## Main Functions +//! +//! - [`create_txhandlers`] - Orchestrates the creation of all required transaction handlers for a given context and transaction type. +//! - [`create_round_txhandlers`] - Creates round and ready-to-reimburse transaction handlers for a specific operator and round. +//! + +use super::input::UtxoVout; +use super::operator_assert::{ + create_latest_blockhash_timeout_txhandler, create_latest_blockhash_txhandler, +}; +use super::{remove_txhandler_from_map, RoundTxInput}; +use crate::actor::Actor; +use crate::bitvm_client::ClementineBitVMPublicKeys; +use crate::builder; +use crate::builder::script::{SpendableScript, TimelockScript, WinternitzCommit}; +use crate::builder::transaction::operator_reimburse::DisprovePath; +use crate::builder::transaction::{ + create_assert_timeout_txhandlers, create_challenge_timeout_txhandler, create_kickoff_txhandler, + create_mini_asserts, create_round_txhandler, create_unspent_kickoff_txhandlers, AssertScripts, + TransactionType, TxHandler, +}; +use crate::config::protocol::ProtocolParamset; +use crate::database::{Database, DatabaseTransaction}; +use crate::deposit::{DepositData, KickoffData, OperatorData}; +use crate::errors::{BridgeError, TxError}; +use crate::operator::{PublicHash, RoundIndex}; +use bitcoin::hashes::Hash; +use bitcoin::key::Secp256k1; +use bitcoin::taproot::TaprootBuilder; +use bitcoin::{OutPoint, XOnlyPublicKey}; +use bitvm::clementine::additional_disprove::{ + create_additional_replacable_disprove_script_with_dummy, replace_placeholders_in_script, +}; +use circuits_lib::bridge_circuit::deposit_constant; +use eyre::Context; +use eyre::OptionExt; +use 
std::collections::BTreeMap; +use std::sync::Arc; + +// helper function to get a txhandler from a hashmap +fn get_txhandler( + txhandlers: &BTreeMap, + tx_type: TransactionType, +) -> Result<&TxHandler, TxError> { + txhandlers + .get(&tx_type) + .ok_or(TxError::TxHandlerNotFound(tx_type)) +} + +/// Helper struct to get specific kickoff winternitz keys for a sequential collateral tx +#[derive(Debug, Clone)] +pub struct KickoffWinternitzKeys { + pub keys: Vec, + num_kickoffs_per_round: usize, + num_rounds: usize, +} + +impl KickoffWinternitzKeys { + /// Creates a new [`KickoffWinternitzKeys`] with the given keys and number per round. + pub fn new( + keys: Vec, + num_kickoffs_per_round: usize, + num_rounds: usize, + ) -> Self { + Self { + keys, + num_kickoffs_per_round, + num_rounds, + } + } + + /// Get the winternitz keys for a specific round tx. + /// + /// # Arguments + /// * `round_idx` - The index of the round. + /// + /// # Returns + /// A slice of Winternitz public keys for the given round. + pub fn get_keys_for_round( + &self, + round_idx: RoundIndex, + ) -> Result<&[bitvm::signatures::winternitz::PublicKey], TxError> { + // 0th round is the collateral, there are no keys for the 0th round + // Additionally there are no keys after num_rounds + 1, +1 is because we need additional round to generate + // reimbursement connectors of previous round + if round_idx == RoundIndex::Collateral || round_idx.to_index() > self.num_rounds + 1 { + return Err(TxError::InvalidRoundIndex(round_idx)); + } + let start_idx = (round_idx.to_index()) + .checked_sub(1) // 0th round is the collateral, there are no keys for the 0th round + .ok_or(TxError::IndexOverflow)? 
+ .checked_mul(self.num_kickoffs_per_round) + .ok_or(TxError::IndexOverflow)?; + let end_idx = start_idx + .checked_add(self.num_kickoffs_per_round) + .ok_or(TxError::IndexOverflow)?; + Ok(&self.keys[start_idx..end_idx]) + } +} + +/// Struct to retrieve and cache data from DB for creating TxHandlers on demand +/// It can only store information for one deposit and operator pair. +/// It has two context modes, for rounds or for deposits. Deposit context needs additional information, like the deposit outpoint, which is not needed for rounds. +/// Round context can only create transactions that do not depend on the deposit, like the round tx and ready to reimburse tx. +/// Deposit context can create all transactions. +/// Note: This cache is specific to a single operator, for each operator a new cache is needed. +#[derive(Debug)] +pub struct ReimburseDbCache<'a, 'b> { + pub db: Database, + pub operator_xonly_pk: XOnlyPublicKey, + pub deposit_outpoint: Option, + pub paramset: &'static ProtocolParamset, + /// Optional database transaction to use for the cache. 
+ dbtx: Option>, + /// winternitz keys to sign the kickoff tx with the blockhash + kickoff_winternitz_keys: Option, + /// bitvm assert scripts for each assert utxo + bitvm_assert_addr: Option>, + /// bitvm disprove scripts taproot merkle tree root hash + bitvm_disprove_root_hash: Option<[u8; 32]>, + /// Public hashes to acknowledge watchtower challenges + challenge_ack_hashes: Option>, + /// operator data + operator_data: Option, + /// latest blockhash root hash + latest_blockhash_root_hash: Option<[u8; 32]>, + /// replaceable additional disprove script + replaceable_additional_disprove_script: Option>, +} + +impl<'a, 'b> ReimburseDbCache<'a, 'b> { + /// Creates a db cache that can be used to create txhandlers for a specific operator and deposit/kickoff + pub fn new_for_deposit( + db: Database, + operator_xonly_pk: XOnlyPublicKey, + deposit_outpoint: bitcoin::OutPoint, + paramset: &'static ProtocolParamset, + dbtx: Option>, + ) -> Self { + Self { + db, + operator_xonly_pk, + deposit_outpoint: Some(deposit_outpoint), + paramset, + dbtx, + kickoff_winternitz_keys: None, + bitvm_assert_addr: None, + bitvm_disprove_root_hash: None, + challenge_ack_hashes: None, + operator_data: None, + latest_blockhash_root_hash: None, + replaceable_additional_disprove_script: None, + } + } + + /// Creates a db cache that can be used to create txhandlers for a specific operator and collateral chain + pub fn new_for_rounds( + db: Database, + operator_xonly_pk: XOnlyPublicKey, + paramset: &'static ProtocolParamset, + dbtx: Option>, + ) -> Self { + Self { + db, + operator_xonly_pk, + deposit_outpoint: None, + paramset, + dbtx, + kickoff_winternitz_keys: None, + bitvm_assert_addr: None, + bitvm_disprove_root_hash: None, + challenge_ack_hashes: None, + operator_data: None, + latest_blockhash_root_hash: None, + replaceable_additional_disprove_script: None, + } + } + + /// Creates a db cache from a contract context. 
This context can possible include a deposit data, for which it will be equivalent to new_for_deposit, otherwise it will be equivalent to new_for_rounds. + pub fn from_context( + db: Database, + context: &ContractContext, + dbtx: Option>, + ) -> Self { + if context.deposit_data.is_some() { + let deposit_data = context + .deposit_data + .as_ref() + .expect("checked in if statement"); + Self::new_for_deposit( + db, + context.operator_xonly_pk, + deposit_data.get_deposit_outpoint(), + context.paramset, + dbtx, + ) + } else { + Self::new_for_rounds(db, context.operator_xonly_pk, context.paramset, dbtx) + } + } + + pub async fn get_operator_data(&mut self) -> Result<&OperatorData, BridgeError> { + match self.operator_data { + Some(ref data) => Ok(data), + None => { + self.operator_data = Some( + self.db + .get_operator(self.dbtx.as_deref_mut(), self.operator_xonly_pk) + .await + .wrap_err("Failed to get operator data from database")? + .ok_or_eyre(format!( + "Operator not found for xonly_pk {}", + self.operator_xonly_pk + ))?, + ); + Ok(self.operator_data.as_ref().expect("Inserted before")) + } + } + } + + async fn get_bitvm_setup(&mut self, deposit_outpoint: OutPoint) -> Result<(), BridgeError> { + let (assert_addr, bitvm_hash, latest_blockhash_root_hash) = self + .db + .get_bitvm_setup( + self.dbtx.as_deref_mut(), + self.operator_xonly_pk, + deposit_outpoint, + ) + .await + .wrap_err("Failed to get bitvm setup in ReimburseDbCache::get_bitvm_setup")? 
+ .ok_or(TxError::BitvmSetupNotFound( + self.operator_xonly_pk, + deposit_outpoint.txid, + ))?; + self.bitvm_assert_addr = Some(assert_addr); + self.bitvm_disprove_root_hash = Some(bitvm_hash); + self.latest_blockhash_root_hash = Some(latest_blockhash_root_hash); + Ok(()) + } + + pub async fn get_kickoff_winternitz_keys( + &mut self, + ) -> Result<&KickoffWinternitzKeys, BridgeError> { + match self.kickoff_winternitz_keys { + Some(ref keys) => Ok(keys), + None => { + self.kickoff_winternitz_keys = Some(KickoffWinternitzKeys::new( + self.db + .get_operator_kickoff_winternitz_public_keys( + self.dbtx.as_deref_mut(), + self.operator_xonly_pk, + ) + .await + .wrap_err("Failed to get kickoff winternitz keys from database")?, + self.paramset.num_kickoffs_per_round, + self.paramset.num_round_txs, + )); + Ok(self + .kickoff_winternitz_keys + .as_ref() + .expect("Inserted before")) + } + } + } + + pub async fn get_bitvm_assert_hash(&mut self) -> Result<&[[u8; 32]], BridgeError> { + if let Some(deposit_outpoint) = &self.deposit_outpoint { + match self.bitvm_assert_addr { + Some(ref addr) => Ok(addr), + None => { + self.get_bitvm_setup(*deposit_outpoint).await?; + Ok(self.bitvm_assert_addr.as_ref().expect("Inserted before")) + } + } + } else { + Err(TxError::InsufficientContext.into()) + } + } + + pub async fn get_replaceable_additional_disprove_script( + &mut self, + ) -> Result<&Vec, BridgeError> { + if let Some(ref script) = self.replaceable_additional_disprove_script { + return Ok(script); + } + + let deposit_outpoint = self.deposit_outpoint.ok_or(TxError::InsufficientContext)?; + + let bitvm_wpks = self + .db + .get_operator_bitvm_keys( + self.dbtx.as_deref_mut(), + self.operator_xonly_pk, + deposit_outpoint, + ) + .await?; + + let challenge_ack_hashes = self + .db + .get_operators_challenge_ack_hashes( + self.dbtx.as_deref_mut(), + self.operator_xonly_pk, + deposit_outpoint, + ) + .await? 
+ .ok_or(BridgeError::InvalidChallengeAckHashes)?; + + let bitvm_keys = ClementineBitVMPublicKeys::from_flattened_vec(&bitvm_wpks); + + let script = create_additional_replacable_disprove_script_with_dummy( + *self.paramset.bridge_circuit_constant()?, + bitvm_keys.bitvm_pks.0[0].to_vec(), + bitvm_keys.latest_blockhash_pk.to_vec(), + bitvm_keys.challenge_sending_watchtowers_pk.to_vec(), + challenge_ack_hashes, + ); + + self.replaceable_additional_disprove_script = Some(script); + Ok(self + .replaceable_additional_disprove_script + .as_ref() + .expect("Cached above")) + } + + pub async fn get_challenge_ack_hashes(&mut self) -> Result<&[PublicHash], BridgeError> { + if let Some(deposit_outpoint) = &self.deposit_outpoint { + match self.challenge_ack_hashes { + Some(ref hashes) => Ok(hashes), + None => { + self.challenge_ack_hashes = Some( + self.db + .get_operators_challenge_ack_hashes( + self.dbtx.as_deref_mut(), + self.operator_xonly_pk, + *deposit_outpoint, + ) + .await + .wrap_err("Failed to get challenge ack hashes from database in ReimburseDbCache")? 
+ .ok_or(eyre::eyre!( + "Watchtower public hashes not found for operator {0:?} and deposit {1}", + self.operator_xonly_pk, + deposit_outpoint.txid, + ))?, + ); + Ok(self.challenge_ack_hashes.as_ref().expect("Inserted before")) + } + } + } else { + Err(TxError::InsufficientContext.into()) + } + } + + pub async fn get_bitvm_disprove_root_hash(&mut self) -> Result<&[u8; 32], BridgeError> { + if let Some(deposit_outpoint) = &self.deposit_outpoint { + match self.bitvm_disprove_root_hash { + Some(ref hash) => Ok(hash), + None => { + self.get_bitvm_setup(*deposit_outpoint).await?; + Ok(self + .bitvm_disprove_root_hash + .as_ref() + .expect("Inserted before")) + } + } + } else { + Err(TxError::InsufficientContext.into()) + } + } + + pub async fn get_latest_blockhash_root_hash(&mut self) -> Result<&[u8; 32], BridgeError> { + if let Some(deposit_outpoint) = &self.deposit_outpoint { + match self.latest_blockhash_root_hash { + Some(ref hash) => Ok(hash), + None => { + self.get_bitvm_setup(*deposit_outpoint).await?; + Ok(self + .latest_blockhash_root_hash + .as_ref() + .expect("Inserted before")) + } + } + } else { + Err(TxError::InsufficientContext.into()) + } + } +} + +/// Context for a single operator and round, and optionally a single deposit. +/// Data about deposit and kickoff idx is needed to create the deposit-related transactions. +/// For non deposit related transactions, like the round tx and ready to reimburse tx, the round idx is enough. 
+#[derive(Debug, Clone)] +pub struct ContractContext { + /// required + pub operator_xonly_pk: XOnlyPublicKey, + pub round_idx: RoundIndex, + pub paramset: &'static ProtocolParamset, + /// optional (only used for after kickoff) + pub kickoff_idx: Option, + pub deposit_data: Option, + signer: Option, +} + +impl ContractContext { + /// Contains all necessary context for creating txhandlers for a specific operator and collateral chain + pub fn new_context_for_round( + operator_xonly_pk: XOnlyPublicKey, + round_idx: RoundIndex, + paramset: &'static ProtocolParamset, + ) -> Self { + Self { + operator_xonly_pk, + round_idx, + paramset, + kickoff_idx: None, + deposit_data: None, + signer: None, + } + } + + /// Contains all necessary context for creating txhandlers for a specific operator, kickoff utxo, and a deposit + pub fn new_context_for_kickoff( + kickoff_data: KickoffData, + deposit_data: DepositData, + paramset: &'static ProtocolParamset, + ) -> Self { + Self { + operator_xonly_pk: kickoff_data.operator_xonly_pk, + round_idx: kickoff_data.round_idx, + paramset, + kickoff_idx: Some(kickoff_data.kickoff_idx), + deposit_data: Some(deposit_data), + signer: None, + } + } + + /// Contains all necessary context for creating txhandlers for a specific operator, kickoff utxo, and a deposit + /// Additionally holds signer of an actor that can generate the actual winternitz public keys for operator, + /// and append evm address to the challenge tx for verifier. 
+ pub fn new_context_with_signer( + kickoff_data: KickoffData, + deposit_data: DepositData, + paramset: &'static ProtocolParamset, + signer: Actor, + ) -> Self { + Self { + operator_xonly_pk: kickoff_data.operator_xonly_pk, + round_idx: kickoff_data.round_idx, + paramset, + kickoff_idx: Some(kickoff_data.kickoff_idx), + deposit_data: Some(deposit_data), + signer: Some(signer), + } + } + + /// Returns if the context is for a kickoff + pub fn is_context_for_kickoff(&self) -> bool { + self.deposit_data.is_some() && self.kickoff_idx.is_some() + } +} + +/// Stores and manages cached transaction handlers for efficient flow construction. +/// +/// This cache is used to avoid redundant construction of common transactions (such as round and move-to-vault transactions) +/// when creating all transactions for a single operator, kickoff utxo, and deposit tuple. It is especially important during deposit flows, +/// where many transactions share common intermediates. The cache tracks the previous ready-to-reimburse transaction and a map of saved +/// transaction handlers by type. +/// Note: Why is prev_ready_to_reimburse needed and not just stored in saved_txs? Because saved_txs can include the ReadyToReimburse txhandler for the current round, prev_ready_to_reimburse is specifically from the previous round. +/// +/// # Fields +/// +/// - `prev_ready_to_reimburse`: Optionally stores the previous round's ready-to-reimburse transaction handler. +/// - `saved_txs`: A map from [`TransactionType`] to [`TxHandler`], storing cached transaction handlers for the current context. +/// +/// # Usage +/// +/// - Use `store_for_next_kickoff` to cache the current round's main transactions before moving to the next kickoff within the same round. +/// - Use `store_for_next_round` to update the cache when moving to the next round, preserving the necessary state. +/// - Use `get_cached_txs` to retrieve and clear the current cache when constructing new transactions. 
+/// - Use `get_prev_ready_to_reimburse` to access the previous round's ready-to-reimburse transaction to create the next round's round tx. +pub struct TxHandlerCache { + pub prev_ready_to_reimburse: Option, + pub saved_txs: BTreeMap, +} + +impl Default for TxHandlerCache { + fn default() -> Self { + Self::new() + } +} + +impl TxHandlerCache { + /// Creates a new, empty cache. + pub fn new() -> Self { + Self { + saved_txs: BTreeMap::new(), + prev_ready_to_reimburse: None, + } + } + /// Stores txhandlers for the next kickoff, caching MoveToVault, Round, and ReadyToReimburse. + /// + /// Removes these transaction types from the provided map and stores them in the cache. + /// This is used to preserve the state between kickoffs within the same round. + pub fn store_for_next_kickoff( + &mut self, + txhandlers: &mut BTreeMap, + ) -> Result<(), BridgeError> { + // can possibly cache next round tx too, as next round has the needed reimburse utxos + // but need to implement a new TransactionType for that + for tx_type in [ + TransactionType::MoveToVault, + TransactionType::Round, + TransactionType::ReadyToReimburse, + ] + .iter() + { + let txhandler = txhandlers + .remove(tx_type) + .ok_or(TxError::TxHandlerNotFound(*tx_type))?; + self.saved_txs.insert(*tx_type, txhandler); + } + Ok(()) + } + /// Stores MoveToVault and previous ReadyToReimburse for the next round. + /// + /// Moves the MoveToVault and ReadyToReimburse txhandlers from the cache to their respective fields, + /// clearing the rest of the cache. This is used to preserve the state between rounds. 
+ pub fn store_for_next_round(&mut self) -> Result<(), BridgeError> { + let move_to_vault = + remove_txhandler_from_map(&mut self.saved_txs, TransactionType::MoveToVault)?; + self.prev_ready_to_reimburse = Some(remove_txhandler_from_map( + &mut self.saved_txs, + TransactionType::ReadyToReimburse, + )?); + self.saved_txs = BTreeMap::new(); + self.saved_txs + .insert(move_to_vault.get_transaction_type(), move_to_vault); + Ok(()) + } + /// Gets the previous ReadyToReimburse txhandler, if any. + /// + /// This is used to chain rounds together, as the output of the previous ready-to-reimburse transaction + /// is needed as input for the next round's round transaction. Without caching, we would have to create the full collateral chain again. + pub fn get_prev_ready_to_reimburse(&self) -> Option<&TxHandler> { + self.prev_ready_to_reimburse.as_ref() + } + /// Takes and returns all cached txhandlers, clearing the cache. + pub fn get_cached_txs(&mut self) -> BTreeMap { + std::mem::take(&mut self.saved_txs) + } +} + +/// Creates all required transaction handlers for a given context and transaction type. +/// +/// This function builds and caches all necessary transaction handlers for the specified transaction type, operator, round, and deposit context. +/// It handles the full flow of collateral, kickoff, challenge, reimbursement, and assertion transactions, including round management and challenge handling. +/// Function returns early if the needed txhandler is already created. +/// Currently there are 3 kinds of specific transaction types that can be given as parameter that change the logic flow +/// - AllNeededForDeposit: Creates all transactions, including the round tx's and deposit related tx's. +/// - Round related tx's (Round, ReadyToReimburse, UnspentKickoff): Creates only round related tx's and returns early. +/// - MiniAssert and LatestBlockhash: These tx's are created to commit data in their witness using winternitz signatures. 
To enable signing these transactions, the kickoff transaction (where the input of MiniAssert and LatestBlockhash resides) needs to be created with the full list of scripts in its TxHandler data. This may take some time especially for a deposit where thousands of kickoff tx's are created. That's why if MiniAssert or LatestBlockhash is not requested, these scripts are not created and just the merkle root hash of these scripts is used to create the kickoff tx. But if these tx's are requested, the full list of scripts is needed to create the kickoff tx, to enable signing these transactions with winternitz signatures. +/// +/// # Arguments +/// +/// * `transaction_type` - The type of transaction(s) to create. +/// * `context` - The contract context (operator, round, deposit, etc). +/// * `txhandler_cache` - Cache for storing/retrieving intermediate txhandlers. +/// * `db_cache` - Database-backed cache for retrieving protocol data. +/// +/// # Returns +/// +/// A map of [`TransactionType`] to [`TxHandler`] for all constructed transactions, or a [`BridgeError`] if construction fails. +pub async fn create_txhandlers( + transaction_type: TransactionType, + context: ContractContext, + txhandler_cache: &mut TxHandlerCache, + db_cache: &mut ReimburseDbCache<'_, '_>, +) -> Result, BridgeError> { + let paramset = db_cache.paramset; + + let operator_data = db_cache.get_operator_data().await?.clone(); + let kickoff_winternitz_keys = db_cache.get_kickoff_winternitz_keys().await?.clone(); + + let ContractContext { + operator_xonly_pk, + round_idx, + .. 
+ } = context; + + let mut txhandlers = txhandler_cache.get_cached_txs(); + if !txhandlers.contains_key(&TransactionType::Round) { + // create round tx, ready to reimburse tx, and unspent kickoff txs if not in cache + let round_txhandlers = create_round_txhandlers( + paramset, + round_idx, + &operator_data, + &kickoff_winternitz_keys, + txhandler_cache.get_prev_ready_to_reimburse(), + )?; + for round_txhandler in round_txhandlers.into_iter() { + txhandlers.insert(round_txhandler.get_transaction_type(), round_txhandler); + } + } + + if matches!( + transaction_type, + TransactionType::Round + | TransactionType::ReadyToReimburse + | TransactionType::UnspentKickoff(_) + ) { + // return if only one of the collateral tx's were requested + // do not continue as we might not have the necessary context for the remaining tx's + return Ok(txhandlers); + } + + // get the next round txhandler (because reimburse connectors will be in it) + let next_round_txhandler = create_round_txhandler( + operator_data.xonly_pk, + RoundTxInput::Prevout(Box::new( + get_txhandler(&txhandlers, TransactionType::ReadyToReimburse)? 
+ .get_spendable_output(UtxoVout::CollateralInReadyToReimburse)?, + )), + kickoff_winternitz_keys.get_keys_for_round(round_idx.next_round())?, + paramset, + )?; + + let mut deposit_data = context.deposit_data.ok_or(TxError::InsufficientContext)?; + let kickoff_data = KickoffData { + operator_xonly_pk, + round_idx, + kickoff_idx: context.kickoff_idx.ok_or(TxError::InsufficientContext)?, + }; + + if !txhandlers.contains_key(&TransactionType::MoveToVault) { + // if not cached create move_txhandler + let move_txhandler = + builder::transaction::create_move_to_vault_txhandler(&mut deposit_data, paramset)?; + txhandlers.insert(move_txhandler.get_transaction_type(), move_txhandler); + } + + let challenge_ack_hashes = db_cache.get_challenge_ack_hashes().await?.to_vec(); + + let num_asserts = ClementineBitVMPublicKeys::number_of_assert_txs(); + let public_hashes = challenge_ack_hashes; + + let move_txid = txhandlers + .get(&TransactionType::MoveToVault) + .ok_or(TxError::TxHandlerNotFound(TransactionType::MoveToVault))? + .get_txid() + .to_byte_array(); + + let round_txid = txhandlers + .get(&TransactionType::Round) + .ok_or(TxError::TxHandlerNotFound(TransactionType::Round))? 
+ .get_txid() + .to_byte_array(); + + let vout = UtxoVout::Kickoff(kickoff_data.kickoff_idx as usize).get_vout(); + let watchtower_challenge_start_idx = UtxoVout::WatchtowerChallenge(0).get_vout() as u16; + let secp = Secp256k1::verification_only(); + + let nofn_key: XOnlyPublicKey = deposit_data.get_nofn_xonly_pk()?; + + let watchtower_xonly_pk = deposit_data.get_watchtowers(); + let watchtower_pubkeys = watchtower_xonly_pk + .iter() + .map(|xonly_pk| { + let nofn_2week = Arc::new(TimelockScript::new( + Some(nofn_key), + paramset.watchtower_challenge_timeout_timelock, + )); + + let builder = TaprootBuilder::new(); + let tweaked = builder + .add_leaf(0, nofn_2week.to_script_buf()) + .expect("Valid script leaf") + .finalize(&secp, *xonly_pk) + .expect("taproot finalize must succeed"); + + tweaked.output_key().serialize() + }) + .collect::>(); + + let deposit_constant = deposit_constant( + operator_xonly_pk.serialize(), + watchtower_challenge_start_idx, + &watchtower_pubkeys, + move_txid, + round_txid, + vout, + context.paramset.genesis_chain_state_hash, + ); + + tracing::debug!( + target: "ci", + "Create txhandlers - Genesis height: {:?}, operator_xonly_pk: {:?}, move_txid: {:?}, round_txid: {:?}, vout: {:?}, watchtower_challenge_start_idx: {:?}, genesis_chain_state_hash: {:?}, deposit_constant: {:?}", + context.paramset.genesis_height, + operator_xonly_pk, + move_txid, + round_txid, + vout, + watchtower_challenge_start_idx, + context.paramset.genesis_chain_state_hash, + deposit_constant.0, + ); + + tracing::debug!( + "Deposit constant for {:?}: {:?} - deposit outpoint: {:?}", + operator_xonly_pk, + deposit_constant.0, + deposit_data.get_deposit_outpoint(), + ); + + let payout_tx_blockhash_pk = kickoff_winternitz_keys + .get_keys_for_round(round_idx)? + .get(kickoff_data.kickoff_idx as usize) + .ok_or(TxError::IndexOverflow)? 
+ .clone(); + + tracing::debug!( + target: "ci", + "Payout tx blockhash pk: {:?}", + payout_tx_blockhash_pk + ); + + let additional_disprove_script = db_cache + .get_replaceable_additional_disprove_script() + .await? + .clone(); + + let additional_disprove_script = replace_placeholders_in_script( + additional_disprove_script, + payout_tx_blockhash_pk, + deposit_constant.0, + ); + let disprove_root_hash = *db_cache.get_bitvm_disprove_root_hash().await?; + let latest_blockhash_root_hash = *db_cache.get_latest_blockhash_root_hash().await?; + + let disprove_path = if transaction_type == TransactionType::Disprove { + let actor = context.signer.clone().ok_or(TxError::InsufficientContext)?; + let bitvm_pks = + actor.generate_bitvm_pks_for_deposit(deposit_data.get_deposit_outpoint(), paramset)?; + let disprove_scripts = bitvm_pks.get_g16_verifier_disprove_scripts()?; + DisprovePath::Scripts(disprove_scripts) + } else { + DisprovePath::HiddenNode(&disprove_root_hash) + }; + + let kickoff_txhandler = if matches!( + transaction_type, + TransactionType::LatestBlockhash | TransactionType::MiniAssert(_) + ) { + // create scripts if any mini assert tx or latest blockhash tx is specifically requested as it needs + // the actual scripts to be able to spend + let actor = context.signer.clone().ok_or(TxError::InsufficientContext)?; + + // deposit_data.deposit_outpoint.txid + + let bitvm_pks = + actor.generate_bitvm_pks_for_deposit(deposit_data.get_deposit_outpoint(), paramset)?; + + let assert_scripts = bitvm_pks.get_assert_scripts(operator_data.xonly_pk); + + let latest_blockhash_script = Arc::new(WinternitzCommit::new( + vec![(bitvm_pks.latest_blockhash_pk.to_vec(), 40)], + operator_data.xonly_pk, + context.paramset.winternitz_log_d, + )); + + let kickoff_txhandler = create_kickoff_txhandler( + kickoff_data, + get_txhandler(&txhandlers, TransactionType::Round)?, + get_txhandler(&txhandlers, TransactionType::MoveToVault)?, + &mut deposit_data, + operator_data.xonly_pk, + 
AssertScripts::AssertSpendableScript(assert_scripts), + disprove_path, + additional_disprove_script.clone(), + AssertScripts::AssertSpendableScript(vec![latest_blockhash_script]), + &public_hashes, + paramset, + )?; + + // Create and insert mini_asserts into return Vec + let mini_asserts = create_mini_asserts(&kickoff_txhandler, num_asserts, paramset)?; + + for mini_assert in mini_asserts.into_iter() { + txhandlers.insert(mini_assert.get_transaction_type(), mini_assert); + } + + let latest_blockhash_txhandler = + create_latest_blockhash_txhandler(&kickoff_txhandler, paramset)?; + txhandlers.insert( + latest_blockhash_txhandler.get_transaction_type(), + latest_blockhash_txhandler, + ); + + kickoff_txhandler + } else { + // use db data for scripts + create_kickoff_txhandler( + kickoff_data, + get_txhandler(&txhandlers, TransactionType::Round)?, + get_txhandler(&txhandlers, TransactionType::MoveToVault)?, + &mut deposit_data, + operator_data.xonly_pk, + AssertScripts::AssertScriptTapNodeHash(db_cache.get_bitvm_assert_hash().await?), + disprove_path, + additional_disprove_script.clone(), + AssertScripts::AssertScriptTapNodeHash(&[latest_blockhash_root_hash]), + &public_hashes, + paramset, + )? + }; + + txhandlers.insert(kickoff_txhandler.get_transaction_type(), kickoff_txhandler); + + // Creates the challenge_tx handler. 
+ let challenge_txhandler = builder::transaction::create_challenge_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + &operator_data.reimburse_addr, + context.signer.map(|s| s.get_evm_address()).transpose()?, + paramset, + )?; + txhandlers.insert( + challenge_txhandler.get_transaction_type(), + challenge_txhandler, + ); + + // Creates the challenge timeout txhandler + let challenge_timeout_txhandler = create_challenge_timeout_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + paramset, + )?; + + txhandlers.insert( + challenge_timeout_txhandler.get_transaction_type(), + challenge_timeout_txhandler, + ); + + let kickoff_not_finalized_txhandler = + builder::transaction::create_kickoff_not_finalized_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + get_txhandler(&txhandlers, TransactionType::ReadyToReimburse)?, + paramset, + )?; + txhandlers.insert( + kickoff_not_finalized_txhandler.get_transaction_type(), + kickoff_not_finalized_txhandler, + ); + + let latest_blockhash_timeout_txhandler = create_latest_blockhash_timeout_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + get_txhandler(&txhandlers, TransactionType::Round)?, + paramset, + )?; + txhandlers.insert( + latest_blockhash_timeout_txhandler.get_transaction_type(), + latest_blockhash_timeout_txhandler, + ); + + // create watchtower tx's except WatchtowerChallenges + for watchtower_idx in 0..deposit_data.get_num_watchtowers() { + // Each watchtower will sign their Groth16 proof of the header chain circuit. 
Then, the operator will either + // - acknowledge the challenge by sending the operator_challenge_ACK_tx, otherwise their burn connector + // will get burned by operator_challenge_nack + let watchtower_challenge_timeout_txhandler = + builder::transaction::create_watchtower_challenge_timeout_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + watchtower_idx, + paramset, + )?; + txhandlers.insert( + watchtower_challenge_timeout_txhandler.get_transaction_type(), + watchtower_challenge_timeout_txhandler, + ); + + let operator_challenge_nack_txhandler = + builder::transaction::create_operator_challenge_nack_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + watchtower_idx, + get_txhandler(&txhandlers, TransactionType::Round)?, + paramset, + )?; + txhandlers.insert( + operator_challenge_nack_txhandler.get_transaction_type(), + operator_challenge_nack_txhandler, + ); + + let operator_challenge_ack_txhandler = + builder::transaction::create_operator_challenge_ack_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + watchtower_idx, + paramset, + )?; + txhandlers.insert( + operator_challenge_ack_txhandler.get_transaction_type(), + operator_challenge_ack_txhandler, + ); + } + + if let TransactionType::WatchtowerChallenge(_) = transaction_type { + return Err(eyre::eyre!( + "Can't directly create a watchtower challenge in create_txhandlers as it needs commit data".to_string(), + ).into()); + } + + let assert_timeouts = create_assert_timeout_txhandlers( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + get_txhandler(&txhandlers, TransactionType::Round)?, + num_asserts, + paramset, + )?; + + for assert_timeout in assert_timeouts.into_iter() { + txhandlers.insert(assert_timeout.get_transaction_type(), assert_timeout); + } + + // Creates the disprove_timeout_tx handler. 
+ let disprove_timeout_txhandler = builder::transaction::create_disprove_timeout_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + paramset, + )?; + + txhandlers.insert( + disprove_timeout_txhandler.get_transaction_type(), + disprove_timeout_txhandler, + ); + + // Creates the reimburse_tx handler. + let reimburse_txhandler = builder::transaction::create_reimburse_txhandler( + get_txhandler(&txhandlers, TransactionType::MoveToVault)?, + &next_round_txhandler, + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + kickoff_data.kickoff_idx as usize, + paramset, + &operator_data.reimburse_addr, + )?; + + txhandlers.insert( + reimburse_txhandler.get_transaction_type(), + reimburse_txhandler, + ); + + match transaction_type { + TransactionType::AllNeededForDeposit | TransactionType::Disprove => { + let disprove_txhandler = builder::transaction::create_disprove_txhandler( + get_txhandler(&txhandlers, TransactionType::Kickoff)?, + get_txhandler(&txhandlers, TransactionType::Round)?, + )?; + + txhandlers.insert( + disprove_txhandler.get_transaction_type(), + disprove_txhandler, + ); + } + _ => {} + } + + Ok(txhandlers) +} + +/// Creates the round and ready-to-reimburse txhandlers for a specific operator and round index. +/// These transactions currently include round tx, ready to reimburse tx, and unspent kickoff txs. +/// +/// # Arguments +/// +/// * `paramset` - Protocol parameter set. +/// * `round_idx` - The index of the round. +/// * `operator_data` - Data for the operator. +/// * `kickoff_winternitz_keys` - All winternitz keys of the operator. +/// * `prev_ready_to_reimburse` - Previous ready-to-reimburse txhandler, if any, to not create the full collateral chain if we already have the previous round's ready to reimburse txhandler. +/// +/// # Returns +/// +/// A vector of [`TxHandler`] for the round, ready-to-reimburse, and unspent kickoff transactions, or a [`BridgeError`] if construction fails. 
+pub fn create_round_txhandlers( + paramset: &'static ProtocolParamset, + round_idx: RoundIndex, + operator_data: &OperatorData, + kickoff_winternitz_keys: &KickoffWinternitzKeys, + prev_ready_to_reimburse: Option<&TxHandler>, +) -> Result, BridgeError> { + let mut txhandlers = Vec::with_capacity(2 + paramset.num_kickoffs_per_round); + + let (round_txhandler, ready_to_reimburse_txhandler) = match prev_ready_to_reimburse { + Some(prev_ready_to_reimburse_txhandler) => { + if round_idx == RoundIndex::Collateral || round_idx == RoundIndex::Round(0) { + return Err( + eyre::eyre!("Round 0 cannot be created from prev_ready_to_reimburse").into(), + ); + } + let round_txhandler = builder::transaction::create_round_txhandler( + operator_data.xonly_pk, + RoundTxInput::Prevout(Box::new( + prev_ready_to_reimburse_txhandler + .get_spendable_output(UtxoVout::CollateralInReadyToReimburse)?, + )), + kickoff_winternitz_keys.get_keys_for_round(round_idx)?, + paramset, + )?; + + let ready_to_reimburse_txhandler = + builder::transaction::create_ready_to_reimburse_txhandler( + &round_txhandler, + operator_data.xonly_pk, + paramset, + )?; + (round_txhandler, ready_to_reimburse_txhandler) + } + None => { + // create nth sequential collateral tx and reimburse generator tx for the operator + builder::transaction::create_round_nth_txhandler( + operator_data.xonly_pk, + operator_data.collateral_funding_outpoint, + paramset.collateral_funding_amount, + round_idx, + kickoff_winternitz_keys, + paramset, + )? 
+ } + }; + + let unspent_kickoffs = create_unspent_kickoff_txhandlers( + &round_txhandler, + &ready_to_reimburse_txhandler, + paramset, + )?; + + txhandlers.push(round_txhandler); + txhandlers.push(ready_to_reimburse_txhandler); + + for unspent_kickoff in unspent_kickoffs.into_iter() { + txhandlers.push(unspent_kickoff); + } + + Ok(txhandlers) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::bitvm_client::ClementineBitVMPublicKeys; + use crate::builder::transaction::sign::get_kickoff_utxos_to_sign; + use crate::builder::transaction::{TransactionType, TxHandlerBuilder}; + use crate::config::BridgeConfig; + use crate::deposit::{DepositInfo, KickoffData}; + use crate::rpc::clementine::{SignedTxsWithType, TransactionRequest}; + use crate::test::common::citrea::MockCitreaClient; + use crate::test::common::test_actors::TestActors; + use crate::test::common::*; + use bitcoin::{BlockHash, Transaction, XOnlyPublicKey}; + use futures::future::try_join_all; + use std::collections::HashMap; + use tokio::sync::mpsc; + + fn signed_txs_to_txid(signed_txs: SignedTxsWithType) -> Vec<(TransactionType, bitcoin::Txid)> { + signed_txs + .signed_txs + .into_iter() + .map(|signed_tx| { + ( + signed_tx.transaction_type.unwrap().try_into().unwrap(), + bitcoin::consensus::deserialize::(&signed_tx.raw_tx) + .unwrap() + .compute_txid(), + ) + }) + .collect() + } + + /// This test first creates a vec of transaction types the entity should be able to sign. + /// Afterwards it calls internal_create_signed_txs for verifiers and operators, + /// internal_create_assert_commitment_txs for operators, and internal_create_watchtower_challenge for verifiers + /// and checks if all transaction types that should be signed are returned from these functions. + /// If a transaction type is not found, it means the entity is not able to sign it. 
+ async fn check_if_signable( + actors: TestActors, + deposit_info: DepositInfo, + deposit_blockhash: BlockHash, + config: BridgeConfig, + ) { + let paramset = config.protocol_paramset(); + let deposit_outpoint = deposit_info.deposit_outpoint; + + let mut txs_operator_can_sign = vec![ + TransactionType::Round, + TransactionType::ReadyToReimburse, + TransactionType::Kickoff, + TransactionType::KickoffNotFinalized, + TransactionType::Challenge, + TransactionType::DisproveTimeout, + TransactionType::Reimburse, + TransactionType::ChallengeTimeout, + TransactionType::LatestBlockhashTimeout, + ]; + txs_operator_can_sign + .extend((0..actors.get_num_verifiers()).map(TransactionType::OperatorChallengeNack)); + txs_operator_can_sign + .extend((0..actors.get_num_verifiers()).map(TransactionType::OperatorChallengeAck)); + txs_operator_can_sign.extend( + (0..ClementineBitVMPublicKeys::number_of_assert_txs()) + .map(TransactionType::AssertTimeout), + ); + txs_operator_can_sign + .extend((0..paramset.num_kickoffs_per_round).map(TransactionType::UnspentKickoff)); + txs_operator_can_sign.extend( + (0..actors.get_num_verifiers()).map(TransactionType::WatchtowerChallengeTimeout), + ); + + let operator_xonly_pks: Vec = actors.get_operators_xonly_pks(); + let mut utxo_idxs: Vec> = Vec::with_capacity(operator_xonly_pks.len()); + + for op_xonly_pk in &operator_xonly_pks { + utxo_idxs.push(get_kickoff_utxos_to_sign( + config.protocol_paramset(), + *op_xonly_pk, + deposit_blockhash, + deposit_outpoint, + )); + } + + let (tx, mut rx) = mpsc::unbounded_channel(); + let mut created_txs: HashMap<(KickoffData, TransactionType), Vec> = + HashMap::new(); + + // try to sign everything for all operators + let operator_task_handles: Vec<_> = actors + .get_operators() + .iter_mut() + .enumerate() + .map(|(operator_idx, operator_rpc)| { + let txs_operator_can_sign = txs_operator_can_sign.clone(); + let mut operator_rpc = operator_rpc.clone(); + let utxo_idxs = utxo_idxs.clone(); + let tx = 
tx.clone(); + let operator_xonly_pk = operator_xonly_pks[operator_idx]; + async move { + for round_idx in RoundIndex::iter_rounds(paramset.num_round_txs) { + for &kickoff_idx in &utxo_idxs[operator_idx] { + let kickoff_data = KickoffData { + operator_xonly_pk, + round_idx, + kickoff_idx: kickoff_idx as u32, + }; + let start_time = std::time::Instant::now(); + let raw_txs = operator_rpc + .internal_create_signed_txs(TransactionRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + kickoff_id: Some(kickoff_data.into()), + }) + .await + .unwrap() + .into_inner(); + // test if all needed tx's are signed + for tx_type in &txs_operator_can_sign { + assert!( + raw_txs + .signed_txs + .iter() + .any(|signed_tx| signed_tx.transaction_type + == Some((*tx_type).into())), + "Tx type: {:?} not found in signed txs for operator", + tx_type + ); + } + tracing::info!( + "Operator signed txs {:?} from rpc call in time {:?}", + TransactionType::AllNeededForDeposit, + start_time.elapsed() + ); + tx.send((kickoff_data, signed_txs_to_txid(raw_txs))) + .unwrap(); + let raw_assert_txs = operator_rpc + .internal_create_assert_commitment_txs(TransactionRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + kickoff_id: Some(kickoff_data.into()), + }) + .await + .unwrap() + .into_inner(); + tracing::info!( + "Operator Signed Assert txs of size: {}", + raw_assert_txs.signed_txs.len() + ); + tx.send((kickoff_data, signed_txs_to_txid(raw_assert_txs))) + .unwrap(); + } + } + } + }) + .map(tokio::task::spawn) + .collect(); + + let mut txs_verifier_can_sign = vec![ + TransactionType::Challenge, + TransactionType::KickoffNotFinalized, + TransactionType::LatestBlockhashTimeout, + //TransactionType::Disprove, + ]; + txs_verifier_can_sign + .extend((0..actors.get_num_verifiers()).map(TransactionType::OperatorChallengeNack)); + txs_verifier_can_sign.extend( + (0..ClementineBitVMPublicKeys::number_of_assert_txs()) + .map(TransactionType::AssertTimeout), + ); + txs_verifier_can_sign + 
.extend((0..paramset.num_kickoffs_per_round).map(TransactionType::UnspentKickoff)); + txs_verifier_can_sign.extend( + (0..actors.get_num_verifiers()).map(TransactionType::WatchtowerChallengeTimeout), + ); + + // try to sign everything for all verifiers + // try signing verifier transactions + let verifier_task_handles: Vec<_> = actors + .get_verifiers() + .iter_mut() + .map(|verifier_rpc| { + let txs_verifier_can_sign = txs_verifier_can_sign.clone(); + let mut verifier_rpc = verifier_rpc.clone(); + let utxo_idxs = utxo_idxs.clone(); + let tx = tx.clone(); + let operator_xonly_pks = operator_xonly_pks.clone(); + async move { + for (operator_idx, utxo_idx) in utxo_idxs.iter().enumerate() { + for round_idx in RoundIndex::iter_rounds(paramset.num_round_txs) { + for &kickoff_idx in utxo_idx { + let kickoff_data = KickoffData { + operator_xonly_pk: operator_xonly_pks[operator_idx], + round_idx, + kickoff_idx: kickoff_idx as u32, + }; + let start_time = std::time::Instant::now(); + let raw_txs = verifier_rpc + .internal_create_signed_txs(TransactionRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + kickoff_id: Some(kickoff_data.into()), + }) + .await + .unwrap() + .into_inner(); + // test if all needed tx's are signed + for tx_type in &txs_verifier_can_sign { + assert!( + raw_txs + .signed_txs + .iter() + .any(|signed_tx| signed_tx.transaction_type + == Some((*tx_type).into())), + "Tx type: {:?} not found in signed txs for verifier", + tx_type + ); + } + tracing::info!( + "Verifier signed txs {:?} from rpc call in time {:?}", + TransactionType::AllNeededForDeposit, + start_time.elapsed() + ); + tx.send((kickoff_data, signed_txs_to_txid(raw_txs))) + .unwrap(); + let _watchtower_challenge_tx = verifier_rpc + .internal_create_watchtower_challenge(TransactionRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + kickoff_id: Some(kickoff_data.into()), + }) + .await + .unwrap() + .into_inner(); + } + } + } + } + }) + .map(tokio::task::spawn) + .collect(); + 
+ drop(tx); + while let Some((kickoff_id, txids)) = rx.recv().await { + for (tx_type, txid) in txids { + created_txs + .entry((kickoff_id, tx_type)) + .or_default() + .push(txid); + } + } + + let mut incorrect = false; + + for ((kickoff_id, tx_type), txids) in &created_txs { + // for challenge tx, txids are different because op return with own evm address, skip it + if tx_type == &TransactionType::Challenge { + continue; + } + // check if all txids are equal + if !txids.iter().all(|txid| txid == &txids[0]) { + tracing::error!( + "Mismatch in Txids for kickoff_id: {:?}, tx_type: {:?}, Txids: {:?}", + kickoff_id, + tx_type, + txids + ); + incorrect = true; + } + } + assert!(!incorrect); + + try_join_all(operator_task_handles).await.unwrap(); + try_join_all(verifier_task_handles).await.unwrap(); + } + + #[cfg(feature = "automation")] + #[tokio::test(flavor = "multi_thread")] + async fn test_deposit_and_sign_txs() { + let mut config = create_test_config_with_thread_name().await; + let WithProcessCleanup(_, ref rpc, _, _) = create_regtest_rpc(&mut config).await; + + let (actors, deposit_params, _, deposit_blockhash, _) = + run_single_deposit::(&mut config, rpc.clone(), None, None, None) + .await + .unwrap(); + + check_if_signable(actors, deposit_params, deposit_blockhash, config.clone()).await; + } + + #[tokio::test(flavor = "multi_thread")] + #[cfg(feature = "automation")] + async fn test_replacement_deposit_and_sign_txs() { + let mut config = create_test_config_with_thread_name().await; + let WithProcessCleanup(_, ref rpc, _, _) = create_regtest_rpc(&mut config).await; + + let (mut actors, _deposit_info, old_move_txid, _deposit_blockhash, _verifiers_public_keys) = + run_single_deposit::(&mut config, rpc.clone(), None, None, None) + .await + .unwrap(); + + let old_nofn_xonly_pk = actors.get_nofn_aggregated_xonly_pk().unwrap(); + // remove 1 verifier then run a replacement deposit + actors.remove_verifier(2).await.unwrap(); + + let ( + actors, + 
replacement_deposit_info, + _replacement_move_txid, + replacement_deposit_blockhash, + ) = run_single_replacement_deposit( + &mut config, + rpc, + old_move_txid, + actors, + old_nofn_xonly_pk, + ) + .await + .unwrap(); + + check_if_signable( + actors, + replacement_deposit_info, + replacement_deposit_blockhash, + config.clone(), + ) + .await; + } + + #[test] + fn test_txhandler_cache_store_for_next_kickoff() { + let mut cache = TxHandlerCache::new(); + let mut txhandlers = BTreeMap::new(); + txhandlers.insert( + TransactionType::MoveToVault, + TxHandlerBuilder::new(TransactionType::MoveToVault).finalize(), + ); + txhandlers.insert( + TransactionType::Round, + TxHandlerBuilder::new(TransactionType::Round).finalize(), + ); + txhandlers.insert( + TransactionType::ReadyToReimburse, + TxHandlerBuilder::new(TransactionType::ReadyToReimburse).finalize(), + ); + txhandlers.insert( + TransactionType::Kickoff, + TxHandlerBuilder::new(TransactionType::Kickoff).finalize(), + ); + + // should store the first 3 txhandlers, and not insert kickoff + assert!(cache.store_for_next_kickoff(&mut txhandlers).is_ok()); + assert!(txhandlers.len() == 1); + assert!(cache.saved_txs.len() == 3); + assert!(cache.saved_txs.contains_key(&TransactionType::MoveToVault)); + assert!(cache.saved_txs.contains_key(&TransactionType::Round)); + assert!(cache + .saved_txs + .contains_key(&TransactionType::ReadyToReimburse)); + // prev_ready_to_reimburse should be None as it is the first iteration + assert!(cache.prev_ready_to_reimburse.is_none()); + + // txhandlers should contain all cached tx's + txhandlers = cache.get_cached_txs(); + assert!(txhandlers.len() == 3); + assert!(txhandlers.contains_key(&TransactionType::MoveToVault)); + assert!(txhandlers.contains_key(&TransactionType::Round)); + assert!(txhandlers.contains_key(&TransactionType::ReadyToReimburse)); + assert!(cache.store_for_next_kickoff(&mut txhandlers).is_ok()); + // prev ready to reimburse still none as we didn't go to next round + 
assert!(cache.prev_ready_to_reimburse.is_none()); + + // should delete saved txs and store prev ready to reimburse, but it should keep movetovault + assert!(cache.store_for_next_round().is_ok()); + assert!(cache.saved_txs.len() == 1); + assert!(cache.prev_ready_to_reimburse.is_some()); + assert!(cache.saved_txs.contains_key(&TransactionType::MoveToVault)); + + // retrieve cached movetovault + txhandlers = cache.get_cached_txs(); + + // create new round txs + txhandlers.insert( + TransactionType::ReadyToReimburse, + TxHandlerBuilder::new(TransactionType::ReadyToReimburse).finalize(), + ); + txhandlers.insert( + TransactionType::Round, + TxHandlerBuilder::new(TransactionType::Round).finalize(), + ); + // add not relevant tx + txhandlers.insert( + TransactionType::WatchtowerChallenge(0), + TxHandlerBuilder::new(TransactionType::WatchtowerChallenge(0)).finalize(), + ); + + // should add all 3 tx's to cache again + assert!(cache.store_for_next_kickoff(&mut txhandlers).is_ok()); + assert!(cache.saved_txs.len() == 3); + assert!(cache.saved_txs.contains_key(&TransactionType::MoveToVault)); + assert!(cache.saved_txs.contains_key(&TransactionType::Round)); + assert!(cache + .saved_txs + .contains_key(&TransactionType::ReadyToReimburse)); + // prev ready to reimburse is still stored + assert!(cache.prev_ready_to_reimburse.is_some()); + } +} diff --git a/core/src/builder/transaction/deposit_signature_owner.rs b/core/src/builder/transaction/deposit_signature_owner.rs new file mode 100644 index 000000000..365e60c9b --- /dev/null +++ b/core/src/builder/transaction/deposit_signature_owner.rs @@ -0,0 +1,120 @@ +//! # Deposit Signature Ownership Mapping +//! +//! This module provides types and logic for mapping transaction signature requirements to protocol entities in the Clementine bridge. +//! It is used to determine which entity (operator, verifier, N-of-N, etc.) is responsible for providing a signature for a given transaction input, +//! 
and what sighash type is required for that signature. Additionally it encodes when this signature is given to other entities. +//! + +use crate::errors::BridgeError; +use crate::rpc::clementine::tagged_signature::SignatureId; +use crate::rpc::clementine::{NormalSignatureKind, NumberedSignatureKind}; +use bitcoin::TapSighashType; +use eyre::Context; + +/// Enumerates the protocol entities that may own a required signature for a transaction input. +/// Additionally it encodes when this signature is given to other entities. For example signatures with OperatorDeposit are operator's +/// signatures that are shared with verifiers during a new deposit, while OperatorSetup is operator's signature that is given to the +/// verifiers when Operator is being newly setup and added to verifiers databases. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum EntityType { + OperatorDeposit, + VerifierDeposit, + OperatorSetup, +} + +/// Describes the ownership and sighash type for a required signature. +/// +/// - `NotOwned`: No signature required or not owned by any protocol entity. +/// - `OperatorSharedDeposit`: Operator's signature required for deposit, with the given sighash type. +/// - `NofnSharedDeposit`: N-of-N signature required for deposit, with the given sighash type. +/// - `Own`: Signature required for the entity running the program, with the given sighash type. +/// - `OperatorSharedSetup`: Operator's signature required during aggregator setup, with the given sighash type. +#[derive(Debug, Clone, Copy)] +pub enum DepositSigKeyOwner { + NotOwned, + /// Operator's signature required for deposit (shared with verifiers), with the given sighash type. + OperatorSharedDeposit(TapSighashType), + /// N-of-N signature required for deposit, with the given sighash type. + NofnSharedDeposit(TapSighashType), + /// Signature required for the entity itself, with the given sighash type. + /// Verifiers do not need this signature info, thus it is not saved to DB. 
+ /// Added to help define different sighash types for operator's own signatures. + Own(TapSighashType), + /// Operator's signature required during first setup, with the given sighash type. + OperatorSharedSetup(TapSighashType), +} + +impl DepositSigKeyOwner { + /// Returns the sighash type for this signature owner, if any. + pub fn sighash_type(&self) -> Option { + match self { + DepositSigKeyOwner::NotOwned => None, + DepositSigKeyOwner::Own(t) + | DepositSigKeyOwner::NofnSharedDeposit(t) + | DepositSigKeyOwner::OperatorSharedDeposit(t) + | DepositSigKeyOwner::OperatorSharedSetup(t) => Some(*t), + } + } +} + +impl SignatureId { + /// Maps a [`SignatureId`] to its required signature owner and sighash type. + /// + /// # Returns + /// + /// A [`DepositSigKeyOwner`] describing the required signature owner and sighash type for this signature ID, or a [`BridgeError`] if the mapping fails. + pub fn get_deposit_sig_owner(&self) -> Result { + use DepositSigKeyOwner::*; + use TapSighashType::{Default as SighashDefault, SinglePlusAnyoneCanPay}; + match *self { + SignatureId::NormalSignature(normal_sig) => { + let normal_sig_type = NormalSignatureKind::try_from(normal_sig.signature_kind) + .wrap_err("Couldn't convert SignatureId::NormalSignature to DepositSigKey")?; + use NormalSignatureKind::*; + match normal_sig_type { + OperatorSighashDefault => Ok(Own(SighashDefault)), + NormalSignatureUnknown => Ok(NotOwned), + Challenge => Ok(OperatorSharedDeposit(SinglePlusAnyoneCanPay)), + DisproveTimeout2 => Ok(NofnSharedDeposit(SighashDefault)), + Disprove2 => Ok(OperatorSharedDeposit(SighashDefault)), + Reimburse1 => Ok(NofnSharedDeposit(SighashDefault)), + KickoffNotFinalized1 => Ok(NofnSharedDeposit(SighashDefault)), + KickoffNotFinalized2 => Ok(OperatorSharedDeposit(SighashDefault)), + Reimburse2 => Ok(NofnSharedDeposit(SighashDefault)), + NoSignature => Ok(NotOwned), + ChallengeTimeout2 => Ok(NofnSharedDeposit(SighashDefault)), + MiniAssert1 => Ok(Own(SighashDefault)), + 
OperatorChallengeAck1 => Ok(Own(SighashDefault)), + NotStored => Ok(NotOwned), + YieldKickoffTxid => Ok(NotOwned), + LatestBlockhashTimeout1 => Ok(NofnSharedDeposit(SighashDefault)), + LatestBlockhashTimeout2 => Ok(NofnSharedDeposit(SighashDefault)), + LatestBlockhashTimeout3 => Ok(OperatorSharedDeposit(SighashDefault)), + LatestBlockhash => Ok(Own(SighashDefault)), + } + } + SignatureId::NumberedSignature(numbered_sig) => { + let numbered_sig_type = NumberedSignatureKind::try_from( + numbered_sig.signature_kind, + ) + .wrap_err("Couldn't convert SignatureId::NumberedSignature to DepositSigKey")?; + use NumberedSignatureKind::*; + match numbered_sig_type { + OperatorChallengeNack1 => Ok(NofnSharedDeposit(SighashDefault)), + OperatorChallengeNack2 => Ok(NofnSharedDeposit(SighashDefault)), + NumberedSignatureUnknown => Ok(NotOwned), + NumberedNotStored => Ok(Own(SighashDefault)), + OperatorChallengeNack3 => Ok(OperatorSharedDeposit(SighashDefault)), + AssertTimeout1 => Ok(NofnSharedDeposit(SighashDefault)), + AssertTimeout2 => Ok(NofnSharedDeposit(SighashDefault)), + AssertTimeout3 => Ok(OperatorSharedDeposit(SighashDefault)), + UnspentKickoff1 => Ok(OperatorSharedSetup(SighashDefault)), + UnspentKickoff2 => Ok(OperatorSharedSetup(SighashDefault)), + WatchtowerChallengeTimeout1 => Ok(NofnSharedDeposit(SighashDefault)), + WatchtowerChallengeTimeout2 => Ok(NofnSharedDeposit(SighashDefault)), + WatchtowerChallenge => Ok(Own(SighashDefault)), + } + } + } + } +} diff --git a/core/src/builder/transaction/input.rs b/core/src/builder/transaction/input.rs new file mode 100644 index 000000000..966f90513 --- /dev/null +++ b/core/src/builder/transaction/input.rs @@ -0,0 +1,348 @@ +//! # Transaction Input Types and Utilities +//! +//! This module defines types and utilities for representing and handling transaction inputs used in [`super::TxHandler`]. +//! It provides abstractions for spendable inputs, input errors, correctness checks, supporting Taproot and script path spends. 
+//! + +use crate::bitvm_client; +use crate::builder::script::SpendableScript; +use crate::builder::sighash::TapTweakData; +use crate::builder::{address::create_taproot_address, script::SpendPath}; +use crate::config::protocol::ProtocolParamset; +use crate::rpc::clementine::tagged_signature::SignatureId; +use bitcoin::{ + taproot::{LeafVersion, TaprootSpendInfo}, + Amount, OutPoint, ScriptBuf, Sequence, TxIn, TxOut, Witness, WitnessProgram, XOnlyPublicKey, +}; +use std::sync::Arc; +use thiserror::Error; + +pub type BlockHeight = u16; + +#[derive(Debug, Clone)] +/// Represents a spendable transaction input, including previous output, scripts, and Taproot spend info. +pub struct SpendableTxIn { + /// The reference to the previous output that is being used as an input. + previous_outpoint: OutPoint, + prevout: TxOut, // locking script (taproot => op_1 op_pushbytes_32 tweaked pk) + /// Scripts associated with this input (for script path spends). + scripts: Vec>, + /// Optional Taproot spend info for this input. + spendinfo: Option, +} + +#[derive(Clone, Debug, Error, PartialEq)] +/// Error type for spendable input construction and validation. +pub enum SpendableTxInError { + #[error( + "The taproot spend info contains an incomplete merkle proof map. Some scripts are missing." + )] + IncompleteMerkleProofMap, + + #[error("The script_pubkey of the previous output does not match the expected script_pubkey for the taproot spending information.")] + IncorrectScriptPubkey, + + #[error("Error creating a spendable txin: {0}")] + Error(String), +} + +#[derive(Debug, Clone, Copy)] +/// Enumerates protocol-specific UTXO output indices for transaction construction. +/// Used to identify the vout of specific UTXOs in protocol transactions. 
+pub enum UtxoVout { + /// The vout of the assert utxo in KickoffTx + Assert(usize), + /// The vout of the watchtower challenge utxo in KickoffTx + WatchtowerChallenge(usize), + /// The vout of the watchtower challenge ack utxo in KickoffTx + WatchtowerChallengeAck(usize), + /// The vout of the challenge utxo in KickoffTx + Challenge, + /// The vout of the kickoff finalizer utxo in KickoffTx + KickoffFinalizer, + /// The vout of the reimburse utxo in KickoffTx + ReimburseInKickoff, + /// The vout of the disprove utxo in KickoffTx + Disprove, + /// The vout of the latest blockhash utxo in KickoffTx + LatestBlockhash, + /// The vout of the deposited btc utxo in MoveTx + DepositInMove, + /// The vout of the reimburse connector utxo in RoundTx + ReimburseInRound(usize, &'static ProtocolParamset), + /// The vout of the kickoff utxo in RoundTx + Kickoff(usize), + /// The vout of the collateral utxo in RoundTx + CollateralInRound, + /// The vout of the collateral utxo in ReadyToReimburseTx + CollateralInReadyToReimburse, +} + +impl UtxoVout { + /// Returns the vout index for this UTXO in the corresponding transaction. 
+ pub fn get_vout(self) -> u32 { + match self { + UtxoVout::Assert(idx) => idx as u32 + 5, + UtxoVout::WatchtowerChallenge(idx) => { + (2 * idx + 5 + bitvm_client::ClementineBitVMPublicKeys::number_of_assert_txs()) + as u32 + } + UtxoVout::WatchtowerChallengeAck(idx) => { + (2 * idx + 6 + bitvm_client::ClementineBitVMPublicKeys::number_of_assert_txs()) + as u32 + } + UtxoVout::Challenge => 0, + UtxoVout::KickoffFinalizer => 1, + UtxoVout::ReimburseInKickoff => 2, + UtxoVout::Disprove => 3, + UtxoVout::LatestBlockhash => 4, + UtxoVout::ReimburseInRound(idx, paramset) => { + (paramset.num_kickoffs_per_round + idx + 1) as u32 + } + UtxoVout::Kickoff(idx) => idx as u32 + 1, + UtxoVout::DepositInMove => 0, + UtxoVout::CollateralInRound => 0, + UtxoVout::CollateralInReadyToReimburse => 0, + } + } +} + +impl SpendableTxIn { + /// Returns a reference to the previous output (TxOut) for this input. + pub fn get_prevout(&self) -> &TxOut { + &self.prevout + } + + /// Returns a reference to the previous outpoint (OutPoint) for this input. + pub fn get_prev_outpoint(&self) -> &OutPoint { + &self.previous_outpoint + } + + /// Creates a new [`SpendableTxIn`] with only a previous output and TxOut (no scripts or spend info). + pub fn new_partial(previous_output: OutPoint, prevout: TxOut) -> SpendableTxIn { + Self::new(previous_output, prevout, vec![], None) + } + + /// Constructs a [`SpendableTxIn`] from scripts, value, and the internal key. Giving None for the internal key will create the tx + /// with an unspendable internal key. + /// + /// # Arguments + /// * `previous_output` - The outpoint being spent. + /// * `value` - The value of the previous output. + /// * `scripts` - Scripts for script path spends. + /// * `key_path` - The internal key for key path spends. + /// * `network` - Bitcoin network. + /// + /// # Returns + /// + /// A new [`SpendableTxIn`] with the specified parameters. 
+ pub fn from_scripts( + previous_output: OutPoint, + value: Amount, + scripts: Vec>, + key_path: Option, + network: bitcoin::Network, + ) -> SpendableTxIn { + let script_bufs: Vec = scripts + .iter() + .map(|script| script.clone().to_script_buf()) + .collect(); + let (addr, spend_info) = create_taproot_address(&script_bufs, key_path, network); + Self::new( + previous_output, + TxOut { + value, + script_pubkey: addr.script_pubkey(), + }, + scripts, + Some(spend_info), + ) + } + + /// Creates a new [`SpendableTxIn`] from all fields. + #[inline(always)] + pub fn new( + previous_output: OutPoint, + prevout: TxOut, + scripts: Vec>, + spendinfo: Option, + ) -> SpendableTxIn { + if cfg!(debug_assertions) { + return Self::from_checked(previous_output, prevout, scripts, spendinfo) + .expect("failed to construct a spendabletxin in debug mode"); + } + + Self::from_unchecked(previous_output, prevout, scripts, spendinfo) + } + + /// Returns a reference to the scripts for this input. + pub fn get_scripts(&self) -> &Vec> { + &self.scripts + } + + /// Returns a reference to the Taproot spend info for this input, if any. + pub fn get_spend_info(&self) -> &Option { + &self.spendinfo + } + + /// Sets the Taproot spend info for this input. + pub fn set_spend_info(&mut self, spendinfo: Option) { + self.spendinfo = spendinfo; + #[cfg(debug_assertions)] + self.check().expect("spendinfo is invalid in debug mode"); + } + + /// Checks the validity of the spendable input, ensuring script pubkey and merkle proof map are correct. 
+ fn check(&self) -> Result<(), SpendableTxInError> { + use SpendableTxInError::*; + let Some(spendinfo) = self.spendinfo.as_ref() else { + return Ok(()); + }; + + let (prevout, scripts) = (&self.prevout, &self.scripts); + + if ScriptBuf::new_witness_program(&WitnessProgram::p2tr_tweaked(spendinfo.output_key())) + != prevout.script_pubkey + { + return Err(IncorrectScriptPubkey); + } + let script_bufs: Vec = scripts + .iter() + .map(|script| script.to_script_buf()) + .collect(); + if script_bufs.into_iter().any(|script| { + spendinfo + .script_map() + .get(&(script, LeafVersion::TapScript)) + .is_none() + }) { + return Err(IncompleteMerkleProofMap); + } + Ok(()) + } + + /// Creates a [`SpendableTxIn`] with validation if the given input is valid (used in debug mode for testing). + fn from_checked( + previous_output: OutPoint, + prevout: TxOut, + scripts: Vec>, + spendinfo: Option, + ) -> Result { + let this = Self::from_unchecked(previous_output, prevout, scripts, spendinfo); + this.check()?; + Ok(this) + } + + /// Creates a [`SpendableTxIn`] without validation (used in release mode). + fn from_unchecked( + previous_outpoint: OutPoint, + prevout: TxOut, + scripts: Vec>, + spendinfo: Option, + ) -> SpendableTxIn { + SpendableTxIn { + previous_outpoint, + prevout, + scripts, + spendinfo, + } + } +} + +#[allow(dead_code)] +#[derive(Debug, Clone)] +/// Represents a fully specified transaction input, including sequence, witness, spend path, and signature ID. +pub struct SpentTxIn { + spendable: SpendableTxIn, + /// The sequence number, which suggests to miners which of two + /// conflicting transactions should be preferred, or 0xFFFFFFFF + /// to ignore this feature. This is generally never used since + /// the miner behavior cannot be enforced. + sequence: Sequence, + /// Witness data used to spend this TxIn. Can be None if the + /// transaction that this TxIn is in has not been signed yet. + /// + /// Has to be Some(_) when the transaction is signed. 
+ witness: Option, + /// Spend path for this input (key or script path). + spend_path: SpendPath, + /// Signature ID for this input, which signature in the protocol this input needs. + input_id: SignatureId, +} + +impl SpentTxIn { + /// Constructs a [`SpentTxIn`] from a spendable input and associated metadata. + pub fn from_spendable( + input_id: SignatureId, + spendable: SpendableTxIn, + spend_path: SpendPath, + sequence: Sequence, + witness: Option, + ) -> SpentTxIn { + SpentTxIn { + spendable, + sequence, + witness, + spend_path, + input_id, + } + } + + /// Returns a reference to the underlying [`SpendableTxIn`]. + pub fn get_spendable(&self) -> &SpendableTxIn { + &self.spendable + } + + /// Returns the spend path for this input. + pub fn get_spend_path(&self) -> SpendPath { + self.spend_path + } + + /// Returns the Taproot tweak data for this input, based on the spend path and spend info. + pub fn get_tweak_data(&self) -> TapTweakData { + match self.spend_path { + SpendPath::ScriptSpend(_) => TapTweakData::ScriptPath, + SpendPath::KeySpend => { + let spendinfo = self.spendable.get_spend_info(); + match spendinfo { + Some(spendinfo) => TapTweakData::KeyPath(spendinfo.merkle_root()), + None => TapTweakData::Unknown, + } + } + SpendPath::Unknown => TapTweakData::Unknown, + } + } + + /// Returns a reference to the witness data for this input, if any. + pub fn get_witness(&self) -> &Option { + &self.witness + } + + /// Returns the signature ID for this input. + pub fn get_signature_id(&self) -> SignatureId { + self.input_id + } + + /// Sets the witness data for this input. + pub fn set_witness(&mut self, witness: Witness) { + self.witness = Some(witness); + } + + // pub fn get_sequence(&self) -> Sequence { + // self.sequence + // } + + // pub fn set_sequence(&mut self, sequence: Sequence) { + // self.sequence = sequence; + // } + + /// Converts this [`SpentTxIn`] into a Bitcoin [`TxIn`] for inclusion in a Bitcoin transaction. 
+ pub fn to_txin(&self) -> TxIn { + TxIn { + previous_output: self.spendable.previous_outpoint, + sequence: self.sequence, + script_sig: ScriptBuf::default(), + witness: self.witness.clone().unwrap_or_default(), + } + } +} diff --git a/core/src/builder/transaction/mod.rs b/core/src/builder/transaction/mod.rs new file mode 100644 index 000000000..d91e444a8 --- /dev/null +++ b/core/src/builder/transaction/mod.rs @@ -0,0 +1,779 @@ +//! # builder::transaction +//! +//! +//! This module provides the core logic for constructing, handling, and signing the various Bitcoin transactions +//! required by the Clementine bridge protocol. It defines the creation, and validation of +//! transaction flows involving operators, verifiers, watchtowers, and the security council, aimed to make it +//! easy to create transactions and sign them properly. +//! +//! ## Overview +//! +//! The transaction builder is responsible for: +//! - Defining all transaction types and their flows in the protocol (see [`TransactionType`]). +//! - Building and signing transactions for deposit, withdrawal, challenge, reimbursement, and related operations. +//! - Storing transaction inputs/outputs, scripts, and Taproot spend information. +//! - Providing utilities to speed up transaction creating during a deposit using caching tx and db data. +//! +//! ## Main Components +//! +//! - [`mod.rs`] - The main entry point, re-exporting key types and functions. Defines some helper functions for creating taproot outputs. +//! - [`creator.rs`] - Contains the functions to create multiple TxHandler's for a deposit and related structs for caching. In particular, it contains the functions to create TxHandler's for all transactions generated during a deposit from a single kickoff. +//! - [`operator_collateral.rs`] - Handles the creation of operator-specific collateral-related transactions, such as round, ready-to-reimburse, and unspent kickoff transactions. +//! 
- [`operator_reimburse.rs`] - Implements the creation of reimbursement and payout transactions, including logic for operator compensation and optimistic payouts. +//! - [`operator_assert.rs`] - Provides functions for creating BitVM assertion and timeout transactions. +//! - [`challenge.rs`] - Handles the creation of challenge, disprove, and watchtower challenge transactions, supporting protocol dispute resolution and fraud proofs. +//! - [`sign.rs`] - Contains logic for signing transactions using data in the [`TxHandler`]. +//! - [`txhandler.rs`] - Defines the [`TxHandler`] abstraction, which wraps a transaction and its metadata, and provides methods for signing, finalizing, and extracting transaction data. +//! - [`input.rs`] - Defines types and utilities for transaction inputs used in the [`TxHandler`]. +//! - [`output.rs`] - Defines types and utilities for transaction outputs used in the [`TxHandler`]. +//! - [`deposit_signature_owner.rs`] - Maps which TxIn signatures are signed by which protocol entities, additionally supporting different Sighash types. +//! 
+ +use super::script::{CheckSig, Multisig, SpendableScript}; +use super::script::{ReplacementDepositScript, SpendPath}; +use crate::builder::address::calculate_taproot_leaf_depths; +use crate::builder::script::OtherSpendable; +use crate::builder::transaction::challenge::*; +use crate::builder::transaction::input::SpendableTxIn; +use crate::builder::transaction::operator_assert::*; +use crate::builder::transaction::operator_collateral::*; +use crate::builder::transaction::operator_reimburse::*; +use crate::builder::transaction::output::UnspentTxOut; +use crate::config::protocol::ProtocolParamset; +use crate::constants::{NON_EPHEMERAL_ANCHOR_AMOUNT, NON_STANDARD_V3}; +use crate::deposit::{DepositData, SecurityCouncil}; +use crate::errors::BridgeError; +use crate::operator::RoundIndex; +use crate::rpc::clementine::grpc_transaction_id; +use crate::rpc::clementine::GrpcTransactionId; +use crate::rpc::clementine::{ + NormalSignatureKind, NormalTransactionId, NumberedTransactionId, NumberedTransactionType, +}; +use bitcoin::hashes::Hash; +use bitcoin::opcodes::all::OP_RETURN; +use bitcoin::script::Builder; +use bitcoin::transaction::Version; +use bitcoin::{ + Address, Amount, OutPoint, ScriptBuf, Transaction, TxIn, TxOut, Txid, XOnlyPublicKey, +}; +use hex; +use input::UtxoVout; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use thiserror::Error; + +// Exports to the outside +pub use crate::builder::transaction::txhandler::*; +pub use creator::{ + create_round_txhandlers, create_txhandlers, ContractContext, KickoffWinternitzKeys, + ReimburseDbCache, TxHandlerCache, +}; +pub use operator_collateral::{ + create_burn_unused_kickoff_connectors_txhandler, create_round_nth_txhandler, +}; +pub use operator_reimburse::{create_optimistic_payout_txhandler, create_payout_txhandler}; +pub use txhandler::Unsigned; + +pub mod challenge; +mod creator; +pub mod deposit_signature_owner; +pub mod input; +mod operator_assert; +mod operator_collateral; +mod operator_reimburse; 
+pub mod output; +pub mod sign; +mod txhandler; + +type HiddenNode<'a> = &'a [u8; 32]; + +#[derive(Debug, Error)] +pub enum TxError { + /// TxInputNotFound is returned when the input is not found in the transaction + #[error("Could not find input of transaction")] + TxInputNotFound, + #[error("Could not find output of transaction")] + TxOutputNotFound, + #[error("Attempted to set witness when it's already set")] + WitnessAlreadySet, + #[error("Script with index {0} not found for transaction")] + ScriptNotFound(usize), + #[error("Insufficient Context data for the requested TxHandler")] + InsufficientContext, + #[error("No scripts in TxHandler for the TxIn with index {0}")] + NoScriptsForTxIn(usize), + #[error("No script in TxHandler for the index {0}")] + NoScriptAtIndex(usize), + #[error("Spend Path in SpentTxIn in TxHandler not specified")] + SpendPathNotSpecified, + #[error("Actor does not own the key needed in P2TR keypath")] + NotOwnKeyPath, + #[error("public key of Checksig in script is not owned by Actor")] + NotOwnedScriptPath, + #[error("Couldn't find needed signature from database for tx: {:?}", _0)] + SignatureNotFound(TransactionType), + #[error("Couldn't find needed txhandler during creation for tx: {:?}", _0)] + TxHandlerNotFound(TransactionType), + #[error("BitvmSetupNotFound for operator {0:?}, deposit_txid {1}")] + BitvmSetupNotFound(XOnlyPublicKey, Txid), + #[error("Transaction input is missing spend info")] + MissingSpendInfo, + #[error("Incorrect watchtower challenge data length")] + IncorrectWatchtowerChallengeDataLength, + #[error("Latest blockhash script must be a single script")] + LatestBlockhashScriptNumber, + #[error("Round index cannot be used to create a Round transaction: {0:?}")] + InvalidRoundIndex(RoundIndex), + #[error("Index overflow")] + IndexOverflow, + + #[error(transparent)] + Other(#[from] eyre::Report), +} + +/// Types of all transactions that can be created. 
Some transactions have a +/// (usize) index as there are multiple instances of the same transaction type +/// per kickoff. +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)] +pub enum TransactionType { + // --- Transaction Types --- + AssertTimeout(usize), + BurnUnusedKickoffConnectors, + Challenge, + ChallengeTimeout, + Disprove, + DisproveTimeout, + EmergencyStop, + Kickoff, + KickoffNotFinalized, + LatestBlockhash, + LatestBlockhashTimeout, + MiniAssert(usize), + MoveToVault, + OperatorChallengeAck(usize), + OperatorChallengeNack(usize), + OptimisticPayout, + Payout, + ReadyToReimburse, + Reimburse, + ReplacementDeposit, + Round, + UnspentKickoff(usize), + WatchtowerChallenge(usize), + WatchtowerChallengeTimeout(usize), + + // --- Transaction Subsets --- + AllNeededForDeposit, // this will include all tx's that is to be signed for a deposit for verifiers + YieldKickoffTxid, // This is just to yield kickoff txid from the sighash stream, not used for anything else, sorry + + /// For testing and for values to be replaced later. 
+ Dummy, +} + +// converter from proto type to rust enum +impl TryFrom for TransactionType { + type Error = ::prost::UnknownEnumValue; + fn try_from(value: GrpcTransactionId) -> Result { + use NormalTransactionId as Normal; + use NumberedTransactionType as Numbered; + // return err if id is None + let inner_id = value.id.ok_or(::prost::UnknownEnumValue(0))?; + match inner_id { + grpc_transaction_id::Id::NormalTransaction(idx) => { + let tx_type = NormalTransactionId::try_from(idx)?; + match tx_type { + Normal::Round => Ok(Self::Round), + Normal::Kickoff => Ok(Self::Kickoff), + Normal::MoveToVault => Ok(Self::MoveToVault), + Normal::Payout => Ok(Self::Payout), + Normal::Challenge => Ok(Self::Challenge), + Normal::Disprove => Ok(Self::Disprove), + Normal::DisproveTimeout => Ok(Self::DisproveTimeout), + Normal::Reimburse => Ok(Self::Reimburse), + Normal::AllNeededForDeposit => Ok(Self::AllNeededForDeposit), + Normal::Dummy => Ok(Self::Dummy), + Normal::ReadyToReimburse => Ok(Self::ReadyToReimburse), + Normal::KickoffNotFinalized => Ok(Self::KickoffNotFinalized), + Normal::ChallengeTimeout => Ok(Self::ChallengeTimeout), + Normal::UnspecifiedTransactionType => Err(::prost::UnknownEnumValue(idx)), + Normal::BurnUnusedKickoffConnectors => Ok(Self::BurnUnusedKickoffConnectors), + Normal::YieldKickoffTxid => Ok(Self::YieldKickoffTxid), + Normal::ReplacementDeposit => Ok(Self::ReplacementDeposit), + Normal::LatestBlockhashTimeout => Ok(Self::LatestBlockhashTimeout), + Normal::LatestBlockhash => Ok(Self::LatestBlockhash), + Normal::OptimisticPayout => Ok(Self::OptimisticPayout), + } + } + grpc_transaction_id::Id::NumberedTransaction(transaction_id) => { + let tx_type = NumberedTransactionType::try_from(transaction_id.transaction_type)?; + match tx_type { + Numbered::WatchtowerChallenge => { + Ok(Self::WatchtowerChallenge(transaction_id.index as usize)) + } + Numbered::OperatorChallengeNack => { + Ok(Self::OperatorChallengeNack(transaction_id.index as usize)) + } + 
Numbered::OperatorChallengeAck => { + Ok(Self::OperatorChallengeAck(transaction_id.index as usize)) + } + Numbered::AssertTimeout => { + Ok(Self::AssertTimeout(transaction_id.index as usize)) + } + Numbered::UnspentKickoff => { + Ok(Self::UnspentKickoff(transaction_id.index as usize)) + } + Numbered::MiniAssert => Ok(Self::MiniAssert(transaction_id.index as usize)), + Numbered::WatchtowerChallengeTimeout => Ok(Self::WatchtowerChallengeTimeout( + transaction_id.index as usize, + )), + Numbered::UnspecifiedIndexedTransactionType => { + Err(::prost::UnknownEnumValue(transaction_id.transaction_type)) + } + } + } + } + } +} + +impl From for GrpcTransactionId { + fn from(value: TransactionType) -> Self { + use grpc_transaction_id::Id::*; + use NormalTransactionId as Normal; + use NumberedTransactionType as Numbered; + GrpcTransactionId { + id: Some(match value { + TransactionType::Round => NormalTransaction(Normal::Round as i32), + TransactionType::Kickoff => NormalTransaction(Normal::Kickoff as i32), + TransactionType::MoveToVault => NormalTransaction(Normal::MoveToVault as i32), + TransactionType::Payout => NormalTransaction(Normal::Payout as i32), + TransactionType::Challenge => NormalTransaction(Normal::Challenge as i32), + TransactionType::Disprove => NormalTransaction(Normal::Disprove as i32), + TransactionType::DisproveTimeout => { + NormalTransaction(Normal::DisproveTimeout as i32) + } + TransactionType::Reimburse => NormalTransaction(Normal::Reimburse as i32), + TransactionType::AllNeededForDeposit => { + NormalTransaction(Normal::AllNeededForDeposit as i32) + } + TransactionType::Dummy => NormalTransaction(Normal::Dummy as i32), + TransactionType::ReadyToReimburse => { + NormalTransaction(Normal::ReadyToReimburse as i32) + } + TransactionType::KickoffNotFinalized => { + NormalTransaction(Normal::KickoffNotFinalized as i32) + } + TransactionType::ChallengeTimeout => { + NormalTransaction(Normal::ChallengeTimeout as i32) + } + TransactionType::ReplacementDeposit 
=> { + NormalTransaction(Normal::ReplacementDeposit as i32) + } + TransactionType::LatestBlockhashTimeout => { + NormalTransaction(Normal::LatestBlockhashTimeout as i32) + } + TransactionType::LatestBlockhash => { + NormalTransaction(Normal::LatestBlockhash as i32) + } + TransactionType::OptimisticPayout => { + NormalTransaction(Normal::OptimisticPayout as i32) + } + TransactionType::WatchtowerChallenge(index) => { + NumberedTransaction(NumberedTransactionId { + transaction_type: Numbered::WatchtowerChallenge as i32, + index: index as i32, + }) + } + TransactionType::OperatorChallengeNack(index) => { + NumberedTransaction(NumberedTransactionId { + transaction_type: Numbered::OperatorChallengeNack as i32, + index: index as i32, + }) + } + TransactionType::OperatorChallengeAck(index) => { + NumberedTransaction(NumberedTransactionId { + transaction_type: Numbered::OperatorChallengeAck as i32, + index: index as i32, + }) + } + TransactionType::AssertTimeout(index) => { + NumberedTransaction(NumberedTransactionId { + transaction_type: Numbered::AssertTimeout as i32, + index: index as i32, + }) + } + TransactionType::UnspentKickoff(index) => { + NumberedTransaction(NumberedTransactionId { + transaction_type: Numbered::UnspentKickoff as i32, + index: index as i32, + }) + } + TransactionType::MiniAssert(index) => NumberedTransaction(NumberedTransactionId { + transaction_type: Numbered::MiniAssert as i32, + index: index as i32, + }), + TransactionType::WatchtowerChallengeTimeout(index) => { + NumberedTransaction(NumberedTransactionId { + transaction_type: Numbered::WatchtowerChallengeTimeout as i32, + index: index as i32, + }) + } + TransactionType::BurnUnusedKickoffConnectors => { + NormalTransaction(Normal::BurnUnusedKickoffConnectors as i32) + } + TransactionType::YieldKickoffTxid => { + NormalTransaction(Normal::YieldKickoffTxid as i32) + } + TransactionType::EmergencyStop => { + NormalTransaction(Normal::UnspecifiedTransactionType as i32) + } + }), + } + } +} + +/// 
Creates a P2A (anchor) output for Child Pays For Parent (CPFP) fee bumping. +/// +/// # Returns +/// +/// A [`TxOut`] with a statically defined script and value, used as an anchor output in protocol transactions. The TxOut is spendable by anyone. +pub fn anchor_output(amount: Amount) -> TxOut { + TxOut { + value: amount, + script_pubkey: ScriptBuf::from_hex("51024e73").expect("statically valid script"), + } +} + +/// A non-ephemeral anchor output. It is used in tx's that should have a non-ephemeral anchor. +/// Because ephemeral anchors force the tx to have 0 fee. +pub fn non_ephemeral_anchor_output() -> TxOut { + TxOut { + value: NON_EPHEMERAL_ANCHOR_AMOUNT, + script_pubkey: ScriptBuf::from_hex("51024e73").expect("statically valid script"), + } +} + +/// Creates an OP_RETURN output with the given data slice. +/// +/// # Arguments +/// +/// * `slice` - The data to embed in the OP_RETURN output. +/// +/// # Returns +/// +/// A [`TxOut`] with an OP_RETURN script containing the provided data. +/// +/// # Warning +/// +/// Does not check if the data is valid for an OP_RETURN script. Data must be at most 80 bytes. +pub fn op_return_txout>(slice: S) -> TxOut { + let script = Builder::new() + .push_opcode(OP_RETURN) + .push_slice(slice) + .into_script(); + + TxOut { + value: Amount::from_sat(0), + script_pubkey: script, + } +} + +/// Creates a [`TxHandler`] for the `move_to_vault_tx`. +/// +/// This transaction moves funds to a N-of-N address from the deposit address created by the user that deposits into Citrea after all signature collection operations are done for the deposit. +/// +/// # Arguments +/// +/// * `deposit_data` - Mutable reference to the deposit data for the transaction. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// +/// A [`TxHandler`] for the move-to-vault transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_move_to_vault_txhandler( + deposit_data: &mut DepositData, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + let nofn_xonly_pk = deposit_data.get_nofn_xonly_pk()?; + let deposit_outpoint = deposit_data.get_deposit_outpoint(); + let nofn_script = Arc::new(CheckSig::new(nofn_xonly_pk)); + let security_council_script = Arc::new(Multisig::from_security_council( + deposit_data.security_council.clone(), + )); + + let deposit_scripts = deposit_data.get_deposit_scripts(paramset)?; + + Ok(TxHandlerBuilder::new(TransactionType::MoveToVault) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::NotStored, + SpendableTxIn::from_scripts( + deposit_outpoint, + paramset.bridge_amount, + deposit_scripts, + None, + paramset.network, + ), + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_scripts( + paramset.bridge_amount, + vec![nofn_script, security_council_script], + None, + paramset.network, + )) + // always use 0 sat anchor for move_tx, this will keep the amount in move to vault tx exactly the bridge amount + .add_output(UnspentTxOut::from_partial(anchor_output(Amount::from_sat( + 0, + )))) + .finalize()) +} + +/// Creates a [`TxHandler`] for the `emergency_stop_tx`. +/// +/// This transaction moves funds to the address controlled by the security council from the move-to-vault txout. +/// Used to stop the deposit in case of a security issue. The moved funds will eventually be redeposited using the replacement deposit tx. +/// +/// # Arguments +/// +/// * `deposit_data` - Mutable reference to the deposit data for the transaction. +/// * `move_to_vault_txhandler` - Reference to the move-to-vault transaction handler. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// +/// A [`TxHandler`] for the emergency stop transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_emergency_stop_txhandler( + deposit_data: &mut DepositData, + move_to_vault_txhandler: &TxHandler, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + // Hand calculated, total tx size is 11 + 126 * NUM_EMERGENCY_STOPS + const EACH_EMERGENCY_STOP_VBYTES: Amount = Amount::from_sat(126); + let security_council = deposit_data.security_council.clone(); + + let builder = TxHandlerBuilder::new(TransactionType::EmergencyStop) + .add_input( + NormalSignatureKind::NotStored, + move_to_vault_txhandler.get_spendable_output(UtxoVout::DepositInMove)?, + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_scripts( + paramset.bridge_amount - paramset.anchor_amount() - EACH_EMERGENCY_STOP_VBYTES * 3, + vec![Arc::new(Multisig::from_security_council(security_council))], + None, + paramset.network, + )) + .finalize(); + + Ok(builder) +} + +/// Combines multiple emergency stop transactions into a single transaction. +/// +/// # Arguments +/// +/// * `txs` - A vector of (Txid, Transaction) pairs, each representing a signed emergency stop transaction using Sighash Single | AnyoneCanPay. +/// * `add_anchor` - If true, an anchor output will be appended to the outputs. +/// +/// # Returns +/// +/// A new [`Transaction`] that merges all inputs and outputs from the provided transactions, optionally adding an anchor output. +/// +/// # Warning +/// +/// This function does not perform any safety checks and assumes all inputs/outputs are valid and compatible. 
+pub fn combine_emergency_stop_txhandler( + txs: Vec<(Txid, Transaction)>, + add_anchor: bool, + paramset: &'static ProtocolParamset, +) -> Transaction { + let (inputs, mut outputs): (Vec, Vec) = txs + .into_iter() + .map(|(_, tx)| (tx.input[0].clone(), tx.output[0].clone())) + .unzip(); + + if add_anchor { + outputs.push(anchor_output(paramset.anchor_amount())); + } + + Transaction { + version: Version::non_standard(2), + lock_time: bitcoin::absolute::LockTime::ZERO, + input: inputs, + output: outputs, + } +} + +/// Creates a [`TxHandler`] for the `replacement_deposit_tx`. +/// +/// This transaction replaces a previous deposit with a new deposit. +/// In the its script, it commits the old move_to_vault txid that it replaces. +/// +/// # Arguments +/// +/// * `old_move_txid` - The txid of the old move_to_vault transaction that is being replaced. +/// * `input_outpoint` - The outpoint of the input to the replacement deposit tx that holds bridge amount. +/// * `nofn_xonly_pk` - The N-of-N XOnlyPublicKey for the deposit. +/// * `paramset` - The protocol paramset. +/// * `security_council` - The security council. +/// +/// # Returns +/// +/// A [`TxHandler`] for the replacement deposit transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_replacement_deposit_txhandler( + old_move_txid: Txid, + input_outpoint: OutPoint, + old_nofn_xonly_pk: XOnlyPublicKey, + new_nofn_xonly_pk: XOnlyPublicKey, + paramset: &'static ProtocolParamset, + security_council: SecurityCouncil, +) -> Result { + Ok(TxHandlerBuilder::new(TransactionType::ReplacementDeposit) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::NoSignature, + SpendableTxIn::from_scripts( + input_outpoint, + paramset.bridge_amount, + vec![ + Arc::new(CheckSig::new(old_nofn_xonly_pk)), + Arc::new(Multisig::from_security_council(security_council.clone())), + ], + None, + paramset.network, + ), + crate::builder::script::SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_scripts( + paramset.bridge_amount, + vec![ + Arc::new(ReplacementDepositScript::new( + new_nofn_xonly_pk, + old_move_txid, + )), + Arc::new(Multisig::from_security_council(security_council)), + ], + None, + paramset.network, + )) + // always use 0 sat anchor for replacement deposit tx, this will keep the amount in replacement deposit tx exactly the bridge amount + .add_output(UnspentTxOut::from_partial(anchor_output(Amount::from_sat( + 0, + )))) + .finalize()) +} + +/// Creates a Taproot output for a disprove path, combining a script, an additional disprove script, and a hidden node containing the BitVM disprove scripts. +/// +/// # Arguments +/// +/// * `operator_timeout_script` - The operator timeout script. +/// * `additional_script` - An additional script to include in the Taproot tree. This single additional script is generated by Clementine bridge +/// in addition to the disprove scripts coming from BitVM side. +/// * `disprove_root_hash` - The root hash for the hidden script merkle tree node. The scripts included in the root hash are the BitVM disprove scripts. +/// * `amount` - The output amount. +/// * `network` - The Bitcoin network. 
+/// +/// # Returns +/// +/// An [`UnspentTxOut`] representing the Taproot TxOut. +pub fn create_disprove_taproot_output( + operator_timeout_script: Arc, + additional_script: ScriptBuf, + disprove_path: DisprovePath, + amount: Amount, + network: bitcoin::Network, +) -> UnspentTxOut { + use crate::bitvm_client::{SECP, UNSPENDABLE_XONLY_PUBKEY}; + use bitcoin::taproot::{TapNodeHash, TaprootBuilder}; + + let mut scripts: Vec = vec![additional_script.clone()]; + + let builder = match disprove_path.clone() { + DisprovePath::Scripts(extra_scripts) => { + let mut builder = TaprootBuilder::new(); + + builder = builder + .add_leaf(1, operator_timeout_script.to_script_buf()) + .expect("add operator timeout script") + .add_leaf(2, additional_script) + .expect("add additional script"); + + // 1. Calculate depths. This is cheap and doesn't need ownership of scripts. + let depths = calculate_taproot_leaf_depths(extra_scripts.len()); + + // 2. Zip depths with an iterator over the scripts. + // We clone the `script` inside the loop because the builder needs an owned value. + // This is more efficient than cloning the whole Vec upfront. + for (depth, script) in depths.into_iter().zip(extra_scripts.iter()) { + let main_tree_depth = 2 + depth; + builder = builder + .add_leaf(main_tree_depth, script.clone()) + .expect("add inlined disprove script"); + } + + // 3. Now, move the original `extra_scripts` into `scripts.extend`. No clone needed. 
+ scripts.extend(extra_scripts); + builder + } + DisprovePath::HiddenNode(root_hash) => TaprootBuilder::new() + .add_leaf(1, operator_timeout_script.to_script_buf()) + .expect("empty taptree will accept a script node") + .add_leaf(2, additional_script) + .expect("taptree with one node will accept a node at depth 2") + .add_hidden_node(2, TapNodeHash::from_byte_array(*root_hash)) + .expect("taptree with two nodes will accept a node at depth 2"), + }; + + let taproot_spend_info = builder + .finalize(&SECP, *UNSPENDABLE_XONLY_PUBKEY) + .expect("valid taptree"); + + let address = Address::p2tr( + &SECP, + *UNSPENDABLE_XONLY_PUBKEY, + taproot_spend_info.merkle_root(), + network, + ); + + let mut spendable_scripts: Vec> = vec![operator_timeout_script]; + let other_spendable_scripts: Vec> = scripts + .into_iter() + .map(|script| Arc::new(OtherSpendable::new(script)) as Arc) + .collect(); + + spendable_scripts.extend(other_spendable_scripts); + + UnspentTxOut::new( + TxOut { + value: amount, + script_pubkey: address.script_pubkey(), + }, + spendable_scripts, + Some(taproot_spend_info), + ) +} + +/// Helper function to create a Taproot output that combines a single script and a root hash containing any number of scripts. +/// The main use case for this function is to speed up the tx creating during a deposit. We don't need to create and combine all the +/// scripts in the taproot repeatedly, but cache and combine the common scripts for each kickoff tx to a root hash, and add an additional script +/// that depends on the specific operator or nofn_pk that is signing the deposit. +/// +/// # Arguments +/// +/// * `script` - The one additional script to include in the merkle tree. +/// * `hidden_node` - The root hash for the merkle tree node. The node can contain any number of scripts. +/// * `amount` - The output amount. +/// * `network` - The Bitcoin network. +/// +/// # Returns +/// +/// An [`UnspentTxOut`] representing the Taproot TxOut. 
+pub fn create_taproot_output_with_hidden_node( + script: Arc, + hidden_node: HiddenNode, + amount: Amount, + network: bitcoin::Network, +) -> UnspentTxOut { + use crate::bitvm_client::{SECP, UNSPENDABLE_XONLY_PUBKEY}; + use bitcoin::taproot::{TapNodeHash, TaprootBuilder}; + + let builder = TaprootBuilder::new() + .add_leaf(1, script.to_script_buf()) + .expect("empty taptree will accept a script node") + .add_hidden_node(1, TapNodeHash::from_byte_array(*hidden_node)) + .expect("taptree with one node will accept a node at depth 1"); + + let taproot_spend_info = builder + .finalize(&SECP, *UNSPENDABLE_XONLY_PUBKEY) + .expect("cannot fail since it is a valid taptree"); + + let address = Address::p2tr( + &SECP, + *UNSPENDABLE_XONLY_PUBKEY, + taproot_spend_info.merkle_root(), + network, + ); + + UnspentTxOut::new( + TxOut { + value: amount, + script_pubkey: address.script_pubkey(), + }, + vec![script.clone()], + Some(taproot_spend_info), + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use bitcoin::secp256k1::XOnlyPublicKey; + use std::str::FromStr; + + #[test] + fn test_security_council_from_str() { + // Create some test public keys + let pk1 = XOnlyPublicKey::from_slice(&[1; 32]).unwrap(); + let pk2 = XOnlyPublicKey::from_slice(&[2; 32]).unwrap(); + + // Test valid input + let input = format!( + "2:{},{}", + hex::encode(pk1.serialize()), + hex::encode(pk2.serialize()) + ); + let council = SecurityCouncil::from_str(&input).unwrap(); + assert_eq!(council.threshold, 2); + assert_eq!(council.pks.len(), 2); + assert_eq!(council.pks[0], pk1); + assert_eq!(council.pks[1], pk2); + + // Test invalid threshold + let input = format!( + "3:{},{}", + hex::encode(pk1.serialize()), + hex::encode(pk2.serialize()) + ); + assert!(SecurityCouncil::from_str(&input).is_err()); + + // Test invalid hex + let input = "2:invalid,pk2"; + assert!(SecurityCouncil::from_str(input).is_err()); + + // Test missing parts + assert!(SecurityCouncil::from_str("2").is_err()); + 
assert!(SecurityCouncil::from_str(":").is_err()); + + // Test too many parts + let input = format!( + "2:{},{}:extra", + hex::encode(pk1.serialize()), + hex::encode(pk2.serialize()) + ); + assert!(SecurityCouncil::from_str(&input).is_err()); + + // Test empty public keys + assert!(SecurityCouncil::from_str("2:").is_err()); + } + + #[test] + fn test_security_council_round_trip() { + // Create some test public keys + let pk1 = XOnlyPublicKey::from_slice(&[1; 32]).unwrap(); + let pk2 = XOnlyPublicKey::from_slice(&[2; 32]).unwrap(); + + let original = SecurityCouncil { + pks: vec![pk1, pk2], + threshold: 2, + }; + + let string = original.to_string(); + let parsed = SecurityCouncil::from_str(&string).unwrap(); + + assert_eq!(original, parsed); + } +} diff --git a/core/src/builder/transaction/operator_assert.rs b/core/src/builder/transaction/operator_assert.rs new file mode 100644 index 000000000..4806a0e40 --- /dev/null +++ b/core/src/builder/transaction/operator_assert.rs @@ -0,0 +1,183 @@ +//! This module contains the creation of BitVM operator assertion transactions and timeout transactions related to assertions. + +use self::output::UnspentTxOut; +use super::input::UtxoVout; +use crate::builder; +pub use crate::builder::transaction::txhandler::TxHandler; +pub use crate::builder::transaction::*; +use crate::config::protocol::ProtocolParamset; +use crate::errors::BridgeError; +use crate::rpc::clementine::NormalSignatureKind; +use bitcoin::Sequence; + +/// Creates a [`TxHandler`] for the `disprove_timeout_tx`. +/// +/// This transaction is sent by the operator to enable sending a `reimburse_tx` later, if operator's asserted proof did not get disproved. +/// +/// # Inputs +/// 1. KickoffTx: Disprove utxo +/// 2. KickoffTx: KickoffFinalizer utxo +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// +/// # Arguments +/// * `kickoff_txhandler` - The kickoff transaction handler providing the input. +/// * `paramset` - Protocol parameter set. 
+/// +/// # Returns +/// A [`TxHandler`] for the disprove timeout transaction, or a [`BridgeError`] if construction fails. +pub fn create_disprove_timeout_txhandler( + kickoff_txhandler: &TxHandler, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + Ok(TxHandlerBuilder::new(TransactionType::DisproveTimeout) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::OperatorSighashDefault, + kickoff_txhandler.get_spendable_output(UtxoVout::Disprove)?, + SpendPath::ScriptSpend(0), + Sequence::from_height(paramset.disprove_timeout_timelock), + ) + .add_input( + NormalSignatureKind::DisproveTimeout2, + kickoff_txhandler.get_spendable_output(UtxoVout::KickoffFinalizer)?, + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(anchor_output( + paramset.anchor_amount(), + ))) + .finalize()) +} + +/// Creates a [`TxHandler`] for the `latest_blockhash_timeout_tx`. +/// +/// This transaction is sent by the verifiers if the latest blockhash is not provided in time by operator. +/// +/// # Inputs +/// 1. KickoffTx: LatestBlockhash utxo +/// 2. KickoffTx: KickoffFinalizer utxo +/// 3. RoundTx: BurnConnector utxo +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// +/// # Arguments +/// * `kickoff_txhandler` - The kickoff transaction handler providing the input. +/// * `round_txhandler` - The round transaction handler providing an additional input. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// A [`TxHandler`] for the latest blockhash timeout transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_latest_blockhash_timeout_txhandler( + kickoff_txhandler: &TxHandler, + round_txhandler: &TxHandler, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + Ok( + TxHandlerBuilder::new(TransactionType::LatestBlockhashTimeout) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::LatestBlockhashTimeout1, + kickoff_txhandler.get_spendable_output(UtxoVout::LatestBlockhash)?, + SpendPath::ScriptSpend(0), + Sequence::from_height(paramset.latest_blockhash_timeout_timelock), + ) + .add_input( + NormalSignatureKind::LatestBlockhashTimeout2, + kickoff_txhandler.get_spendable_output(UtxoVout::KickoffFinalizer)?, + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_input( + NormalSignatureKind::LatestBlockhashTimeout3, + round_txhandler.get_spendable_output(UtxoVout::CollateralInRound)?, + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(anchor_output( + paramset.anchor_amount(), + ))) + .finalize(), + ) +} + +/// Creates a vector of [`TxHandler`] for `mini_assert` transactions. +/// +/// These transactions are used to commit BitVM assertions of operator's proof that it paid the payout corresponding to the deposit. +/// +/// # Inputs +/// 1. KickoffTx: Assert utxo (per mini assert) +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// 2. Dummy OP_RETURN output (to pad the size of the transaction, as it is too small otherwise) +/// +/// # Arguments +/// * `kickoff_txhandler` - The kickoff transaction handler providing the input. +/// * `num_asserts` - Number of mini assert transactions to create. +/// +/// # Returns +/// A vector of [`TxHandler`] for mini assert transactions, or a [`BridgeError`] if construction fails. 
+pub fn create_mini_asserts( + kickoff_txhandler: &TxHandler, + num_asserts: usize, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + let mut txhandlers = Vec::new(); + for idx in 0..num_asserts { + txhandlers.push( + TxHandlerBuilder::new(TransactionType::MiniAssert(idx)) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::MiniAssert1, + kickoff_txhandler.get_spendable_output(UtxoVout::Assert(idx))?, + SpendPath::ScriptSpend(1), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .add_output(UnspentTxOut::from_partial(op_return_txout(b""))) + .finalize(), + ); + } + Ok(txhandlers) +} + +/// Creates a [`TxHandler`] for the `latest_blockhash_tx`. +/// +/// This transaction is used by operator to commit the latest blockhash of the bitcoin chain. This latest blockhash will be used later +/// in the operator's bridge proof. Mainly used to reduce the time operator can spend building a private fork. +/// +/// # Inputs +/// 1. KickoffTx: LatestBlockhash utxo +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// 2. Dummy OP_RETURN output (to pad the size of the transaction, as it is too small otherwise) +/// +/// # Arguments +/// * `kickoff_txhandler` - The kickoff transaction handler providing the input. +/// +/// # Returns +/// A [`TxHandler`] for the latest blockhash transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_latest_blockhash_txhandler( + kickoff_txhandler: &TxHandler, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + Ok(TxHandlerBuilder::new(TransactionType::LatestBlockhash) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::LatestBlockhash, + kickoff_txhandler.get_spendable_output(UtxoVout::LatestBlockhash)?, + SpendPath::ScriptSpend(1), + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(anchor_output( + paramset.anchor_amount(), + ))) + .add_output(UnspentTxOut::from_partial(op_return_txout(b""))) + .finalize()) +} diff --git a/core/src/builder/transaction/operator_collateral.rs b/core/src/builder/transaction/operator_collateral.rs new file mode 100644 index 000000000..dfba643e3 --- /dev/null +++ b/core/src/builder/transaction/operator_collateral.rs @@ -0,0 +1,472 @@ +//! # Collaterals +//! +//! This module contains the logic for creating the `round_tx`, `ready_to_reimburse_tx`, +//! and `unspent_kickoff_tx` transactions. These transactions are used to control the sequence of transactions +//! in the withdrawal process and limits the number of withdrawals the operator can make in a given time period. +//! +//! The flow is as follows: +//! `round_tx -> ready_to_reimburse_tx -> round_tx -> ...` +//! +//! The `round_tx` is used to create a collateral for the withdrawal, kickoff utxos for the current +//! round and the reimburse connectors for the previous round. 
+
+use super::input::UtxoVout;
+use super::txhandler::DEFAULT_SEQUENCE;
+use crate::builder;
+use crate::builder::address::create_taproot_address;
+use crate::builder::script::{TimelockScript, WinternitzCommit};
+use crate::builder::transaction::creator::KickoffWinternitzKeys;
+use crate::builder::transaction::input::SpendableTxIn;
+use crate::builder::transaction::output::UnspentTxOut;
+use crate::builder::transaction::txhandler::TxHandler;
+use crate::builder::transaction::*;
+use crate::config::protocol::ProtocolParamset;
+use crate::constants::MIN_TAPROOT_AMOUNT;
+use crate::errors::BridgeError;
+use crate::rpc::clementine::NumberedSignatureKind;
+use bitcoin::Sequence;
+use bitcoin::{Amount, OutPoint, TxOut, XOnlyPublicKey};
+use std::sync::Arc;
+
+pub enum RoundTxInput {
+ Prevout(Box),
+ Collateral(OutPoint, Amount),
+}
+
+/// Creates a [`TxHandler`] for `round_tx`.
+///
+/// This transaction is used to create a collateral for the withdrawal, kickoff UTXOs for the current round, and the reimburse connectors for the previous round.
+/// It always uses the first output of the previous `ready_to_reimburse_tx` as the input, chaining rounds together.
+///
+/// `round tx` inputs:
+/// 1. Either the first collateral utxo of the operator, or the operator's collateral in the previous round's ready to reimburse tx.
+///
+/// `round tx` outputs:
+/// 1. Operator's Burn Connector
+/// 2. Kickoff utxo(s): the utxos will be used as the input for the kickoff transactions
+/// 3. Reimburse utxo(s): the utxo(s) will be used as an input to Reimburse TX
+/// 4. P2Anchor: Anchor output for CPFP
+///
+/// # Arguments
+/// * `operator_xonly_pk` - The operator's x-only public key.
+/// * `txin` - The input to the round transaction (either a previous output or the first collateral).
+/// * `pubkeys` - Winternitz public keys for the round's kickoff UTXOs.
+/// * `paramset` - Protocol parameter set.
+/// +/// # Returns +/// A [`TxHandler`] for the round transaction, or a [`BridgeError`] if construction fails. +pub fn create_round_txhandler( + operator_xonly_pk: XOnlyPublicKey, + txin: RoundTxInput, + pubkeys: &[bitvm::signatures::winternitz::PublicKey], + paramset: &'static ProtocolParamset, +) -> Result { + let mut builder = TxHandlerBuilder::new(TransactionType::Round).with_version(NON_STANDARD_V3); + let input_amount; + match txin { + RoundTxInput::Prevout(prevout) => { + input_amount = prevout.get_prevout().value; + builder = builder.add_input( + NormalSignatureKind::OperatorSighashDefault, + *prevout, + SpendPath::KeySpend, + Sequence::from_height(paramset.operator_reimburse_timelock), + ); + } + RoundTxInput::Collateral(outpoint, amount) => { + let (op_address, op_spend) = + create_taproot_address(&[], Some(operator_xonly_pk), paramset.network); + input_amount = amount; + builder = builder.add_input( + NormalSignatureKind::OperatorSighashDefault, + SpendableTxIn::new( + outpoint, + TxOut { + value: input_amount, + script_pubkey: op_address.script_pubkey(), + }, + vec![], + Some(op_spend.clone()), + ), + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ); + } + } + + // This 1 block is to enforce that operator has to put a sequence number in the input + // so this spending path can't be used to send kickoff tx + let timeout_block_count_locked_script = + Arc::new(TimelockScript::new(Some(operator_xonly_pk), 1)); + + let total_required = (paramset.kickoff_amount + paramset.default_utxo_amount()) + .checked_mul(paramset.num_kickoffs_per_round as u64) + .and_then(|kickoff_total| kickoff_total.checked_add(paramset.anchor_amount())) + .ok_or_else(|| { + BridgeError::ArithmeticOverflow("Total required amount calculation overflow") + })?; + + let remaining_amount = input_amount.checked_sub(total_required).ok_or_else(|| { + BridgeError::InsufficientFunds("Input amount insufficient for required outputs") + })?; + + builder = builder.add_output(UnspentTxOut::from_scripts( 
+ remaining_amount, + vec![], + Some(operator_xonly_pk), + paramset.network, + )); + + // add kickoff utxos + for pubkey in pubkeys.iter().take(paramset.num_kickoffs_per_round) { + let blockhash_commit = Arc::new(WinternitzCommit::new( + vec![(pubkey.clone(), paramset.kickoff_blockhash_commit_length)], + operator_xonly_pk, + paramset.winternitz_log_d, + )); + builder = builder.add_output(UnspentTxOut::from_scripts( + paramset.kickoff_amount, + vec![blockhash_commit, timeout_block_count_locked_script.clone()], + None, + paramset.network, + )); + } + // Create reimburse utxos + for _ in 0..paramset.num_kickoffs_per_round { + builder = builder.add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount(), + vec![], + Some(operator_xonly_pk), + paramset.network, + )); + } + Ok(builder + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize()) +} + +/// Creates a vector of [`TxHandler`] for `assert_timeout_tx` transactions. +/// +/// These transactions can be sent by anyone if the operator did not send their asserts in time, burning their burn connector and kickoff finalizer. +/// +/// # Inputs +/// 1. KickoffTx: Assert utxo (corresponding to the assert) +/// 2. KickoffTx: KickoffFinalizer utxo +/// 3. RoundTx: BurnConnector utxo +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// +/// # Arguments +/// * `kickoff_txhandler` - The kickoff transaction handler providing the input. +/// * `round_txhandler` - The round transaction handler providing an additional input. +/// * `num_asserts` - Number of assert timeout transactions to create. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// A vector of [`TxHandler`] for all assert timeout transactions, or a [`BridgeError`] if construction fails. 
+pub fn create_assert_timeout_txhandlers( + kickoff_txhandler: &TxHandler, + round_txhandler: &TxHandler, + num_asserts: usize, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + let mut txhandlers = Vec::new(); + for idx in 0..num_asserts { + txhandlers.push( + TxHandlerBuilder::new(TransactionType::AssertTimeout(idx)) + .with_version(NON_STANDARD_V3) + .add_input( + (NumberedSignatureKind::AssertTimeout1, idx as i32), + kickoff_txhandler.get_spendable_output(UtxoVout::Assert(idx))?, + SpendPath::ScriptSpend(0), + Sequence::from_height(paramset.assert_timeout_timelock), + ) + .add_input( + (NumberedSignatureKind::AssertTimeout2, idx as i32), + kickoff_txhandler.get_spendable_output(UtxoVout::KickoffFinalizer)?, + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_input( + (NumberedSignatureKind::AssertTimeout3, idx as i32), + round_txhandler.get_spendable_output(UtxoVout::CollateralInRound)?, + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize(), + ); + } + Ok(txhandlers) +} + +/// Creates the nth (1-indexed) `round_txhandler` and `reimburse_generator_txhandler` pair for a specific operator. +/// +/// # Arguments +/// * `operator_xonly_pk` - The operator's x-only public key. +/// * `input_outpoint` - The outpoint to use as input for the first round. +/// * `input_amount` - The amount for the input outpoint. +/// * `index` - The index of the round to create. +/// * `pubkeys` - Winternitz keys for all rounds. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// A tuple of (`TxHandler` for the round, `TxHandler` for ready-to-reimburse), or a [`BridgeError`] if construction fails. 
+pub fn create_round_nth_txhandler( + operator_xonly_pk: XOnlyPublicKey, + input_outpoint: OutPoint, + input_amount: Amount, + index: RoundIndex, + pubkeys: &KickoffWinternitzKeys, + paramset: &'static ProtocolParamset, +) -> Result<(TxHandler, TxHandler), BridgeError> { + // 0th round is the collateral, there are no keys for the 0th round + // Additionally there are no keys after num_rounds + 1, +1 is because we need additional round to generate + // reimbursement connectors of previous round + + if index == RoundIndex::Collateral + || index.to_index() > RoundIndex::Round(paramset.num_round_txs).to_index() + { + return Err(TxError::InvalidRoundIndex(index).into()); + } + + // create the first round txhandler + let mut round_txhandler = create_round_txhandler( + operator_xonly_pk, + RoundTxInput::Collateral(input_outpoint, input_amount), + pubkeys.get_keys_for_round(RoundIndex::Round(0))?, + paramset, + )?; + let mut ready_to_reimburse_txhandler = + create_ready_to_reimburse_txhandler(&round_txhandler, operator_xonly_pk, paramset)?; + + // get which round index we are creating txhandlers for + let round_idx = match index { + RoundIndex::Collateral => 0, // impossible, checked before + RoundIndex::Round(idx) => idx, + }; + // iterate starting from second round to the requested round + for round_idx in RoundIndex::iter_rounds_range(1, round_idx + 1) { + round_txhandler = create_round_txhandler( + operator_xonly_pk, + RoundTxInput::Prevout(Box::new( + ready_to_reimburse_txhandler + .get_spendable_output(UtxoVout::CollateralInReadyToReimburse)?, + )), + pubkeys.get_keys_for_round(round_idx)?, + paramset, + )?; + ready_to_reimburse_txhandler = + create_ready_to_reimburse_txhandler(&round_txhandler, operator_xonly_pk, paramset)?; + } + Ok((round_txhandler, ready_to_reimburse_txhandler)) +} + +/// Creates a [`TxHandler`] for the `ready_to_reimburse_tx`. +/// +/// # Inputs +/// 1. RoundTx: BurnConnector utxo +/// +/// # Outputs +/// 1. Operator's collateral +/// 2. 
Anchor output for CPFP +/// +/// # Arguments +/// * `round_txhandler` - The round transaction handler providing the input. +/// * `operator_xonly_pk` - The operator's x-only public key. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// A [`TxHandler`] for the ready-to-reimburse transaction, or a [`BridgeError`] if construction fails. +pub fn create_ready_to_reimburse_txhandler( + round_txhandler: &TxHandler, + operator_xonly_pk: XOnlyPublicKey, + paramset: &'static ProtocolParamset, +) -> Result { + let prevout = round_txhandler.get_spendable_output(UtxoVout::CollateralInRound)?; + let prev_value = prevout.get_prevout().value; + + Ok(TxHandlerBuilder::new(TransactionType::ReadyToReimburse) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::OperatorSighashDefault, + prevout, + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_scripts( + prev_value - paramset.anchor_amount(), + vec![], + Some(operator_xonly_pk), + paramset.network, + )) + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize()) +} + +/// Creates a vector of [`TxHandler`] for `unspent_kickoff_tx` transactions. +/// These transactions can be sent if an operator sends ReadyToReimburse transaction without spending all the kickoff utxos of the round. +/// +/// # Inputs +/// 1. ReadyToReimburseTx: BurnConnector utxo +/// 2. RoundTx: Any kickoff utxo of the same round +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// +/// # Arguments +/// * `round_txhandler` - The round transaction handler providing the kickoff utxos. +/// * `ready_to_reimburse_txhandler` - The ready-to-reimburse transaction handler providing the collateral. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// A vector of [`TxHandler`] for unspent kickoff transactions, or a [`BridgeError`] if construction fails. 
+pub fn create_unspent_kickoff_txhandlers( + round_txhandler: &TxHandler, + ready_to_reimburse_txhandler: &TxHandler, + paramset: &'static ProtocolParamset, +) -> Result, BridgeError> { + let mut txhandlers = Vec::new(); + for idx in 0..paramset.num_kickoffs_per_round { + txhandlers.push( + TxHandlerBuilder::new(TransactionType::UnspentKickoff(idx)) + .with_version(NON_STANDARD_V3) + .add_input( + (NumberedSignatureKind::UnspentKickoff1, idx as i32), + ready_to_reimburse_txhandler + .get_spendable_output(UtxoVout::CollateralInReadyToReimburse)?, + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_input( + (NumberedSignatureKind::UnspentKickoff2, idx as i32), + round_txhandler.get_spendable_output(UtxoVout::Kickoff(idx))?, + SpendPath::ScriptSpend(1), + Sequence::from_height(1), + ) + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize(), + ); + } + Ok(txhandlers) +} + +/// Creates a [`TxHandler`] for burning unused kickoff connectors. +/// +/// # Inputs +/// 1. RoundTx: Kickoff utxo(s) (per unused connector) +/// +/// # Outputs +/// 1. Change output to the provided address +/// 2. Anchor output for CPFP +/// +/// # Arguments +/// * `round_txhandler` - The round transaction handler providing the input. +/// * `unused_kickoff_connectors_indices` - Indices of the unused kickoff connectors (0-indexed). +/// * `change_address` - The address to send the change to. +/// +/// # Returns +/// A [`TxHandler`] for burning unused kickoff connectors, or a [`BridgeError`] if construction fails. 
+pub fn create_burn_unused_kickoff_connectors_txhandler( + round_txhandler: &TxHandler, + unused_kickoff_connectors_indices: &[usize], + change_address: &Address, + paramset: &'static ProtocolParamset, +) -> Result { + let mut tx_handler_builder = + TxHandlerBuilder::new(TransactionType::BurnUnusedKickoffConnectors) + .with_version(NON_STANDARD_V3); + for &idx in unused_kickoff_connectors_indices { + tx_handler_builder = tx_handler_builder.add_input( + NormalSignatureKind::OperatorSighashDefault, + round_txhandler.get_spendable_output(UtxoVout::Kickoff(idx))?, + SpendPath::ScriptSpend(1), + Sequence::from_height(1), + ); + } + if !paramset.bridge_nonstandard { + // if we use standard tx's, kickoff utxo's will hold some sats so we can return the change to the change address + // but if we use nonstandard tx's with 0 sat values then the change is 0 anyway, no need to add an output + tx_handler_builder = tx_handler_builder.add_output(UnspentTxOut::from_partial(TxOut { + value: MIN_TAPROOT_AMOUNT, + script_pubkey: change_address.script_pubkey(), + })); + } + tx_handler_builder = tx_handler_builder.add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )); + Ok(tx_handler_builder.finalize()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::protocol::REGTEST_PARAMSET; + use std::str::FromStr; + + #[tokio::test] + async fn test_create_round_nth_txhandler_and_round_txhandlers() { + // check if round_nth_txhandler and round_txhandlers are consistent with each other + let op_xonly_pk = XOnlyPublicKey::from_str( + "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0", + ) + .expect("this key is valid"); + let paramset = ®TEST_PARAMSET; + let input_outpoint = OutPoint::new(bitcoin::Txid::all_zeros(), 0); + let input_amount = Amount::from_sat(10000000000); + let pubkeys = KickoffWinternitzKeys::new( + vec![vec![[0u8; 20]; 44]; paramset.num_round_txs * paramset.num_kickoffs_per_round], + 
paramset.num_round_txs, + paramset.num_kickoffs_per_round, + ); + + let mut round_tx_input = RoundTxInput::Collateral(input_outpoint, input_amount); + + for i in 0..paramset.num_round_txs { + let (round_nth_txhandler, ready_to_reimburse_nth_txhandler) = + create_round_nth_txhandler( + op_xonly_pk, + input_outpoint, + input_amount, + RoundIndex::Round(i), + &pubkeys, + paramset, + ) + .unwrap(); + + let round_txhandler = create_round_txhandler( + op_xonly_pk, + round_tx_input, + pubkeys.get_keys_for_round(RoundIndex::Round(i)).unwrap(), + paramset, + ) + .unwrap(); + + let ready_to_reimburse_txhandler = + create_ready_to_reimburse_txhandler(&round_txhandler, op_xonly_pk, paramset) + .unwrap(); + + assert_eq!(round_nth_txhandler.get_txid(), round_txhandler.get_txid()); + assert_eq!( + ready_to_reimburse_nth_txhandler.get_txid(), + ready_to_reimburse_txhandler.get_txid() + ); + + let prev_ready_to_reimburse_txhandler = ready_to_reimburse_txhandler; + round_tx_input = RoundTxInput::Prevout(Box::new( + prev_ready_to_reimburse_txhandler + .get_spendable_output(UtxoVout::CollateralInReadyToReimburse) + .unwrap(), + )); + } + } +} diff --git a/core/src/builder/transaction/operator_reimburse.rs b/core/src/builder/transaction/operator_reimburse.rs new file mode 100644 index 000000000..2d7ff3511 --- /dev/null +++ b/core/src/builder/transaction/operator_reimburse.rs @@ -0,0 +1,494 @@ +//! # Operator Reimburse Transactions +//! +//! This module contains the logic for creating operator reimbursement and payout-related transactions in the protocol. +//! These transactions handle the flow of funds for operator compensation, challenge handling, and user withdrawals. +//! +//! The main responsibilities include: +//! - Constructing the kickoff transaction, which sets up all outputs needed for subsequent protocol steps (challenge, reimbursement, asserts, etc.). +//! - Creating transactions for operator reimbursement in case of honest behavior. +//! 
- Handling payout transactions for user withdrawals, including both standard (with BitVM) and optimistic payout flows. +//! + +use super::create_move_to_vault_txhandler; +use super::input::SpendableTxIn; +use super::input::UtxoVout; +use super::op_return_txout; +use super::txhandler::DEFAULT_SEQUENCE; +use super::HiddenNode; +use super::Signed; +use super::TransactionType; +use super::TxError; +use crate::builder::script::{CheckSig, SpendableScript, TimelockScript}; +use crate::builder::script::{PreimageRevealScript, SpendPath}; +use crate::builder::transaction::output::UnspentTxOut; +use crate::builder::transaction::txhandler::{TxHandler, TxHandlerBuilder}; +use crate::config::protocol::ProtocolParamset; +use crate::constants::NON_STANDARD_V3; +use crate::deposit::{DepositData, KickoffData}; +use crate::errors::BridgeError; +use crate::rpc::clementine::NormalSignatureKind; +use crate::{builder, UTXO}; +use bitcoin::hashes::Hash; +use bitcoin::script::PushBytesBuf; +use bitcoin::secp256k1::schnorr::Signature; +use bitcoin::ScriptBuf; +use bitcoin::XOnlyPublicKey; +use bitcoin::{TxOut, Txid}; +use std::sync::Arc; + +#[derive(Debug, Clone)] +pub enum AssertScripts<'a> { + AssertScriptTapNodeHash(&'a [[u8; 32]]), + AssertSpendableScript(Vec>), +} + +#[derive(Debug, Clone)] +pub enum DisprovePath<'a> { + Scripts(Vec), + HiddenNode(HiddenNode<'a>), +} + +/// Creates a [`TxHandler`] for the `kickoff_tx`. +/// +/// This transaction is sent by the operator to initialize protocol state for a round, when operator fronted a peg-out and wants reimbursement. It sets up all outputs needed for subsequent protocol steps (challenge, reimbursement, asserts, etc.). +/// +/// # Inputs +/// 1. RoundTx: Kickoff utxo (for the given kickoff index) +/// +/// # Outputs +/// 1. Operator challenge output (for challenge or no-challenge path) +/// 2. Kickoff finalizer connector +/// 3. Reimburse connector (to be used in reimburse transaction) +/// 4. 
Disprove output (Taproot, for BitVM disprove path) +/// 5. Latest blockhash output (for latest blockhash assertion using winternitz signatures) +/// 6. Multiple assert outputs (for BitVM assertions, currently 33) +/// 7. For each watchtower 2 outputs: +/// - Watchtower challenge output +/// - Operator challenge ack/nack output +/// 8. OP_RETURN output (with move-to-vault txid and operator xonly pubkey) +/// 9. Anchor output for CPFP +/// +/// # Arguments +/// * `kickoff_data` - Data to identify the kickoff. +/// * `round_txhandler` - The round transaction handler providing the input. +/// * `move_txhandler` - The move-to-vault transaction handler. +/// * `deposit_data` - Mutable reference to deposit data. +/// * `operator_xonly_pk` - The operator's x-only public key. +/// * `assert_scripts` - Actual assertion scripts or tapnode hashes (for faster creation of assert utxos) for BitVM assertion. +/// * `disprove_root_hash` - Root hash for BitVM disprove scripts. +/// * `additional_disprove_script` - Additional disprove script bytes (for additional disprove script specific to Clementine). +/// * `latest_blockhash_script` - Actual script or tapnode hash for latest blockhash assertion. +/// * `operator_unlock_hashes` - Unlock hashes for operator preimage reveals for OperatorChallengeAck transactions. +/// * `paramset` - Protocol parameter set. +/// +/// # Returns +/// A [`TxHandler`] for the kickoff transaction, or a [`BridgeError`] if construction fails. 
+#[allow(clippy::too_many_arguments)] +pub fn create_kickoff_txhandler( + kickoff_data: KickoffData, + round_txhandler: &TxHandler, + move_txhandler: &TxHandler, + deposit_data: &mut DepositData, + operator_xonly_pk: XOnlyPublicKey, + assert_scripts: AssertScripts, + disprove_path: DisprovePath, + additional_disprove_script: Vec, + latest_blockhash_script: AssertScripts, + operator_unlock_hashes: &[[u8; 20]], + paramset: &'static ProtocolParamset, +) -> Result { + let kickoff_idx = kickoff_data.kickoff_idx as usize; + let move_txid: Txid = *move_txhandler.get_txid(); + let mut builder = TxHandlerBuilder::new(TransactionType::Kickoff).with_version(NON_STANDARD_V3); + builder = builder.add_input( + NormalSignatureKind::OperatorSighashDefault, + round_txhandler.get_spendable_output(UtxoVout::Kickoff(kickoff_idx))?, + builder::script::SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ); + + let nofn_script = Arc::new(CheckSig::new(deposit_data.get_nofn_xonly_pk()?)); + let operator_script = Arc::new(CheckSig::new(operator_xonly_pk)); + + let operator_1week = Arc::new(TimelockScript::new( + Some(operator_xonly_pk), + paramset.operator_challenge_timeout_timelock, + )); + + builder = builder + // goes to challenge tx or no challenge tx + .add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount(), + vec![operator_script, operator_1week], + None, + paramset.network, + )) + // kickoff finalizer connector + .add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount(), + vec![nofn_script.clone()], + None, + paramset.network, + )) + // UTXO to reimburse tx + .add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount(), + vec![nofn_script.clone()], + None, + paramset.network, + )); + + // Add disprove utxo + // Add Operator in 5 week script to taproot, that connects to disprove timeout + let operator_5week = Arc::new(TimelockScript::new( + Some(operator_xonly_pk), + paramset.disprove_timeout_timelock, + )); + + let additional_disprove_script 
= ScriptBuf::from_bytes(additional_disprove_script); + + // disprove utxo + builder = builder.add_output(super::create_disprove_taproot_output( + operator_5week, + additional_disprove_script.clone(), + disprove_path, + paramset.default_utxo_amount(), + paramset.network, + )); + + let nofn_latest_blockhash = Arc::new(TimelockScript::new( + Some(deposit_data.get_nofn_xonly_pk()?), + paramset.latest_blockhash_timeout_timelock, + )); + + match latest_blockhash_script { + AssertScripts::AssertScriptTapNodeHash(latest_blockhash_root_hash) => { + if latest_blockhash_root_hash.len() != 1 { + return Err(TxError::LatestBlockhashScriptNumber.into()); + } + let latest_blockhash_root_hash = latest_blockhash_root_hash[0]; + // latest blockhash utxo + builder = builder.add_output(super::create_taproot_output_with_hidden_node( + nofn_latest_blockhash, + &latest_blockhash_root_hash, + paramset.default_utxo_amount(), + paramset.network, + )); + } + AssertScripts::AssertSpendableScript(latest_blockhash_script) => { + if latest_blockhash_script.len() != 1 { + return Err(TxError::LatestBlockhashScriptNumber.into()); + } + let latest_blockhash_script = latest_blockhash_script[0].clone(); + builder = builder.add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount(), + vec![nofn_latest_blockhash, latest_blockhash_script], + None, + paramset.network, + )); + } + } + + // add nofn_4 week to all assert scripts + let nofn_4week = Arc::new(TimelockScript::new( + Some(deposit_data.get_nofn_xonly_pk()?), + paramset.assert_timeout_timelock, + )); + + match assert_scripts { + AssertScripts::AssertScriptTapNodeHash(assert_script_hashes) => { + for script_hash in assert_script_hashes.iter() { + // Add N-of-N in 4 week script to taproot, that connects to assert timeout + builder = builder.add_output(super::create_taproot_output_with_hidden_node( + nofn_4week.clone(), + script_hash, + paramset.default_utxo_amount(), + paramset.network, + )); + } + } + 
AssertScripts::AssertSpendableScript(assert_scripts) => { + for script in assert_scripts { + builder = builder.add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount(), + vec![nofn_4week.clone(), script], + None, + paramset.network, + )); + } + } + } + + let watchtower_xonly_pks = deposit_data.get_watchtowers(); + + for (watchtower_idx, watchtower_xonly_pk) in watchtower_xonly_pks.iter().enumerate() { + let nofn_2week = Arc::new(TimelockScript::new( + Some(deposit_data.get_nofn_xonly_pk()?), + paramset.watchtower_challenge_timeout_timelock, + )); + // UTXO for watchtower challenge or watchtower challenge timeouts + builder = builder.add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount() * 2 + paramset.anchor_amount(), // watchtower challenge has 2 taproot outputs, 1 op_return and 1 anchor + vec![nofn_2week.clone()], + Some(*watchtower_xonly_pk), // key path as watchtowers xonly pk + paramset.network, + )); + + // UTXO for operator challenge ack, nack, and watchtower challenge timeouts + let nofn_3week = Arc::new(TimelockScript::new( + Some(deposit_data.get_nofn_xonly_pk()?), + paramset.operator_challenge_nack_timelock, + )); + let operator_with_preimage = Arc::new(PreimageRevealScript::new( + operator_xonly_pk, + operator_unlock_hashes[watchtower_idx], + )); + builder = builder.add_output(UnspentTxOut::from_scripts( + paramset.default_utxo_amount(), + vec![ + nofn_3week.clone(), + nofn_2week.clone(), + operator_with_preimage, + ], + None, + paramset.network, + )); + } + + let mut op_return_script = move_txid.to_byte_array().to_vec(); + op_return_script.extend(kickoff_data.operator_xonly_pk.serialize()); + + let push_bytes = PushBytesBuf::try_from(op_return_script) + .expect("Can't fail since the script is shorter than 4294967296 bytes"); + + let op_return_txout = builder::transaction::op_return_txout(push_bytes); + + Ok(builder + .add_output(UnspentTxOut::from_partial(op_return_txout)) + .add_output(UnspentTxOut::from_partial( + 
builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize()) +} + +/// Creates a [`TxHandler`] for the `kickoff_not_finalized_tx`. +/// +/// This transaction can be sent if an operator sends a ReadyToReimburse transaction while not all kickoffs of the round are finalized, burning their collateral. +/// +/// # Inputs +/// 1. KickoffTx: KickoffFinalizer utxo +/// 2. ReadyToReimburseTx: BurnConnector utxo +/// +/// # Outputs +/// 1. Anchor output for CPFP +/// +/// # Arguments +/// * `kickoff_txhandler` - The kickoff transaction handler providing the input. +/// * `ready_to_reimburse_txhandler` - The ready-to-reimburse transaction handler providing the input. +/// +/// # Returns +/// A [`TxHandler`] for the kickoff not finalized transaction, or a [`BridgeError`] if construction fails. +pub fn create_kickoff_not_finalized_txhandler( + kickoff_txhandler: &TxHandler, + ready_to_reimburse_txhandler: &TxHandler, + paramset: &'static ProtocolParamset, +) -> Result { + Ok(TxHandlerBuilder::new(TransactionType::KickoffNotFinalized) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::KickoffNotFinalized1, + kickoff_txhandler.get_spendable_output(UtxoVout::KickoffFinalizer)?, + builder::script::SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_input( + NormalSignatureKind::KickoffNotFinalized2, + ready_to_reimburse_txhandler + .get_spendable_output(UtxoVout::CollateralInReadyToReimburse)?, + builder::script::SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize()) +} + +/// Creates a [`TxHandler`] for the `reimburse_tx`. +/// +/// This transaction is sent by the operator if no challenge was sent, or a challenge was sent but no disprove was sent, to reimburse the operator for their payout. +/// +/// # Inputs +/// 1. MoveToVaultTx: Utxo containing the deposit +/// 2. KickoffTx: Reimburse connector utxo in the kickoff +/// 3. 
RoundTx: Reimburse connector utxo in the round (for the given kickoff index) +/// +/// # Outputs +/// 1. Reimbursement output to the operator +/// 2. Anchor output for CPFP +/// +/// # Arguments +/// * `move_txhandler` - The move-to-vault transaction handler for the deposit. +/// * `round_txhandler` - The round transaction handler for the round. +/// * `kickoff_txhandler` - The kickoff transaction handler for the kickoff. +/// * `kickoff_idx` - The kickoff index of the operator's kickoff. +/// * `paramset` - Protocol parameter set. +/// * `operator_reimbursement_address` - The address to reimburse the operator. +/// +/// # Returns +/// A [`TxHandler`] for the reimburse transaction, or a [`BridgeError`] if construction fails. +pub fn create_reimburse_txhandler( + move_txhandler: &TxHandler, + round_txhandler: &TxHandler, + kickoff_txhandler: &TxHandler, + kickoff_idx: usize, + paramset: &'static ProtocolParamset, + operator_reimbursement_address: &bitcoin::Address, +) -> Result { + let builder = TxHandlerBuilder::new(TransactionType::Reimburse) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::Reimburse1, + move_txhandler.get_spendable_output(UtxoVout::DepositInMove)?, + builder::script::SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_input( + NormalSignatureKind::Reimburse2, + kickoff_txhandler.get_spendable_output(UtxoVout::ReimburseInKickoff)?, + builder::script::SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_input( + NormalSignatureKind::OperatorSighashDefault, + round_txhandler + .get_spendable_output(UtxoVout::ReimburseInRound(kickoff_idx, paramset))?, + builder::script::SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ); + + Ok(builder + .add_output(UnspentTxOut::from_partial(TxOut { + value: move_txhandler + .get_spendable_output(UtxoVout::DepositInMove)? 
+ .get_prevout() + .value, + script_pubkey: operator_reimbursement_address.script_pubkey(), + })) + .add_output(UnspentTxOut::from_partial( + builder::transaction::anchor_output(paramset.anchor_amount()), + )) + .finalize()) +} + +/// Creates a [`TxHandler`] for the `payout_tx`. +/// +/// This transaction is sent by the operator to front a peg-out, after which operator will send a kickoff transaction to get reimbursed. +/// +/// # Inputs +/// 1. UTXO: User's withdrawal input (committed in Citrea side, with the signature given to operators off-chain) +/// +/// # Outputs +/// 1. User payout output +/// 2. OP_RETURN output (with operators x-only pubkey that fronts the peg-out) +/// +/// # Arguments +/// * `input_utxo` - The input UTXO for the payout, committed in Citrea side, with the signature given to operators off-chain. +/// * `output_txout` - The output TxOut for the user payout. +/// * `operator_xonly_pk` - The operator's x-only public key that fronts the peg-out. +/// * `user_sig` - The user's signature for the payout, given to operators off-chain. +/// * `network` - The Bitcoin network. +/// +/// # Returns +/// A [`TxHandler`] for the payout transaction, or a [`BridgeError`] if construction fails. 
+pub fn create_payout_txhandler( + input_utxo: UTXO, + output_txout: TxOut, + operator_xonly_pk: XOnlyPublicKey, + user_sig: Signature, + _network: bitcoin::Network, +) -> Result, BridgeError> { + let user_sig_wrapped = bitcoin::taproot::Signature { + signature: user_sig, + sighash_type: bitcoin::sighash::TapSighashType::SinglePlusAnyoneCanPay, + }; + let txin = SpendableTxIn::new_partial(input_utxo.outpoint, input_utxo.txout); + + let output_txout = UnspentTxOut::from_partial(output_txout.clone()); + + let op_return_txout = op_return_txout(PushBytesBuf::from(operator_xonly_pk.serialize())); + + let mut txhandler = TxHandlerBuilder::new(TransactionType::Payout) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::NotStored, + txin, + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(output_txout) + .add_output(UnspentTxOut::from_partial(op_return_txout)) + .finalize(); + txhandler.set_p2tr_key_spend_witness(&user_sig_wrapped, 0)?; + txhandler.promote() +} + +/// Creates a [`TxHandler`] for the `optimistic_payout_tx`. +/// +/// This transaction is signed by all verifiers that participated in the corresponding deposit to give the deposited funds directly to the user withdrawing from Citrea. This way no kickoff/BitVM process is needed. +/// +/// # Inputs +/// 1. UTXO: User's withdrawal input (committed in Citrea side, with the signature given to operators off-chain) +/// 2. MoveToVaultTx: Utxo containing the deposit +/// +/// # Outputs +/// 1. User payout output (to the user withdrawing from Citrea) +/// 2. Anchor output for CPFP +/// +/// # Arguments +/// * `deposit_data` - Mutable reference to deposit data. +/// * `input_utxo` - The input UTXO for the payout, committed in Citrea side, with the signature given to operators off-chain. +/// * `output_txout` - The output TxOut for the user payout. +/// * `user_sig` - The user's signature for the payout, given to operators off-chain. +/// * `paramset` - Protocol parameter set. 
+/// +/// # Returns +/// A [`TxHandler`] for the optimistic payout transaction, or a [`BridgeError`] if construction fails. +pub fn create_optimistic_payout_txhandler( + deposit_data: &mut DepositData, + input_utxo: UTXO, + output_txout: TxOut, + user_sig: Signature, + paramset: &'static ProtocolParamset, +) -> Result { + let move_txhandler: TxHandler = create_move_to_vault_txhandler(deposit_data, paramset)?; + let user_sig_wrapped = bitcoin::taproot::Signature { + signature: user_sig, + sighash_type: bitcoin::sighash::TapSighashType::SinglePlusAnyoneCanPay, + }; + let txin = SpendableTxIn::new_partial(input_utxo.outpoint, input_utxo.txout); + + let output_txout = UnspentTxOut::from_partial(output_txout.clone()); + + let mut txhandler = TxHandlerBuilder::new(TransactionType::Payout) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::NotStored, + txin, + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_input( + NormalSignatureKind::NotStored, + move_txhandler.get_spendable_output(UtxoVout::DepositInMove)?, + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ) + .add_output(output_txout) + .add_output(UnspentTxOut::from_partial( + builder::transaction::non_ephemeral_anchor_output(), + )) + .finalize(); + txhandler.set_p2tr_key_spend_witness(&user_sig_wrapped, 0)?; + Ok(txhandler) +} diff --git a/core/src/builder/transaction/output.rs b/core/src/builder/transaction/output.rs new file mode 100644 index 000000000..2efb939a5 --- /dev/null +++ b/core/src/builder/transaction/output.rs @@ -0,0 +1,109 @@ +//! # Transaction Output Types and Utilities +//! +//! This module defines types and utilities for representing and handling transaction outputs used in [`super::TxHandler`]. +//! Main purpose of it is to store the scripts used in the taproot outputs. +//! 
+ +use crate::builder::address::create_taproot_address; +use crate::builder::script::SpendableScript; +use bitcoin::{taproot::TaprootSpendInfo, Amount, ScriptBuf, TxOut, XOnlyPublicKey}; +use std::sync::Arc; + +#[derive(Debug, Clone)] +/// Represents a spendable transaction output, including scripts and optional Taproot spend info. +pub struct UnspentTxOut { + txout: TxOut, + scripts: Vec>, + spendinfo: Option, +} + +impl UnspentTxOut { + /// Constructs an [`UnspentTxOut`] from a partial [`TxOut`] (no scripts or spend info). + /// + /// # Arguments + /// * `txout` - The Bitcoin transaction output. + /// + /// # Returns + /// An [`UnspentTxOut`] with no scripts or spend info. + pub fn from_partial(txout: TxOut) -> UnspentTxOut { + UnspentTxOut { + txout, + scripts: vec![], + spendinfo: None, + } + } + /// Constructs an [`UnspentTxOut`] from all fields. + /// + /// # Arguments + /// * `txout` - The Bitcoin transaction output. + /// * `scripts` - Scripts associated with this output (for script path spends). + /// * `spendinfo` - Optional Taproot spend info for this output. + /// + /// # Returns + /// An [`UnspentTxOut`] with the specified parameters. + pub fn new( + txout: TxOut, + scripts: Vec>, + spendinfo: Option, + ) -> UnspentTxOut { + UnspentTxOut { + txout, + scripts, + spendinfo, + } + } + + /// Constructs an [`UnspentTxOut`] from value, scripts, and key path. + /// + /// # Arguments + /// * `value` - The output value. + /// * `scripts` - Scripts for script path spends. + /// * `key_path` - The internal key for key path spends. + /// * `network` - Bitcoin network. + /// + /// # Returns + /// An [`UnspentTxOut`] with the specified parameters and Taproot spend info if applicable. 
+ pub fn from_scripts( + value: Amount, + scripts: Vec>, + key_path: Option, + network: bitcoin::Network, + ) -> UnspentTxOut { + let script_bufs: Vec = scripts + .iter() + .map(|script| script.clone().to_script_buf()) + .collect(); + let (addr, spend_info) = create_taproot_address(&script_bufs, key_path, network); + Self::new( + TxOut { + value, + script_pubkey: addr.script_pubkey(), + }, + scripts, + Some(spend_info), + ) + } + + /// Returns a reference to the underlying [`TxOut`]. + pub fn txout(&self) -> &TxOut { + &self.txout + } + + /// Returns a reference to the scripts for this output. + pub fn scripts(&self) -> &Vec> { + &self.scripts + } + + /// Returns a reference to the Taproot spend info for this output, if any. + pub fn spendinfo(&self) -> &Option { + &self.spendinfo + } + + /// Sets the Taproot spend info for this output. + /// + /// # Arguments + /// * `spendinfo` - The Taproot spend info to set. + pub fn set_spendinfo(&mut self, spendinfo: Option) { + self.spendinfo = spendinfo; + } +} diff --git a/core/src/builder/transaction/sign.rs b/core/src/builder/transaction/sign.rs new file mode 100644 index 000000000..abc8984c2 --- /dev/null +++ b/core/src/builder/transaction/sign.rs @@ -0,0 +1,645 @@ +//! # Transaction Signing Utilities +//! +//! This module provides logic for signing the transactions used in the Clementine bridge. 
+ +use super::challenge::create_watchtower_challenge_txhandler; +use super::{ContractContext, TxHandlerCache}; +use crate::actor::{Actor, TweakCache, WinternitzDerivationPath}; +use crate::bitvm_client::ClementineBitVMPublicKeys; +use crate::builder; +use crate::builder::transaction::creator::ReimburseDbCache; +use crate::builder::transaction::TransactionType; +use crate::citrea::CitreaClientT; +use crate::config::protocol::ProtocolParamset; +use crate::config::BridgeConfig; +use crate::database::{Database, DatabaseTransaction}; +use crate::deposit::KickoffData; +use crate::errors::{BridgeError, TxError}; +use crate::operator::{Operator, RoundIndex}; +use crate::utils::{Last20Bytes, RbfSigningInfo}; +use crate::verifier::Verifier; +use bitcoin::hashes::Hash; +use bitcoin::{BlockHash, OutPoint, Transaction, XOnlyPublicKey}; +use rand_chacha::rand_core::SeedableRng; +use rand_chacha::ChaCha12Rng; +use secp256k1::rand::seq::SliceRandom; + +/// Data to identify the deposit and kickoff. +#[derive(Debug, Clone)] +pub struct TransactionRequestData { + pub deposit_outpoint: OutPoint, + pub kickoff_data: KickoffData, +} + +/// Deterministically (given same seed) generates a set of kickoff indices for an operator to sign, using the operator's public key, deposit block hash, and deposit outpoint as the seed. +/// To make the output consistent across versions, a fixed rng algorithm (ChaCha12Rng) is used. +/// +/// This function creates a deterministic seed from the operator's public key, deposit block hash, +/// and deposit outpoint, then uses it to select a subset of kickoff indices. +/// deposit_blockhash is also included in the seed to ensure the randomness of the selected kickoff indices, otherwise deposit_outpoint +/// can be selected in a way to create hash collisions by the user depositing. +/// +/// # Arguments +/// * `paramset` - Protocol parameter set. +/// * `op_xonly_pk` - Operator's x-only public key. 
+/// * `deposit_blockhash` - Block hash of the block containing the deposit. +/// * `deposit_outpoint` - Outpoint of the deposit. +/// +/// # Returns +/// A vector of indices that the operator should sign, with the count determined by the protocol parameter `num_signed_kickoffs`. +pub fn get_kickoff_utxos_to_sign( + paramset: &'static ProtocolParamset, + op_xonly_pk: XOnlyPublicKey, + deposit_blockhash: BlockHash, + deposit_outpoint: bitcoin::OutPoint, +) -> Vec { + let deposit_data = [ + op_xonly_pk.serialize().to_vec(), + deposit_blockhash.to_byte_array().to_vec(), + deposit_outpoint.txid.to_byte_array().to_vec(), + deposit_outpoint.vout.to_le_bytes().to_vec(), + ] + .concat(); + + let seed = bitcoin::hashes::sha256d::Hash::hash(&deposit_data).to_byte_array(); + let mut rng = ChaCha12Rng::from_seed(seed); + + let mut numbers: Vec = (0..paramset.num_kickoffs_per_round).collect(); + numbers.shuffle(&mut rng); + + numbers + .into_iter() + .take(paramset.num_signed_kickoffs) + .collect() +} + +/// Creates and signs all transaction types that can be signed by the entity. +/// +/// This function handles the creation and signing of transactions based on the provided +/// transaction data. It returns a vector of signed transactions with their corresponding types. +/// +/// # Note +/// This function should not be used for transaction types that require special handling: +/// - MiniAsserts +/// - WatchtowerChallenge +/// - LatestBlockhash +/// - Disprove +/// +/// These transaction types have their own specialized signing flows. 
+pub async fn create_and_sign_txs( + db: Database, + signer: &Actor, + config: BridgeConfig, + context: ContractContext, + block_hash: Option<[u8; 20]>, //to sign kickoff + dbtx: Option>, +) -> Result, BridgeError> { + let txhandlers = builder::transaction::create_txhandlers( + match context.is_context_for_kickoff() { + true => TransactionType::AllNeededForDeposit, + // if context is only for a round, we can only sign the round txs + false => TransactionType::Round, + }, + context.clone(), + &mut TxHandlerCache::new(), + &mut match context.is_context_for_kickoff() { + true => ReimburseDbCache::new_for_deposit( + db.clone(), + context.operator_xonly_pk, + context + .deposit_data + .as_ref() + .expect("Already checked existence of deposit data") + .get_deposit_outpoint(), + config.protocol_paramset(), + dbtx, + ), + false => ReimburseDbCache::new_for_rounds( + db.clone(), + context.operator_xonly_pk, + config.protocol_paramset(), + dbtx, + ), + }, + ) + .await?; + + let mut signatures = Vec::new(); + + if context.is_context_for_kickoff() { + // signatures saved during deposit + let deposit_sigs_query = db + .get_deposit_signatures( + None, + context + .deposit_data + .as_ref() + .expect("Should have deposit data at this point") + .get_deposit_outpoint(), + context.operator_xonly_pk, + context.round_idx, + context + .kickoff_idx + .expect("Already checked existence of kickoff idx") as usize, + ) + .await?; + signatures.extend(deposit_sigs_query.unwrap_or_default()); + } + + // signatures saved during setup + let setup_sigs_query = db + .get_unspent_kickoff_sigs(None, context.operator_xonly_pk, context.round_idx) + .await?; + + signatures.extend(setup_sigs_query.unwrap_or_default()); + + let mut signed_txs = Vec::with_capacity(txhandlers.len()); + let mut tweak_cache = TweakCache::default(); + + for (tx_type, mut txhandler) in txhandlers.into_iter() { + let _ = signer.tx_sign_and_fill_sigs(&mut txhandler, &signatures, Some(&mut tweak_cache)); + + if let 
TransactionType::OperatorChallengeAck(watchtower_idx) = tx_type { + let path = WinternitzDerivationPath::ChallengeAckHash( + watchtower_idx as u32, + context + .deposit_data + .as_ref() + .expect("Should have deposit data at this point") + .get_deposit_outpoint(), + config.protocol_paramset(), + ); + let preimage = signer.generate_preimage_from_path(path)?; + let _ = signer.tx_sign_preimage(&mut txhandler, preimage); + } + + if let TransactionType::Kickoff = tx_type { + if let Some(block_hash) = block_hash { + // need to commit blockhash to start kickoff + let path = WinternitzDerivationPath::Kickoff( + context.round_idx, + context + .kickoff_idx + .expect("Should have kickoff idx at this point"), + config.protocol_paramset(), + ); + signer.tx_sign_winternitz(&mut txhandler, &[(block_hash.to_vec(), path)])?; + } + // do not give err if blockhash was not given + } + + let checked_txhandler = txhandler.promote(); + + match checked_txhandler { + Ok(checked_txhandler) => { + signed_txs.push((tx_type, checked_txhandler.get_cached_tx().clone())); + } + Err(e) => { + tracing::trace!( + "Couldn't sign transaction {:?} in create_and_sign_all_txs: {:?}. + This might be normal if the transaction is not needed to be/cannot be signed.", + tx_type, + e + ); + } + } + } + + Ok(signed_txs) +} + +impl Verifier +where + C: CitreaClientT, +{ + /// Creates and signs the watchtower challenge with the given commit data. + /// + /// # Arguments + /// * `transaction_data` - Data to identify the deposit and kickoff. + /// * `commit_data` - Commit data for the watchtower challenge. + /// + /// # Returns + /// A tuple of: + /// 1. TransactionType: WatchtowerChallenge + /// 2. Transaction: Signed watchtower challenge transaction + /// 3. 
RbfSigningInfo: Rbf signing info for the watchtower challenge (for re-signing the transaction after a rbf input is added to the tx) + pub async fn create_watchtower_challenge( + &self, + transaction_data: TransactionRequestData, + commit_data: &[u8], + dbtx: Option>, + ) -> Result<(TransactionType, Transaction, RbfSigningInfo), BridgeError> { + if commit_data.len() != self.config.protocol_paramset().watchtower_challenge_bytes { + return Err(TxError::IncorrectWatchtowerChallengeDataLength.into()); + } + + let deposit_data = self + .db + .get_deposit_data(None, transaction_data.deposit_outpoint) + .await? + .ok_or(BridgeError::DepositNotFound( + transaction_data.deposit_outpoint, + ))? + .1; + + let context = ContractContext::new_context_with_signer( + transaction_data.kickoff_data, + deposit_data.clone(), + self.config.protocol_paramset(), + self.signer.clone(), + ); + + let mut txhandlers = builder::transaction::create_txhandlers( + TransactionType::AllNeededForDeposit, + context, + &mut TxHandlerCache::new(), + &mut ReimburseDbCache::new_for_deposit( + self.db.clone(), + transaction_data.kickoff_data.operator_xonly_pk, + transaction_data.deposit_outpoint, + self.config.protocol_paramset(), + dbtx, + ), + ) + .await?; + + let kickoff_txhandler = txhandlers + .remove(&TransactionType::Kickoff) + .ok_or(TxError::TxHandlerNotFound(TransactionType::Kickoff))?; + + let watchtower_index = deposit_data.get_watchtower_index(&self.signer.xonly_public_key)?; + + let watchtower_challenge_txhandler = create_watchtower_challenge_txhandler( + &kickoff_txhandler, + watchtower_index, + commit_data, + self.config.protocol_paramset(), + #[cfg(test)] + &self.config.test_params, + )?; + + let merkle_root = watchtower_challenge_txhandler.get_merkle_root_of_txin(0)?; + + #[cfg(test)] + let mut annex: Option> = None; + + #[cfg(test)] + let mut additional_taproot_output_count = None; + + #[cfg(test)] + { + if self.config.test_params.use_small_annex { + annex = Some(vec![80u8; 10000]); + } 
else if self.config.test_params.use_large_annex { + annex = Some(vec![80u8; 3990000]); + } else if self.config.test_params.use_large_annex_and_output { + annex = Some(vec![80u8; 3000000]); + additional_taproot_output_count = Some(2300); + } else if self.config.test_params.use_large_output { + additional_taproot_output_count = Some(2300); + } + } + + Ok(( + TransactionType::WatchtowerChallenge(watchtower_index), + watchtower_challenge_txhandler.get_cached_tx().clone(), + RbfSigningInfo { + vout: 0, + tweak_merkle_root: merkle_root, + #[cfg(test)] + annex, + #[cfg(test)] + additional_taproot_output_count, + }, + )) + } + + /// Creates and signs all the unspent kickoff connector (using the previously saved signatures from operator during setup) + /// transactions for a single round of an operator. + /// + /// # Arguments + /// * `round_idx` - Index of the round. + /// * `operator_xonly_pk` - Operator's x-only public key. + /// + /// # Returns + /// A vector of tuples: + /// 1. TransactionType: UnspentKickoff(idx) for idx'th kickoff in the round + /// 2. Transaction: Signed unspent kickoff connector transaction + pub async fn create_and_sign_unspent_kickoff_connector_txs( + &self, + round_idx: RoundIndex, + operator_xonly_pk: XOnlyPublicKey, + dbtx: Option>, + ) -> Result, BridgeError> { + let context = ContractContext::new_context_for_round( + operator_xonly_pk, + round_idx, + self.config.protocol_paramset(), + ); + + let txhandlers = builder::transaction::create_txhandlers( + TransactionType::UnspentKickoff(0), + context, + &mut TxHandlerCache::new(), + &mut ReimburseDbCache::new_for_rounds( + self.db.clone(), + operator_xonly_pk, + self.config.protocol_paramset(), + dbtx, + ), + ) + .await?; + + // signatures saved during setup + let unspent_kickoff_sigs = self + .db + .get_unspent_kickoff_sigs(None, operator_xonly_pk, round_idx) + .await? 
+ .ok_or(eyre::eyre!( + "No unspent kickoff signatures found for operator {:?} and round {:?}", + operator_xonly_pk, + round_idx + ))?; + + let mut signed_txs = Vec::with_capacity(txhandlers.len()); + let mut tweak_cache = TweakCache::default(); + + for (tx_type, mut txhandler) in txhandlers.into_iter() { + if !matches!(tx_type, TransactionType::UnspentKickoff(_)) { + // do not try to sign unrelated txs + continue; + } + self.signer.tx_sign_and_fill_sigs( + &mut txhandler, + &unspent_kickoff_sigs, + Some(&mut tweak_cache), + )?; + + let checked_txhandler = txhandler.promote(); + + match checked_txhandler { + Ok(checked_txhandler) => { + signed_txs.push((tx_type, checked_txhandler.get_cached_tx().clone())); + } + Err(e) => { + tracing::trace!( + "Couldn't sign transaction {:?} in create_and_sign_unspent_kickoff_connector_txs: {:?}", + tx_type, + e + ); + } + } + } + + Ok(signed_txs) + } +} + +impl Operator +where + C: CitreaClientT, +{ + /// Creates and signs all the assert commitment transactions for a single kickoff of an operator. + /// + /// # Arguments + /// * `assert_data` - Data to identify the deposit and kickoff. + /// * `commit_data` - BitVM assertions for the kickoff, for each assert tx. + /// + /// # Returns + /// A vector of tuples: + /// 1. TransactionType: MiniAssert(idx) for idx'th assert commitment + /// 2. Transaction: Signed assert commitment transaction + pub async fn create_assert_commitment_txs( + &self, + assert_data: TransactionRequestData, + commit_data: Vec>>, + dbtx: Option>, + ) -> Result, BridgeError> { + let deposit_data = self + .db + .get_deposit_data(None, assert_data.deposit_outpoint) + .await? + .ok_or(BridgeError::DepositNotFound(assert_data.deposit_outpoint))? 
+ .1; + + let context = ContractContext::new_context_with_signer( + assert_data.kickoff_data, + deposit_data.clone(), + self.config.protocol_paramset(), + self.signer.clone(), + ); + + let mut txhandlers = builder::transaction::create_txhandlers( + TransactionType::MiniAssert(0), + context, + &mut TxHandlerCache::new(), + &mut ReimburseDbCache::new_for_deposit( + self.db.clone(), + self.signer.xonly_public_key, + assert_data.deposit_outpoint, + self.config.protocol_paramset(), + dbtx, + ), + ) + .await?; + + let mut signed_txhandlers = Vec::new(); + + for idx in 0..ClementineBitVMPublicKeys::number_of_assert_txs() { + let mut mini_assert_txhandler = txhandlers + .remove(&TransactionType::MiniAssert(idx)) + .ok_or(TxError::TxHandlerNotFound(TransactionType::MiniAssert(idx)))?; + let derivations = ClementineBitVMPublicKeys::get_assert_derivations( + idx, + assert_data.deposit_outpoint, + self.config.protocol_paramset(), + ); + // Combine data to be committed with the corresponding bitvm derivation path (needed to regenerate the winternitz secret keys + // to sign the transaction) + let winternitz_data: Vec<(Vec, WinternitzDerivationPath)> = derivations + .iter() + .zip(commit_data[idx].iter()) + .map(|(derivation, commit_data)| match derivation { + WinternitzDerivationPath::BitvmAssert(_len, _, _, _, _) => { + (commit_data.clone(), derivation.clone()) + } + _ => unreachable!(), + }) + .collect(); + self.signer + .tx_sign_winternitz(&mut mini_assert_txhandler, &winternitz_data)?; + signed_txhandlers.push(mini_assert_txhandler.promote()?); + } + + Ok(signed_txhandlers + .into_iter() + .map(|txhandler| { + ( + txhandler.get_transaction_type(), + txhandler.get_cached_tx().clone(), + ) + }) + .collect()) + } + + /// Creates and signs the latest blockhash transaction for a single kickoff of an operator. + /// + /// # Arguments + /// * `assert_data` - Data to identify the deposit and kickoff. + /// * `block_hash` - Block hash to commit using winternitz signatures. 
+ /// + /// # Returns + /// A tuple of: + /// 1. TransactionType: LatestBlockhash + /// 2. Transaction: Signed latest blockhash transaction + pub async fn create_latest_blockhash_tx( + &self, + assert_data: TransactionRequestData, + block_hash: BlockHash, + dbtx: Option>, + ) -> Result<(TransactionType, Transaction), BridgeError> { + let deposit_data = self + .db + .get_deposit_data(None, assert_data.deposit_outpoint) + .await? + .ok_or(BridgeError::DepositNotFound(assert_data.deposit_outpoint))? + .1; + + let context = ContractContext::new_context_with_signer( + assert_data.kickoff_data, + deposit_data, + self.config.protocol_paramset(), + self.signer.clone(), + ); + + let mut txhandlers = builder::transaction::create_txhandlers( + TransactionType::LatestBlockhash, + context, + &mut TxHandlerCache::new(), + &mut ReimburseDbCache::new_for_deposit( + self.db.clone(), + assert_data.kickoff_data.operator_xonly_pk, + assert_data.deposit_outpoint, + self.config.protocol_paramset(), + dbtx, + ), + ) + .await?; + + let mut latest_blockhash_txhandler = + txhandlers + .remove(&TransactionType::LatestBlockhash) + .ok_or(TxError::TxHandlerNotFound(TransactionType::LatestBlockhash))?; + + let block_hash: [u8; 32] = { + let raw = block_hash.to_byte_array(); + + #[cfg(test)] + { + self.config.test_params.maybe_disrupt_block_hash(raw) + } + + #[cfg(not(test))] + { + raw + } + }; + + // get last 20 bytes of block_hash + let block_hash_last_20 = block_hash.last_20_bytes().to_vec(); + + tracing::info!( + "Creating latest blockhash tx with block hash's last 20 bytes: {:?}", + block_hash_last_20 + ); + self.signer.tx_sign_winternitz( + &mut latest_blockhash_txhandler, + &[( + block_hash_last_20, + ClementineBitVMPublicKeys::get_latest_blockhash_derivation( + assert_data.deposit_outpoint, + self.config.protocol_paramset(), + ), + )], + )?; + + let latest_blockhash_txhandler = latest_blockhash_txhandler.promote()?; + + // log the block hash witness + tracing::info!( + "Latest blockhash 
tx created with block hash witness: {:?}", + latest_blockhash_txhandler.get_cached_tx().input + ); + + Ok(( + latest_blockhash_txhandler.get_transaction_type(), + latest_blockhash_txhandler.get_cached_tx().to_owned(), + )) + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use crate::test::common::create_test_config_with_thread_name; + + use super::*; + + #[tokio::test] + /// Checks if get_kickoff_utxos_to_sign returns the same values as before. + /// This test should never fail, do not make changes to code that changes the result of + /// get_kickoff_utxos_to_sign, as doing so will invalidate all past deposits. + async fn test_get_kickoff_utxos_to_sign_consistency() { + let config = create_test_config_with_thread_name().await; + let mut paramset = config.protocol_paramset().clone(); + paramset.num_kickoffs_per_round = 2000; + paramset.num_signed_kickoffs = 20; + let paramset_ref: &'static ProtocolParamset = Box::leak(Box::new(paramset)); + let op_xonly_pk = XOnlyPublicKey::from_str( + "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0", + ) + .unwrap(); + let deposit_blockhash = + BlockHash::from_str("0000000000000000000000000000000000000000000000000000000000000000") + .unwrap(); + let deposit_outpoint = OutPoint::from_str( + "0000000000000000000000000000000000000000000000000000000000000000:0", + ) + .unwrap(); + let utxos_to_sign = get_kickoff_utxos_to_sign( + paramset_ref, + op_xonly_pk, + deposit_blockhash, + deposit_outpoint, + ); + assert_eq!(utxos_to_sign.len(), 20); + assert_eq!( + utxos_to_sign, + vec![ + 1124, 447, 224, 1664, 1673, 1920, 713, 125, 1936, 1150, 1079, 1922, 596, 984, 567, + 1134, 530, 539, 700, 1864 + ] + ); + + // one more test + let deposit_blockhash = + BlockHash::from_str("1100000000000000000000000000000000000000000000000000000000000000") + .unwrap(); + let utxos_to_sign = get_kickoff_utxos_to_sign( + paramset_ref, + op_xonly_pk, + deposit_blockhash, + deposit_outpoint, + ); + + 
assert_eq!(utxos_to_sign.len(), 20); + assert_eq!( + utxos_to_sign, + vec![ + 1454, 26, 157, 1900, 451, 1796, 881, 544, 23, 1080, 1112, 1503, 1233, 1583, 1054, + 603, 329, 1635, 213, 1331 + ] + ); + } +} diff --git a/core/src/builder/transaction/txhandler.rs b/core/src/builder/transaction/txhandler.rs new file mode 100644 index 000000000..d9f8ce448 --- /dev/null +++ b/core/src/builder/transaction/txhandler.rs @@ -0,0 +1,609 @@ +//! # Transaction Handler Module +//! +//! This module defines the [`TxHandler`] abstraction, which wraps a protocol transaction and its metadata. +//! Metadata includes taproot scripts and protocol specific data to enable signing of the transactions. +//! [`TxHandlerBuilder`] is used to create [`TxHandler`]s. +//! + +use super::input::{SpendableTxIn, SpentTxIn, UtxoVout}; +use super::output::UnspentTxOut; +use crate::builder::script::SpendPath; +use crate::builder::sighash::{PartialSignatureInfo, SignatureInfo}; +use crate::builder::transaction::deposit_signature_owner::{DepositSigKeyOwner, EntityType}; +use crate::builder::transaction::TransactionType; +use crate::errors::{BridgeError, TxError}; +use crate::rpc::clementine::tagged_signature::SignatureId; +use crate::rpc::clementine::{NormalSignatureKind, RawSignedTx}; +use bitcoin::sighash::SighashCache; +use bitcoin::taproot::{self, LeafVersion}; +use bitcoin::transaction::Version; +use bitcoin::{absolute, OutPoint, Script, Sequence, TapNodeHash, Transaction, Witness}; +use bitcoin::{TapLeafHash, TapSighash, TapSighashType, TxOut, Txid}; +use eyre::{Context, OptionExt}; +use std::collections::BTreeMap; +use std::marker::PhantomData; + +pub const DEFAULT_SEQUENCE: Sequence = Sequence::ENABLE_RBF_NO_LOCKTIME; + +#[derive(Debug, Clone)] +/// Handler for protocol transactions, wrapping inputs, outputs, and cached transaction data. 
+pub struct TxHandler { + transaction_type: TransactionType, + txins: Vec, + txouts: Vec, + + /// Cached and immutable, same as other fields + cached_tx: bitcoin::Transaction, + cached_txid: bitcoin::Txid, + + phantom: PhantomData, +} + +pub trait State: Clone + std::fmt::Debug {} + +// #[derive(Debug, Clone)] +// pub struct PartialInputs; +#[derive(Debug, Clone)] +/// Marker type for signed transactions. +pub struct Signed; +#[derive(Debug, Clone)] +/// Marker type for unsigned transactions. +pub struct Unsigned; + +// impl State for PartialInputs {} +impl State for Unsigned {} +impl State for Signed {} +pub type SighashCalculator<'a> = + Box Result + 'a>; + +impl TxHandler { + /// Returns a spendable input for the specified output index in this transaction. + /// + /// # Arguments + /// * `vout` - The protocol-specific output index. + /// + /// # Returns + /// A [`SpendableTxIn`] for the specified output, or a [`BridgeError`] if not found. + pub fn get_spendable_output(&self, vout: UtxoVout) -> Result { + let idx = vout.get_vout(); + let txout = self + .txouts + .get(idx as usize) + .ok_or_else(|| eyre::eyre!("Could not find output {idx} in transaction"))?; + Ok(SpendableTxIn::new( + OutPoint { + txid: self.cached_txid, + vout: idx, + }, + txout.txout().clone(), + txout.scripts().clone(), + txout.spendinfo().clone(), + )) + } + + /// Returns the Taproot merkle root of the specified input, if available. + /// + /// # Arguments + /// * `idx` - The input index. + /// + /// # Returns + /// The Taproot merkle root, or a [`BridgeError`] if not found. + pub fn get_merkle_root_of_txin(&self, idx: usize) -> Result, BridgeError> { + let txin = self + .txins + .get(idx) + .ok_or(TxError::TxInputNotFound)? + .get_spendable(); + let merkle_root = txin + .get_spend_info() + .as_ref() + .ok_or(eyre::eyre!( + "Spend info not found for requested txin in get_merkle_root_of_txin" + ))? + .merkle_root(); + Ok(merkle_root) + } + + /// Returns the signature ID for the specified input. 
+ /// + /// # Arguments + /// * `idx` - The input index. + /// + /// # Returns + /// The signature ID, or a [`BridgeError`] if not found. + pub fn get_signature_id(&self, idx: usize) -> Result { + let txin = self.txins.get(idx).ok_or(TxError::TxInputNotFound)?; + Ok(txin.get_signature_id()) + } + + /// Returns the protocol transaction type for this handler. + pub fn get_transaction_type(&self) -> TransactionType { + self.transaction_type + } + + /// Returns a reference to the cached Bitcoin transaction. + pub fn get_cached_tx(&self) -> &Transaction { + &self.cached_tx + } + + /// Returns a reference to the cached transaction ID. + pub fn get_txid(&self) -> &Txid { + // Not sure if this should be public + &self.cached_txid + } + + /// Returns a lambda function that calculates the sighash for the specified input, given the sighash type. + /// + /// # Arguments + /// * `idx` - The input index. + /// + /// # Returns + /// A lambda function that calculates the sighash for the specified input, given the sighash type. + fn get_sighash_calculator( + &self, + idx: usize, + ) -> impl FnOnce(TapSighashType) -> Result + '_ { + move |sighash_type: TapSighashType| -> Result { + match self.txins[idx].get_spend_path() { + SpendPath::KeySpend => self.calculate_pubkey_spend_sighash(idx, sighash_type), + SpendPath::ScriptSpend(script_idx) => { + self.calculate_script_spend_sighash_indexed(idx, script_idx, sighash_type) + } + SpendPath::Unknown => Err(TxError::SpendPathNotSpecified.into()), + } + } + } + + /// Signs all **unsigned** transaction inputs using the provided signer function. + /// + /// This function will skip all transaction inputs that already have a witness. + /// + /// # Arguments + /// * `signer` - A function that returns an optional witness for transaction inputs or returns an error + /// if the signing fails. The function takes the input idx, input object, and a sighash calculator closure. 
+ /// + /// # Returns + /// * `Ok(())` if signing is successful + /// * `Err(BridgeError)` if signing fails + pub fn sign_txins( + &mut self, + mut signer: impl for<'a> FnMut( + usize, + &'a SpentTxIn, + SighashCalculator<'a>, + ) -> Result, BridgeError>, + ) -> Result<(), BridgeError> { + for idx in 0..self.txins.len() { + let calc_sighash = Box::new(self.get_sighash_calculator(idx)); + if self.txins[idx].get_witness().is_some() { + continue; + } + + if let Some(witness) = signer(idx, &self.txins[idx], calc_sighash) + .wrap_err_with(|| format!("Failed to sign input {idx}"))? + { + self.cached_tx.input[idx].witness = witness.clone(); + self.txins[idx].set_witness(witness); + } + } + Ok(()) + } + + /// Calculates the Taproot sighash for a key spend input for the given input and sighash type. + /// + /// # Arguments + /// * `txin_index` - The input index. + /// * `sighash_type` - The Taproot sighash type. + /// + /// # Returns + /// The calculated Taproot sighash, or a [`BridgeError`] if calculation fails. + pub fn calculate_pubkey_spend_sighash( + &self, + txin_index: usize, + sighash_type: TapSighashType, + ) -> Result { + let prevouts_vec: Vec<&TxOut> = self + .txins + .iter() + .map(|s| s.get_spendable().get_prevout()) + .collect(); + let mut sighash_cache: SighashCache<&bitcoin::Transaction> = + SighashCache::new(&self.cached_tx); + let prevouts = match sighash_type { + TapSighashType::SinglePlusAnyoneCanPay + | TapSighashType::AllPlusAnyoneCanPay + | TapSighashType::NonePlusAnyoneCanPay => { + bitcoin::sighash::Prevouts::One(txin_index, prevouts_vec[txin_index]) + } + _ => bitcoin::sighash::Prevouts::All(&prevouts_vec), + }; + + let sig_hash = sighash_cache + .taproot_key_spend_signature_hash(txin_index, &prevouts, sighash_type) + .wrap_err("Failed to calculate taproot sighash for key spend")?; + + Ok(sig_hash) + } + + /// Calculates the Taproot sighash for a script spend input by script index. + /// + /// # Arguments + /// * `txin_index` - The input index. 
+ /// * `spend_script_idx` - The script index in the input's script list. + /// * `sighash_type` - The Taproot sighash type. + /// + /// # Returns + /// The calculated Taproot sighash, or a [`BridgeError`] if calculation fails. + pub fn calculate_script_spend_sighash_indexed( + &self, + txin_index: usize, + spend_script_idx: usize, + sighash_type: TapSighashType, + ) -> Result { + let script = self + .txins + .get(txin_index) + .ok_or(TxError::TxInputNotFound)? + .get_spendable() + .get_scripts() + .get(spend_script_idx) + .ok_or(TxError::ScriptNotFound(spend_script_idx))? + .to_script_buf(); + + self.calculate_script_spend_sighash(txin_index, &script, sighash_type) + } + + /// Calculates the Taproot sighash for a script spend input by script. + /// + /// # Arguments + /// * `txin_index` - The input index. + /// * `spend_script` - The script being spent. + /// * `sighash_type` - The Taproot sighash type. + /// + /// # Returns + /// The calculated Taproot sighash, or a [`BridgeError`] if calculation fails. 
+ pub fn calculate_script_spend_sighash( + &self, + txin_index: usize, + spend_script: &Script, + sighash_type: TapSighashType, + ) -> Result { + let prevouts_vec: Vec<&TxOut> = self + .txins + .iter() + .map(|s| s.get_spendable().get_prevout()) + .collect(); + let mut sighash_cache: SighashCache<&bitcoin::Transaction> = + SighashCache::new(&self.cached_tx); + + let prevouts = &match sighash_type { + TapSighashType::SinglePlusAnyoneCanPay + | TapSighashType::AllPlusAnyoneCanPay + | TapSighashType::NonePlusAnyoneCanPay => { + bitcoin::sighash::Prevouts::One(txin_index, prevouts_vec[txin_index]) + } + _ => bitcoin::sighash::Prevouts::All(&prevouts_vec), + }; + let leaf_hash = TapLeafHash::from_script(spend_script, LeafVersion::TapScript); + let sig_hash = sighash_cache + .taproot_script_spend_signature_hash(txin_index, prevouts, leaf_hash, sighash_type) + .wrap_err("Failed to calculate taproot sighash for script spend")?; + + Ok(sig_hash) + } + + /// Calculates the sighash for the specified input, based on its spend path stored inside [`SpentTxIn`]. + /// + /// # Arguments + /// * `txin_index` - The input index. + /// * `sighash_type` - The Taproot sighash type. + /// + /// # Returns + /// The calculated Taproot sighash, or a [`BridgeError`] if calculation fails. + pub fn calculate_sighash_txin( + &self, + txin_index: usize, + sighash_type: TapSighashType, + ) -> Result { + match self.txins[txin_index].get_spend_path() { + SpendPath::ScriptSpend(idx) => { + self.calculate_script_spend_sighash_indexed(txin_index, idx, sighash_type) + } + SpendPath::KeySpend => self.calculate_pubkey_spend_sighash(txin_index, sighash_type), + SpendPath::Unknown => Err(TxError::MissingSpendInfo.into()), + } + } + + /// Calculates sighashes for all shared inputs for a given entity type. + /// + /// # Arguments + /// * `needed_entity` - The entity type (operator, verifier, etc.). + /// * `partial_signature_info` - Partial signature info for the entity. 
+ /// + /// # Returns + /// A vector of (sighash, signature info) pairs, or a [`BridgeError`] if calculation fails. + pub fn calculate_shared_txins_sighash( + &self, + needed_entity: EntityType, + partial_signature_info: PartialSignatureInfo, + ) -> Result, BridgeError> { + let mut sighashes = Vec::with_capacity(self.txins.len()); + for idx in 0..self.txins.len() { + let sig_id = self.txins[idx].get_signature_id(); + let spend_data = self.txins[idx].get_tweak_data(); + let sig_owner = sig_id.get_deposit_sig_owner()?; + match (sig_owner, needed_entity) { + ( + DepositSigKeyOwner::OperatorSharedDeposit(sighash_type), + EntityType::OperatorDeposit, + ) + | ( + DepositSigKeyOwner::NofnSharedDeposit(sighash_type), + EntityType::VerifierDeposit, + ) + | ( + DepositSigKeyOwner::OperatorSharedSetup(sighash_type), + EntityType::OperatorSetup, + ) => { + sighashes.push(( + self.calculate_sighash_txin(idx, sighash_type)?, + partial_signature_info.complete(sig_id, spend_data), + )); + } + _ => {} + } + } + Ok(sighashes) + } + + #[cfg(test)] + /// Returns the previous output (TxOut) for the specified input + pub fn get_input_txout(&self, input_idx: usize) -> &TxOut { + self.txins[input_idx].get_spendable().get_prevout() + } +} + +impl TxHandler { + /// Encodes the signed transaction as a raw byte vector. + pub fn encode_tx(&self) -> RawSignedTx { + RawSignedTx { + raw_tx: bitcoin::consensus::encode::serialize(self.get_cached_tx()), + } + } +} + +impl TxHandler { + /// Promotes an unsigned handler to a signed handler, checking that all witnesses are present. + /// + /// # Returns + /// A [`TxHandler`] if all witnesses are present, or a [`BridgeError`] if not. 
+ pub fn promote(self) -> Result, BridgeError> { + if self.txins.iter().any(|s| s.get_witness().is_none()) { + return Err(eyre::eyre!("Missing witness data").into()); + } + + Ok(TxHandler { + transaction_type: self.transaction_type, + txins: self.txins, + txouts: self.txouts, + cached_tx: self.cached_tx, + cached_txid: self.cached_txid, + phantom: PhantomData::, + }) + } + + /// Sets the witness for a script path spend input. + /// + /// # Arguments + /// * `script_inputs` - The inputs to the tapscript. + /// * `txin_index` - The input index. + /// * `script_index` - The script index in the input's script list. + /// + /// # Returns + /// Ok(()) if successful, or a [`BridgeError`] if not. + pub fn set_p2tr_script_spend_witness>( + &mut self, + script_inputs: &[T], + txin_index: usize, + script_index: usize, + ) -> Result<(), BridgeError> { + let txin = self + .txins + .get_mut(txin_index) + .ok_or(TxError::TxInputNotFound)?; + + if txin.get_witness().is_some() { + return Err(TxError::WitnessAlreadySet.into()); + } + + let script = txin + .get_spendable() + .get_scripts() + .get(script_index) + .ok_or_else(|| { + eyre::eyre!("Could not find script {script_index} in input {txin_index}") + })? + .to_script_buf(); + + let spend_control_block = txin + .get_spendable() + .get_spend_info() + .as_ref() + .ok_or(TxError::MissingSpendInfo)? + .control_block(&(script.clone(), LeafVersion::TapScript)) + .ok_or_eyre("Failed to find control block for script")?; + + let mut witness = Witness::new(); + script_inputs + .iter() + .for_each(|element| witness.push(element)); + witness.push(script.clone()); + witness.push(spend_control_block.serialize()); + + self.cached_tx.input[txin_index].witness = witness.clone(); + txin.set_witness(witness); + + Ok(()) + } + + /// Sets the witness for a key path spend input. + /// + /// # Arguments + /// * `signature` - The Taproot signature. + /// * `txin_index` - The input index. 
+ /// + /// # Returns + /// Ok(()) if successful, or a [`BridgeError`] if not. + pub fn set_p2tr_key_spend_witness( + &mut self, + signature: &taproot::Signature, + txin_index: usize, + ) -> Result<(), BridgeError> { + let txin = self + .txins + .get_mut(txin_index) + .ok_or(TxError::TxInputNotFound)?; + + if txin.get_witness().is_none() { + let witness = Witness::p2tr_key_spend(signature); + txin.set_witness(witness.clone()); + self.cached_tx.input[txin_index].witness = witness; + + Ok(()) + } else { + Err(TxError::WitnessAlreadySet.into()) + } + } +} + +#[derive(Debug, Clone)] +/// Builder for [`TxHandler`], allowing stepwise construction of inputs and outputs. +pub struct TxHandlerBuilder { + transaction_type: TransactionType, + version: Version, + lock_time: absolute::LockTime, + txins: Vec, + txouts: Vec, +} + +impl TxHandlerBuilder { + /// Creates a new [`TxHandlerBuilder`] for the specified transaction type. + pub fn new(transaction_type: TransactionType) -> TxHandlerBuilder { + TxHandlerBuilder { + transaction_type, + version: Version::TWO, + lock_time: absolute::LockTime::ZERO, + txins: vec![], + txouts: vec![], + } + } + + /// Sets the version for the transaction being built. + pub fn with_version(mut self, version: Version) -> Self { + self.version = version; + self + } + + /// Adds an input to the transaction being built. + pub fn add_input( + mut self, + input_id: impl Into, + spendable: SpendableTxIn, + spend_path: SpendPath, + sequence: Sequence, + ) -> Self { + self.txins.push(SpentTxIn::from_spendable( + input_id.into(), + spendable, + spend_path, + sequence, + None, + )); + + self + } + + /// Adds an input with a pre-specified witness to the transaction being built. 
+ pub fn add_input_with_witness( + mut self, + spendable: SpendableTxIn, + sequence: Sequence, + witness: Witness, + ) -> Self { + self.txins.push(SpentTxIn::from_spendable( + NormalSignatureKind::NormalSignatureUnknown.into(), + spendable, + SpendPath::Unknown, + sequence, + Some(witness), + )); + + self + } + + /// Adds an output to the transaction being built. + pub fn add_output(mut self, output: UnspentTxOut) -> Self { + self.txouts.push(output); + + self + } + + /// Finalizes the transaction, returning an unsigned [`TxHandler`]. + pub fn finalize(self) -> TxHandler { + // construct cached Transaction + let tx = Transaction { + version: self.version, + lock_time: self.lock_time, + input: self.txins.iter().map(|s| s.to_txin()).collect(), + output: self.txouts.iter().map(|s| s.txout().clone()).collect(), + }; + let txid = tx.compute_txid(); + + // #[cfg(debug_assertions)] + // { + // // txins >= txouts + // assert!( + // self.txins + // .iter() + // .map(|s| s.get_spendable().get_prevout().value) + // .sum::() + // >= self + // .txouts + // .iter() + // .map(|s| s.txout().value) + // .sum::(), + // "Txins should be bigger than txouts" + // ); + // } + TxHandler:: { + transaction_type: self.transaction_type, + txins: self.txins, + txouts: self.txouts, + cached_tx: tx, + cached_txid: txid, + phantom: PhantomData, + } + } + + /// Finalizes the transaction and promotes it to signed, checking all witnesses. + pub fn finalize_signed(self) -> Result, BridgeError> { + self.finalize().promote() + } +} + +/// Removes a [`TxHandler`] from a map by transaction type, returning an error if not found. +/// +/// # Arguments +/// * `txhandlers` - The map of transaction handlers. +/// * `tx_type` - The transaction type to remove. +/// +/// # Returns +/// The removed [`TxHandler`], or a [`BridgeError`] if not found. 
+pub fn remove_txhandler_from_map( + txhandlers: &mut BTreeMap>, + tx_type: TransactionType, +) -> Result, BridgeError> { + txhandlers + .remove(&tx_type) + .ok_or(TxError::TxHandlerNotFound(tx_type).into()) +} diff --git a/core/src/citrea.rs b/core/src/citrea.rs new file mode 100644 index 000000000..957342872 --- /dev/null +++ b/core/src/citrea.rs @@ -0,0 +1,662 @@ +//! # Citrea Related Utilities + +use crate::config::protocol::ProtocolParamset; +use crate::database::DatabaseTransaction; +use crate::errors::BridgeError; +use crate::{citrea::BRIDGE_CONTRACT::DepositReplaced, database::Database}; +use alloy::{ + eips::{BlockId, BlockNumberOrTag}, + network::EthereumWallet, + primitives::{keccak256, U256}, + providers::{ + fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, + WalletFiller, + }, + Provider, ProviderBuilder, RootProvider, + }, + rpc::types::{EIP1186AccountProofResponse, Filter, Log}, + signers::{local::PrivateKeySigner, Signer}, + sol, + sol_types::SolEvent, + transports::http::reqwest::Url, +}; +use bitcoin::{hashes::Hash, OutPoint, Txid, XOnlyPublicKey}; +use bridge_circuit_host::receipt_from_inner; +use circuits_lib::bridge_circuit::{ + lc_proof::check_method_id, + structs::{LightClientProof, StorageProof}, +}; +use citrea_sov_rollup_interface::zk::light_client_proof::output::LightClientCircuitOutput; +use eyre::Context; +use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; +use jsonrpsee::proc_macros::rpc; +use risc0_zkvm::{InnerReceipt, Receipt}; +use std::{fmt::Debug, time::Duration}; +use tonic::async_trait; + +pub const LIGHT_CLIENT_ADDRESS: &str = "0x3100000000000000000000000000000000000001"; +pub const BRIDGE_CONTRACT_ADDRESS: &str = "0x3100000000000000000000000000000000000002"; +pub const SATS_TO_WEI_MULTIPLIER: u64 = 10_000_000_000; +const UTXOS_STORAGE_INDEX: [u8; 32] = + hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000007"); +const DEPOSIT_STORAGE_INDEX: [u8; 
32] = + hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000008"); + +// Codegen from ABI file to interact with the contract. +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug)] + BRIDGE_CONTRACT, + "../scripts/Bridge.json" +); + +#[async_trait] +pub trait CitreaClientT: Send + Sync + Debug + Clone + 'static { + /// # Parameters + /// + /// - `citrea_rpc_url`: URL of the Citrea RPC. + /// - `light_client_prover_url`: URL of the Citrea light client prover RPC. + /// - `chain_id`: Citrea's EVM chain id. + /// - `secret_key`: EVM secret key of the EVM user. If not given, random + /// secret key is used (wallet is not required). This is given mostly for + /// testing purposes. + async fn new( + citrea_rpc_url: String, + light_client_prover_url: String, + chain_id: u32, + secret_key: Option, + timeout: Option, + ) -> Result; + + /// Returns deposit move txids, starting from the last deposit index. + /// + /// # Parameters + /// + /// - `last_deposit_idx`: Last deposit index. None if no deposit + /// - `to_height`: End block height (inclusive) + async fn collect_deposit_move_txids( + &self, + last_deposit_idx: Option, + to_height: u64, + ) -> Result, BridgeError>; + + /// Returns withdrawal utxos, starting from the last withdrawal index. + /// + /// # Parameters + /// + /// - `last_withdrawal_idx`: Last withdrawal index. None if no withdrawal + /// - `to_height`: End block height (inclusive) + async fn collect_withdrawal_utxos( + &self, + last_withdrawal_idx: Option, + to_height: u64, + ) -> Result, BridgeError>; + + /// Returns the light client proof and its L2 height for the given L1 block + /// height. + /// + /// # Returns + /// + /// A tuple, wrapped around a [`Some`] if present: + /// + /// - [`u64`]: Last L2 block height. + /// - [`ProtocolParamset`]: Protocol paramset. + /// + /// If not present, [`None`] is returned. 
+ async fn get_light_client_proof( + &self, + l1_height: u64, + paramset: &'static ProtocolParamset, + ) -> Result, BridgeError>; + + /// Returns the L2 block height range for the given L1 block height. + /// + /// # Parameters + /// + /// - `block_height`: L1 block height. + /// - `timeout`: Timeout duration. + /// + /// # Returns + /// + /// A tuple of: + /// + /// - [`u64`]: Start of the L2 block height (not inclusive) + /// - [`u64`]: End of the L2 block height (inclusive) + async fn get_citrea_l2_height_range( + &self, + block_height: u64, + timeout: Duration, + paramset: &'static ProtocolParamset, + ) -> Result<(u64, u64), BridgeError>; + + /// Returns the replacement deposit move txids for the given range of blocks. + /// + /// # Parameters + /// + /// - `from_height`: Start block height (inclusive) + /// - `to_height`: End block height (inclusive) + /// + /// # Returns + /// + /// A vector of tuples, each containing: + /// + /// - [`Txid`]: The original move txid. + /// - [`Txid`]: The replacement move txid. + async fn get_replacement_deposit_move_txids( + &self, + from_height: u64, + to_height: u64, + ) -> Result, BridgeError>; + + async fn check_nofn_correctness( + &self, + nofn_xonly_pk: XOnlyPublicKey, + ) -> Result<(), BridgeError>; + + async fn get_storage_proof( + &self, + l2_height: u64, + deposit_index: u32, + ) -> Result; + + async fn fetch_validate_and_store_lcp( + &self, + payout_block_height: u64, + deposit_index: u32, + db: &Database, + dbtx: Option>, + paramset: &'static ProtocolParamset, + ) -> Result; +} + +/// Citrea client is responsible for interacting with the Citrea EVM and Citrea +/// RPC. +#[derive(Clone, Debug)] +pub struct CitreaClient { + pub client: HttpClient, + pub light_client_prover_client: HttpClient, + pub wallet_address: alloy::primitives::Address, + pub contract: CitreaContract, +} + +impl CitreaClient { + /// Returns all logs for the given filter and block range while considering + /// about the 1000 block limit. 
+ async fn get_logs( + &self, + filter: Filter, + from_height: u64, + to_height: u64, + ) -> Result, BridgeError> { + let mut logs = vec![]; + + let mut from_height = from_height; + while from_height <= to_height { + // Block num is 999 because limits are inclusive. + let to_height = std::cmp::min(from_height + 999, to_height); + tracing::debug!("Fetching logs from {} to {}", from_height, to_height); + + // Update filter with the new range. + let filter = filter.clone(); + let filter = filter.from_block(BlockNumberOrTag::Number(from_height)); + let filter = filter.to_block(BlockNumberOrTag::Number(to_height)); + + let logs_chunk = self + .contract + .provider() + .get_logs(&filter) + .await + .wrap_err("Failed to get logs")?; + logs.extend(logs_chunk); + + from_height = to_height + 1; + } + + Ok(logs) + } +} + +#[async_trait] +impl CitreaClientT for CitreaClient { + /// Fetches the storage proof for a given deposit index and transaction ID. + /// + /// This function interacts with an Citrea RPC endpoint to retrieve a storage proof, + /// which includes proof details for both the UTXO and the deposit index. + /// + /// # Arguments + /// * `l2_height` - A `u64` representing the L2 block height. + /// * `deposit_index` - A `u32` representing the deposit index. + /// + /// # Returns + /// Returns a `StorageProof` struct containing serialized storage proofs for the UTXO and deposit index. 
+ async fn get_storage_proof( + &self, + l2_height: u64, + deposit_index: u32, + ) -> Result { + let ind = deposit_index; + let tx_index: u32 = ind * 2; + + let storage_address_wd_utxo_bytes = keccak256(UTXOS_STORAGE_INDEX); + let storage_address_wd_utxo: U256 = U256::from_be_bytes( + <[u8; 32]>::try_from(&storage_address_wd_utxo_bytes[..]) + .wrap_err("Storage address wd utxo bytes slice with incorrect length")?, + ); + + // Storage key address calculation UTXO + let storage_key_wd_utxo: U256 = storage_address_wd_utxo + U256::from(tx_index); + let storage_key_wd_utxo_hex = + format!("0x{}", hex::encode(storage_key_wd_utxo.to_be_bytes::<32>())); + + // Storage key address calculation Vout + let storage_key_vout: U256 = storage_address_wd_utxo + U256::from(tx_index + 1); + let storage_key_vout_hex = + format!("0x{}", hex::encode(storage_key_vout.to_be_bytes::<32>())); + + // Storage key address calculation Deposit + let storage_address_deposit_bytes = keccak256(DEPOSIT_STORAGE_INDEX); + let storage_address_deposit: U256 = U256::from_be_bytes( + <[u8; 32]>::try_from(&storage_address_deposit_bytes[..]) + .wrap_err("Storage address deposit bytes slice with incorrect length")?, + ); + + let storage_key_deposit: U256 = storage_address_deposit + U256::from(deposit_index); + let storage_key_deposit_hex = hex::encode(storage_key_deposit.to_be_bytes::<32>()); + let storage_key_deposit_hex = format!("0x{}", storage_key_deposit_hex); + + let response: serde_json::Value = self + .client + .get_proof( + BRIDGE_CONTRACT_ADDRESS, + vec![ + storage_key_wd_utxo_hex, + storage_key_vout_hex, + storage_key_deposit_hex, + ], + format!("0x{:x}", l2_height), + ) + .await + .wrap_err("Failed to get storage proof from rpc")?; + + let response: EIP1186AccountProofResponse = serde_json::from_value(response) + .wrap_err("Failed to deserialize EIP1186AccountProofResponse")?; + + // It does not seem possible to get a storage proof with less than 3 items. 
But still + // we check it to avoid panics. + if response.storage_proof.len() < 3 { + return Err(eyre::eyre!( + "Expected at least 3 storage proofs, got {}", + response.storage_proof.len() + ) + .into()); + } + + let serialized_utxo = serde_json::to_string(&response.storage_proof[0]) + .wrap_err("Failed to serialize storage proof utxo")?; + + let serialized_vout = serde_json::to_string(&response.storage_proof[1]) + .wrap_err("Failed to serialize storage proof vout")?; + + let serialized_deposit = serde_json::to_string(&response.storage_proof[2]) + .wrap_err("Failed to serialize storage proof deposit")?; + + Ok(StorageProof { + storage_proof_utxo: serialized_utxo, + storage_proof_vout: serialized_vout, + storage_proof_deposit_txid: serialized_deposit, + index: ind, + }) + } + + async fn fetch_validate_and_store_lcp( + &self, + payout_block_height: u64, + deposit_index: u32, + db: &Database, + mut dbtx: Option>, + paramset: &'static ProtocolParamset, + ) -> Result { + let saved_data = db + .get_lcp_for_assert(dbtx.as_deref_mut(), deposit_index) + .await?; + if let Some(lcp) = saved_data { + // if already saved, do nothing + return Ok(lcp); + }; + + let lcp_result = self + .get_light_client_proof(payout_block_height, paramset) + .await?; + let (_lcp, lcp_receipt, _l2_height) = match lcp_result { + Some(lcp) => lcp, + None => { + return Err(eyre::eyre!( + "Light client proof could not be fetched found for block height {}", + payout_block_height + ) + .into()) + } + }; + + // save the LCP for assert + db.insert_lcp_for_assert(dbtx, deposit_index, lcp_receipt.clone()) + .await?; + + Ok(lcp_receipt) + } + + async fn new( + citrea_rpc_url: String, + light_client_prover_url: String, + chain_id: u32, + secret_key: Option, + timeout: Option, + ) -> Result { + let citrea_rpc_url = Url::parse(&citrea_rpc_url).wrap_err("Can't parse Citrea RPC URL")?; + let light_client_prover_url = + Url::parse(&light_client_prover_url).wrap_err("Can't parse Citrea LCP RPC URL")?; + let 
secret_key = secret_key.unwrap_or(PrivateKeySigner::random()); + + let key = secret_key.with_chain_id(Some(chain_id.into())); + let wallet_address = key.address(); + + tracing::info!("Wallet address: {}", wallet_address); + + let provider = ProviderBuilder::new() + .wallet(EthereumWallet::from(key)) + .on_http(citrea_rpc_url.clone()); + + tracing::info!("Provider created"); + + let contract = BRIDGE_CONTRACT::new( + BRIDGE_CONTRACT_ADDRESS + .parse() + .expect("Correct contract address"), + provider, + ); + + tracing::info!("Contract created"); + + let client = HttpClientBuilder::default() + .request_timeout(timeout.unwrap_or(Duration::from_secs(60))) + .build(citrea_rpc_url) + .wrap_err("Failed to create Citrea RPC client")?; + + tracing::info!("Citrea RPC client created"); + + let light_client_prover_client = HttpClientBuilder::default() + .request_timeout(timeout.unwrap_or(Duration::from_secs(60))) + .build(light_client_prover_url) + .wrap_err("Failed to create Citrea LCP RPC client")?; + + tracing::info!("Citrea LCP RPC client created"); + + Ok(CitreaClient { + client, + light_client_prover_client, + wallet_address, + contract, + }) + } + + async fn collect_deposit_move_txids( + &self, + last_deposit_idx: Option, + to_height: u64, + ) -> Result, BridgeError> { + let mut move_txids = vec![]; + + let mut start_idx = match last_deposit_idx { + Some(idx) => idx + 1, + None => 0, + }; + + loop { + let deposit_txid = self + .contract + .depositTxIds(U256::from(start_idx)) + .block(BlockId::Number(BlockNumberOrTag::Number(to_height))) + .call() + .await; + match deposit_txid { + Err(e) if e.to_string().contains("execution reverted") => { + tracing::trace!("Deposit txid not found for index, error: {:?}", e); + break; + } + Err(e) => return Err(e.into()), + Ok(_) => {} + } + tracing::info!("Deposit txid found for index: {:?}", deposit_txid); + + let deposit_txid = deposit_txid.expect("Failed to get deposit txid"); + let move_txid = 
Txid::from_slice(deposit_txid._0.as_ref()) + .wrap_err("Failed to convert move txid to Txid")?; + move_txids.push((start_idx as u64, move_txid)); + start_idx += 1; + } + Ok(move_txids) + } + + async fn collect_withdrawal_utxos( + &self, + last_withdrawal_idx: Option, + to_height: u64, + ) -> Result, BridgeError> { + let mut utxos = vec![]; + + let mut start_idx = match last_withdrawal_idx { + Some(idx) => idx + 1, + None => 0, + }; + + loop { + let withdrawal_utxo = self + .contract + .withdrawalUTXOs(U256::from(start_idx)) + .block(BlockId::Number(BlockNumberOrTag::Number(to_height))) + .call() + .await; + if withdrawal_utxo.is_err() { + break; + } + let withdrawal_utxo = withdrawal_utxo.expect("Failed to get withdrawal UTXO"); + let txid = withdrawal_utxo.txId.0; + let txid = + Txid::from_slice(txid.as_ref()).wrap_err("Failed to convert txid to Txid")?; + let vout = withdrawal_utxo.outputId.0; + let vout = u32::from_le_bytes(vout); + let utxo = OutPoint { txid, vout }; + utxos.push((start_idx as u64, utxo)); + start_idx += 1; + } + Ok(utxos) + } + + async fn get_light_client_proof( + &self, + l1_height: u64, + paramset: &'static ProtocolParamset, + ) -> Result, BridgeError> { + let proof_result = self + .light_client_prover_client + .get_light_client_proof_by_l1_height(l1_height) + .await + .wrap_err("Failed to get light client proof")?; + tracing::debug!( + "Light client proof result {}: {:?}", + l1_height, + proof_result + ); + + let ret = if let Some(proof_result) = proof_result { + let decoded: InnerReceipt = bincode::deserialize(&proof_result.proof) + .wrap_err("Failed to deserialize light client proof from citrea lcp")?; + let receipt = receipt_from_inner(decoded) + .wrap_err("Failed to create receipt from light client proof")?; + + let l2_height = u64::try_from(proof_result.light_client_proof_output.last_l2_height) + .wrap_err("Failed to convert l2 height to u64")?; + let hex_l2_str = format!("0x{:x}", l2_height); + + let lc_image_id = 
paramset.get_lcp_image_id()?; + + if receipt.verify(lc_image_id).is_err() { + return Err(eyre::eyre!("Current light client proof verification failed").into()); + } + + let proof_output: LightClientCircuitOutput = borsh::from_slice(&receipt.journal.bytes) + .wrap_err("Failed to deserialize light client circuit output")?; + + if !check_method_id(&proof_output, lc_image_id) { + return Err(eyre::eyre!( + "Current light client proof method ID does not match the expected LC image ID" + ) + .into()); + } + + Some(( + LightClientProof { + lc_journal: receipt.journal.bytes.clone(), + l2_height: hex_l2_str, + }, + receipt, + l2_height, + )) + } else { + None + }; + + Ok(ret) + } + + async fn get_citrea_l2_height_range( + &self, + block_height: u64, + timeout: Duration, + paramset: &'static ProtocolParamset, + ) -> Result<(u64, u64), BridgeError> { + let start = std::time::Instant::now(); + let proof_current = loop { + if let Some(proof) = self.get_light_client_proof(block_height, paramset).await? { + break proof; + } + + if start.elapsed() > timeout { + return Err(eyre::eyre!( + "Light client proof not found for block height {} after {} seconds", + block_height, + timeout.as_secs() + ) + .into()); + } + + tokio::time::sleep(Duration::from_secs(1)).await; + }; + + let proof_previous = self + .get_light_client_proof(block_height - 1, paramset) + .await? 
+ .ok_or(eyre::eyre!( + "Light client proof not found for block height: {}", + block_height - 1 + ))?; + + let l2_height_end: u64 = proof_current.2; + let l2_height_start: u64 = proof_previous.2; + + Ok((l2_height_start, l2_height_end)) + } + + async fn get_replacement_deposit_move_txids( + &self, + from_height: u64, + to_height: u64, + ) -> Result, BridgeError> { + let mut replacement_move_txids = vec![]; + + // get logs + let filter = self.contract.event_filter::().filter; + let logs = self.get_logs(filter, from_height, to_height).await?; + + for log in logs { + let replacement_raw_data = &log.data().data; + + let idx = DepositReplaced::abi_decode_data(replacement_raw_data, false) + .wrap_err("Failed to decode replacement deposit data")? + .0; + let new_move_txid = DepositReplaced::abi_decode_data(replacement_raw_data, false) + .wrap_err("Failed to decode replacement deposit data")? + .2; + + let idx = u32::try_from(idx).wrap_err("Failed to convert idx to u32")?; + let new_move_txid = Txid::from_slice(new_move_txid.as_ref()) + .wrap_err("Failed to convert new move txid to Txid")?; + + replacement_move_txids.push((idx, new_move_txid)); + } + + Ok(replacement_move_txids) + } + + async fn check_nofn_correctness( + &self, + nofn_xonly_pk: XOnlyPublicKey, + ) -> Result<(), BridgeError> { + if std::env::var("DISABLE_NOFN_CHECK").is_ok() { + return Ok(()); + } + + let contract_nofn_xonly_pk = self + .contract + .getAggregatedKey() + .call() + .await + .wrap_err("Failed to get script prefix")? 
+ ._0; + + let contract_nofn_xonly_pk = XOnlyPublicKey::from_slice(contract_nofn_xonly_pk.as_ref()) + .wrap_err("Failed to convert citrea contract script nofn bytes to xonly pk")?; + if contract_nofn_xonly_pk != nofn_xonly_pk { + return Err(eyre::eyre!("Nofn of deposit does not match with citrea contract").into()); + } + Ok(()) + } +} + +#[rpc(client, namespace = "lightClientProver")] +trait LightClientProverRpc { + /// Generate state transition data for the given L1 block height, and return the data as a borsh serialized hex string. + #[method(name = "getLightClientProofByL1Height")] + async fn get_light_client_proof_by_l1_height( + &self, + l1_height: u64, + ) -> RpcResult>; +} + +#[rpc(client, namespace = "eth")] +pub trait CitreaRpc { + #[method(name = "getProof")] + async fn get_proof( + &self, + address: &str, + storage_keys: Vec, + block: String, + ) -> RpcResult; +} + +// Ugly typedefs. +type CitreaContract = BRIDGE_CONTRACT::BRIDGE_CONTRACTInstance< + (), + FillProvider< + JoinFill< + JoinFill< + alloy::providers::Identity, + JoinFill>>, + >, + WalletFiller, + >, + RootProvider, + >, +>; diff --git a/core/src/cli.rs b/core/src/cli.rs new file mode 100644 index 000000000..7cdddf4fe --- /dev/null +++ b/core/src/cli.rs @@ -0,0 +1,584 @@ +//! # Command Line Interface +//! +//! This module defines command line interface for server binaries. `Clap` is used +//! for easy generation of help messages and handling arguments. 
+ +use crate::config::protocol::ProtocolParamset; +use crate::config::BridgeConfig; +use crate::errors::BridgeError; +use crate::errors::ErrorExt; +use crate::utils::delayed_panic; +use clap::Parser; +use clap::ValueEnum; +use eyre::Context; +use std::env; +use std::ffi::OsString; +use std::path::PathBuf; +use std::process; + +#[derive(Debug, Clone, Copy, ValueEnum, Eq, PartialEq)] +pub enum Actors { + Verifier, + Operator, + Aggregator, + TestActor, +} + +/// Clementine (C) 2025 Chainway Limited +#[derive(Parser, Debug, Clone)] +#[command(version, about, long_about = None)] +pub struct Args { + /// Actor to run. + pub actor: Actors, + /// TOML formatted configuration file. + #[arg(short, long)] + pub config: Option, + /// TOML formatted protocol parameters file. + #[arg(short, long)] + pub protocol_params: Option, + /// Verbosity level, ranging from 0 (none) to 5 (highest) + #[arg(short, long, default_value_t = 3)] + pub verbose: u8, +} + +/// Parse given iterator. This is good for isolated environments, like tests. +fn parse_from(itr: I) -> Result +where + I: IntoIterator, + T: Into + Clone, +{ + match Args::try_parse_from(itr) { + Ok(c) => Ok(c), + Err(e) + if matches!( + e.kind(), + clap::error::ErrorKind::DisplayHelp + | clap::error::ErrorKind::DisplayHelpOnMissingArgumentOrSubcommand + | clap::error::ErrorKind::DisplayVersion + ) => + { + Err(BridgeError::CLIDisplayAndExit(e.render())) + } + Err(e) => Err(BridgeError::ConfigError(e.to_string())), + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum ConfigSource { + File(PathBuf), + Env, +} +/// Selects a configuration source for the main config or the protocol paramset. +/// +/// Configuration can be loaded either from a file specified by a path in the CLI args, +/// or from environment variables. +/// +/// Selection logic is as follows: +/// +/// 1. If the named environment variable (eg. 
`READ_CONFIG_FROM_ENV`) is not set +/// or if the named environment variable is set to `0` or `off`, we use the file +/// path provided in the CLI args (fail if not provided) +/// +/// 2. If the named environment variable is set to `1` or `on`, we explicitly read from the +/// environment variable +/// +/// 3. If the named environment variable is set to an unknown value, we print a +/// warning and default to environment variables +/// +/// # Examples +/// +/// ```bash +/// # Load config from a file and protocol params from a file +/// READ_CONFIG_FROM_ENV=0 READ_PARAMSET_FROM_ENV=0 clementine-core verifier --config /path/to/config.toml --protocol-params /path/to/protocol-params.toml +/// +/// # or +/// # define all config variables in the environment +/// export CONFIG_ONE=1 +/// export PARAM_ONE=1 +/// # and source from environment variables +/// READ_CONFIG_FROM_ENV=1 READ_PARAMSET_FROM_ENV=1 clementine-core verifier +/// +/// # or +/// # source paramset from environment variables but use config from a file +/// export PARAM_ONE=1 +/// export PARAM_TWO=1 +/// READ_CONFIG_FROM_ENV=0 READ_PARAMSET_FROM_ENV=1 clementine-core verifier --config /path/to/config.toml +/// +/// # WRONG usage (will use environment variables for both config and paramset) +/// export CONFIG_ONE=1 +/// export PARAM_ONE=1 +/// READ_CONFIG_FROM_ENV=1 READ_PARAMSET_FROM_ENV=1 clementine-core --config /path/to/config.toml --protocol-params /path/to/protocol-params.toml +/// ``` +pub fn get_config_source( + read_from_env_name: &'static str, + provided_arg: Option, +) -> Result { + Ok(match std::env::var(read_from_env_name) { + Err(_) => ConfigSource::File(provided_arg.ok_or(BridgeError::ConfigError( + "No file path or environment variable provided for config file.".to_string(), + ))?), + Ok(str) if str == "0" || str == "off" => ConfigSource::File(provided_arg.ok_or( + BridgeError::ConfigError("No file path provided for config file.".to_string()), + )?), + Ok(str) => { + if str != "1" && str != 
"on" { + tracing::warn!("Unknown value for {read_from_env_name}: {str}. Expected 1/0/off/on. Defaulting to environment variables."); + } + + if provided_arg.is_some() { + tracing::warn!("File path provided in CLI arguments while {read_from_env_name} is set to 1. Ignoring provided file path and reading from environment variables."); + } + + ConfigSource::Env + } + }) +} + +/// Gets configuration using CLI arguments, for binaries. If there are any errors, prints +/// error and panics. +/// +/// Steps: +/// +/// 1. Get CLI arguments +/// 2. Initialize logger +/// 3. Get configuration, either from environment variables or +/// configuration file +/// 4. Get protocol parameters, either from environment variables or +/// protocol parameters file +/// +/// # Returns +/// +/// A tuple, containing: +/// +/// - [`BridgeConfig`] from CLI argument +/// - [`Args`] from CLI options +pub fn get_cli_config() -> (BridgeConfig, Args) { + let args = env::args(); + + match get_cli_config_from_args(args) { + Ok(config) => config, + Err(e) => { + let e = e.into_eyre(); + match e.root_cause().downcast_ref::() { + Some(BridgeError::CLIDisplayAndExit(msg)) => { + println!("{}", msg); + process::exit(0); + } + _ => delayed_panic!("Failed to get CLI config: {e:?}"), + } + } + } +} + +/// Wrapped function for tests +fn get_cli_config_from_args(itr: I) -> Result<(BridgeConfig, Args), BridgeError> +where + I: IntoIterator, + T: Into + Clone, +{ + let args = parse_from(itr).wrap_err("Failed to parse CLI arguments.")?; + + let config_source = get_config_source("READ_CONFIG_FROM_ENV", args.config.clone()); + + let mut config = + match config_source.wrap_err("Failed to determine source for configuration.")? { + ConfigSource::File(config_file) => { + // Read from configuration file ONLY + BridgeConfig::try_parse_file(config_file) + .wrap_err("Failed to read configuration from file.")? 
+ } + ConfigSource::Env => BridgeConfig::from_env() + .wrap_err("Failed to read configuration from environment variables.")?, + }; + + let protocol_params_source = + get_config_source("READ_PARAMSET_FROM_ENV", args.protocol_params.clone()) + .wrap_err("Failed to determine source for protocol parameters.")?; + + // Leaks memory to get a static reference to the paramset + // This is needed to reduce copies of the protocol paramset when passing it around. + // This is fine, since this will only run once in the lifetime of the program. + let paramset: &'static ProtocolParamset = Box::leak(Box::new(match protocol_params_source { + ConfigSource::File(path) => ProtocolParamset::from_toml_file(path.as_path()) + .wrap_err("Failed to read protocol parameters from file.")?, + ConfigSource::Env => ProtocolParamset::from_env() + .wrap_err("Failed to read protocol parameters from environment.")?, + })); + + // The default will be REGTEST_PARAMSET and is overridden from the selected source above. + config.protocol_paramset = paramset; + + Ok((config, args)) +} + +#[cfg(test)] +mod tests { + use super::{get_cli_config_from_args, get_config_source, parse_from, ConfigSource}; + use crate::cli::Actors; + use crate::errors::BridgeError; + use std::env; + use std::fs::File; + use std::io::Write; + use std::path::PathBuf; + + /// With help message flag, we should see the help message. Shocking. + #[test] + fn help_message() { + match parse_from(vec!["clementine-core", "--help"]) { + Ok(_) => panic!("expected configuration error"), + Err(BridgeError::CLIDisplayAndExit(_)) => {} + e => panic!("unexpected error {e:#?}"), + } + } + + /// With version flag, we should see the program version read from + /// `Cargo.toml`. 
+ #[test] + fn version() { + match parse_from(vec!["clementine-core", "--version"]) { + Ok(_) => panic!("expected configuration error"), + Err(BridgeError::CLIDisplayAndExit(_)) => {} + e => panic!("unexpected error {e:#?}"), + } + } + + // Helper function to set and unset environment variables for tests + fn with_env_var(name: &str, value: Option<&str>, test: F) -> T + where + F: FnOnce() -> T, + { + let prev_value = env::var(name).ok(); + match value { + Some(val) => env::set_var(name, val), + None => env::remove_var(name), + } + let result = test(); + match prev_value { + Some(val) => env::set_var(name, val), + None => env::remove_var(name), + } + result + } + + #[test] + #[serial_test::serial] + fn test_get_config_source_env_not_set() { + with_env_var("TEST_READ_FROM_ENV", None, || { + let path = PathBuf::from("/path/to/config"); + let result = get_config_source("TEST_READ_FROM_ENV", Some(path.clone())); + assert_eq!(result.unwrap(), ConfigSource::File(path)); + + // When path is not provided, should return error + let result = get_config_source("TEST_READ_FROM_ENV", None); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), BridgeError::ConfigError(_))); + }) + } + + #[test] + #[serial_test::serial] + fn test_get_config_source_env_set_to_off() { + // Test with "0" + with_env_var("TEST_READ_FROM_ENV", Some("0"), || { + let path = PathBuf::from("/path/to/config"); + let result = get_config_source("TEST_READ_FROM_ENV", Some(path.clone())); + assert_eq!(result.unwrap(), ConfigSource::File(path)); + + // When path is not provided, should return error + let result = get_config_source("TEST_READ_FROM_ENV", None); + assert!(result.is_err()); + }); + + // Test with "off" + with_env_var("TEST_READ_FROM_ENV", Some("off"), || { + let path = PathBuf::from("/path/to/config"); + let result = get_config_source("TEST_READ_FROM_ENV", Some(path.clone())); + assert_eq!(result.unwrap(), ConfigSource::File(path)); + }) + } + + #[test] + #[serial_test::serial] + fn 
test_get_config_source_env_set_to_on() { + // Test with "1" + with_env_var("TEST_READ_FROM_ENV", Some("1"), || { + let result = get_config_source("TEST_READ_FROM_ENV", None); + assert_eq!(result.unwrap(), ConfigSource::Env); + + // Even if path is provided, should still return Env + let path = PathBuf::from("/path/to/config"); + let result = get_config_source("TEST_READ_FROM_ENV", Some(path)); + assert_eq!(result.unwrap(), ConfigSource::Env); + }); + + // Test with "on" + with_env_var("TEST_READ_FROM_ENV", Some("on"), || { + let result = get_config_source("TEST_READ_FROM_ENV", None); + assert_eq!(result.unwrap(), ConfigSource::Env); + }) + } + + #[test] + #[serial_test::serial] + fn test_get_config_source_env_unknown_value() { + with_env_var("TEST_READ_FROM_ENV", Some("invalid"), || { + let result = get_config_source("TEST_READ_FROM_ENV", None); + assert_eq!(result.unwrap(), ConfigSource::Env); + }) + } + + // Helper to create a temporary config file + fn with_temp_config_file(content: &str, test: F) -> T + where + F: FnOnce(PathBuf) -> T, + { + let temp_dir = tempfile::tempdir().unwrap(); + let file_path = temp_dir.path().join("bridge_config.toml"); + + let mut file = File::create(&file_path).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + + let result = test(file_path); + temp_dir.close().unwrap(); + result + } + + // Helper to set up all environment variables needed for config + fn setup_config_env_vars() { + env::set_var("HOST", "127.0.0.1"); + env::set_var("PORT", "17000"); + env::set_var( + "SECRET_KEY", + "1111111111111111111111111111111111111111111111111111111111111111", + ); + env::set_var("OPERATOR_WITHDRAWAL_FEE_SATS", "100000"); + env::set_var("BITCOIN_RPC_URL", "http://127.0.0.1:18443/wallet/admin"); + env::set_var("BITCOIN_RPC_USER", "admin"); + env::set_var("BITCOIN_RPC_PASSWORD", "admin"); + env::set_var("DB_HOST", "127.0.0.1"); + env::set_var("DB_PORT", "5432"); + env::set_var("DB_USER", "clementine"); + env::set_var("DB_PASSWORD", 
"clementine"); + env::set_var("DB_NAME", "clementine"); + env::set_var("CITREA_RPC_URL", ""); + env::set_var("CITREA_LIGHT_CLIENT_PROVER_URL", ""); + env::set_var("CITREA_CHAIN_ID", "5655"); + env::set_var( + "BRIDGE_CONTRACT_ADDRESS", + "3100000000000000000000000000000000000002", + ); + env::set_var("SERVER_CERT_PATH", "certs/server/server.pem"); + env::set_var("SERVER_KEY_PATH", "certs/server/server.key"); + env::set_var("CA_CERT_PATH", "certs/ca/ca.pem"); + env::set_var("CLIENT_CERT_PATH", "certs/client/client.pem"); + env::set_var("CLIENT_KEY_PATH", "certs/client/client.key"); + env::set_var("AGGREGATOR_CERT_PATH", "certs/aggregator/aggregator.pem"); + env::set_var("CLIENT_VERIFICATION", "true"); + env::set_var( + "SECURITY_COUNCIL", + "1:50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0", + ); + + env::set_var("TELEMETRY_HOST", "0.0.0.0"); + env::set_var("TELEMETRY_PORT", "8081"); + } + + // Helper to set up all environment variables needed for protocol paramset + fn setup_protocol_paramset_env_vars() { + env::set_var("NETWORK", "regtest"); + env::set_var("NUM_ROUND_TXS", "2"); + env::set_var("NUM_KICKOFFS_PER_ROUND", "10"); + env::set_var("NUM_SIGNED_KICKOFFS", "2"); + env::set_var("BRIDGE_AMOUNT", "1000000000"); + env::set_var("KICKOFF_AMOUNT", "0"); + env::set_var("OPERATOR_CHALLENGE_AMOUNT", "200000000"); + env::set_var("COLLATERAL_FUNDING_AMOUNT", "99000000"); + env::set_var("KICKOFF_BLOCKHASH_COMMIT_LENGTH", "40"); + env::set_var("WATCHTOWER_CHALLENGE_BYTES", "144"); + env::set_var("WINTERNITZ_LOG_D", "4"); + env::set_var("USER_TAKES_AFTER", "200"); + env::set_var("OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK", "144"); + env::set_var("OPERATOR_CHALLENGE_NACK_TIMELOCK", "432"); + env::set_var("DISPROVE_TIMEOUT_TIMELOCK", "720"); + env::set_var("ASSERT_TIMEOUT_TIMELOCK", "576"); + env::set_var("OPERATOR_REIMBURSE_TIMELOCK", "12"); + env::set_var("WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK", "288"); + env::set_var("TIME_TO_SEND_WATCHTOWER_CHALLENGE", 
"216"); + env::set_var("LATEST_BLOCKHASH_TIMEOUT_TIMELOCK", "360"); + env::set_var("FINALITY_DEPTH", "1"); + env::set_var("START_HEIGHT", "8148"); + env::set_var("GENESIS_HEIGHT", "0"); + env::set_var( + "GENESIS_CHAIN_STATE_HASH", + "5f7302ad16c8bd9ef2f3be00c8199a86f9e0ba861484abb4af5f7e457f8c2216", + ); + env::set_var("HEADER_CHAIN_PROOF_BATCH_SIZE", "100"); + env::set_var("BRIDGE_NONSTANDARD", "true"); + } + + // Helper to clean up all environment variables + fn cleanup_config_env_vars() { + env::remove_var("HOST"); + env::remove_var("PORT"); + env::remove_var("SECRET_KEY"); + env::remove_var("OPERATOR_WITHDRAWAL_FEE_SATS"); + env::remove_var("BITCOIN_RPC_URL"); + env::remove_var("BITCOIN_RPC_USER"); + env::remove_var("BITCOIN_RPC_PASSWORD"); + env::remove_var("DB_HOST"); + env::remove_var("DB_PORT"); + env::remove_var("DB_USER"); + env::remove_var("DB_PASSWORD"); + env::remove_var("DB_NAME"); + env::remove_var("CITREA_RPC_URL"); + env::remove_var("CITREA_LIGHT_CLIENT_PROVER_URL"); + env::remove_var("BRIDGE_CONTRACT_ADDRESS"); + env::remove_var("SERVER_CERT_PATH"); + env::remove_var("SERVER_KEY_PATH"); + env::remove_var("CA_CERT_PATH"); + env::remove_var("CLIENT_CERT_PATH"); + env::remove_var("CLIENT_KEY_PATH"); + env::remove_var("AGGREGATOR_CERT_PATH"); + env::remove_var("CLIENT_VERIFICATION"); + env::remove_var("SECURITY_COUNCIL"); + env::remove_var("TELEMETRY_HOST"); + env::remove_var("TELEMETRY_PORT"); + } + + // Helper to clean up all protocol paramset environment variables + fn cleanup_protocol_paramset_env_vars() { + env::remove_var("NETWORK"); + env::remove_var("NUM_ROUND_TXS"); + env::remove_var("NUM_KICKOFFS_PER_ROUND"); + env::remove_var("NUM_SIGNED_KICKOFFS"); + env::remove_var("BRIDGE_AMOUNT"); + env::remove_var("KICKOFF_AMOUNT"); + env::remove_var("OPERATOR_CHALLENGE_AMOUNT"); + env::remove_var("COLLATERAL_FUNDING_AMOUNT"); + env::remove_var("KICKOFF_BLOCKHASH_COMMIT_LENGTH"); + env::remove_var("WATCHTOWER_CHALLENGE_BYTES"); + 
env::remove_var("WINTERNITZ_LOG_D"); + env::remove_var("USER_TAKES_AFTER"); + env::remove_var("OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK"); + env::remove_var("OPERATOR_CHALLENGE_NACK_TIMELOCK"); + env::remove_var("DISPROVE_TIMEOUT_TIMELOCK"); + env::remove_var("ASSERT_TIMEOUT_TIMELOCK"); + env::remove_var("OPERATOR_REIMBURSE_TIMELOCK"); + env::remove_var("WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK"); + env::remove_var("TIME_TO_SEND_WATCHTOWER_CHALLENGE"); + env::remove_var("FINALITY_DEPTH"); + env::remove_var("START_HEIGHT"); + env::remove_var("HEADER_CHAIN_PROOF_BATCH_SIZE"); + } + + // Basic minimum toml config content + const MINIMAL_CONFIG_CONTENT: &str = include_str!("test/data/bridge_config.toml"); + + #[test] + #[serial_test::serial] + fn test_get_cli_config_file_mode() { + with_env_var("READ_CONFIG_FROM_ENV", Some("0"), || { + with_temp_config_file(MINIMAL_CONFIG_CONTENT, |config_path| { + // Create a temp protocol paramset file + with_temp_config_file( + include_str!("./test/data/protocol_paramset.toml"), + |protocol_path| { + let args = vec![ + "clementine-core", + "verifier", + "--config", + config_path.to_str().unwrap(), + "--protocol-params", + protocol_path.to_str().unwrap(), + ]; + + let result = get_cli_config_from_args(args); + + let (config, cli_args) = result.expect("Failed to get CLI config"); + assert_eq!(config.host, "127.0.0.1"); + assert_eq!(config.port, 17000); + assert_eq!(cli_args.actor, Actors::Verifier); + + // Assert some protocol paramset values + assert_eq!(config.protocol_paramset.network.to_string(), "regtest"); + assert_eq!(config.protocol_paramset.num_round_txs, 2); + assert_eq!(config.protocol_paramset.winternitz_log_d, 4); + }, + ) + }) + }) + } + + #[test] + #[serial_test::serial] + fn test_get_cli_config_env_mode() { + setup_config_env_vars(); + setup_protocol_paramset_env_vars(); + + with_env_var("READ_CONFIG_FROM_ENV", Some("1"), || { + with_env_var("READ_PARAMSET_FROM_ENV", Some("1"), || { + let args = vec!["clementine-core", 
"operator"]; + + let result = get_cli_config_from_args(args); + + let (config, cli_args) = result.expect("Failed to get CLI config"); + assert_eq!(config.host, "127.0.0.1"); + assert_eq!(config.port, 17000); + assert_eq!(cli_args.actor, Actors::Operator); + + // Assert some protocol paramset values + assert_eq!(config.protocol_paramset.network.to_string(), "regtest"); + assert_eq!(config.protocol_paramset.num_round_txs, 2); + assert_eq!(config.protocol_paramset.winternitz_log_d, 4); + assert_eq!(config.protocol_paramset.start_height, 8148); // This should be from the environment variable + }); + }); + + cleanup_config_env_vars(); + cleanup_protocol_paramset_env_vars(); + } + + #[test] + #[serial_test::serial] + fn test_mixed_config_sources() { + // Set up config from file but protocol paramset from env + setup_protocol_paramset_env_vars(); + + with_env_var("READ_CONFIG_FROM_ENV", Some("0"), || { + with_env_var("READ_PARAMSET_FROM_ENV", Some("1"), || { + with_temp_config_file(MINIMAL_CONFIG_CONTENT, |config_path| { + let args = vec![ + "clementine-core", + "verifier", + "--config", + config_path.to_str().unwrap(), + ]; + + let result = get_cli_config_from_args(args); + + let (config, cli_args) = result.expect("Failed to get CLI config"); + assert_eq!(config.host, "127.0.0.1"); + assert_eq!(config.port, 17000); + assert_eq!(cli_args.actor, Actors::Verifier); + + // Assert some protocol paramset values from env + assert_eq!(config.protocol_paramset.network.to_string(), "regtest"); + assert_eq!(config.protocol_paramset.start_height, 8148); // This should be from the environment variable + }) + }) + }); + + cleanup_protocol_paramset_env_vars(); + } + + #[test] + #[serial_test::serial] + fn test_get_cli_config_file_without_path() { + with_env_var("READ_CONFIG_FROM_ENV", Some("0"), || { + let args = vec!["clementine-core", "verifier"]; + + let result = get_cli_config_from_args(args); + result.expect_err("Expected error when config file path is not provided"); + }) + } +} 
diff --git a/core/src/config/env.rs b/core/src/config/env.rs new file mode 100644 index 000000000..e14b864c6 --- /dev/null +++ b/core/src/config/env.rs @@ -0,0 +1,496 @@ +//! # Environment Variable Support For [`BridgeConfig`] + +use super::BridgeConfig; +use crate::{ + config::{default_grpc_limits, GrpcLimits, TelemetryConfig}, + deposit::SecurityCouncil, + errors::BridgeError, +}; +use bitcoin::{address::NetworkUnchecked, secp256k1::SecretKey, Amount}; +use eyre::Context; +use std::{path::PathBuf, str::FromStr, time::Duration}; + +pub(crate) fn read_string_from_env(env_var: &'static str) -> Result { + std::env::var(env_var).map_err(|e| BridgeError::EnvVarNotSet(e, env_var)) +} + +pub(crate) fn read_string_from_env_then_parse( + env_var: &'static str, +) -> Result +where + ::Err: std::fmt::Debug, +{ + read_string_from_env(env_var)? + .parse::() + .map_err(|e| BridgeError::EnvVarMalformed(env_var, format!("{:?}", e))) +} + +impl GrpcLimits { + pub fn from_env() -> Result { + let defaults = default_grpc_limits(); + Ok(GrpcLimits { + max_message_size: read_string_from_env_then_parse::("GRPC_MAX_MESSAGE_SIZE") + .unwrap_or(defaults.max_message_size), + timeout_secs: read_string_from_env_then_parse::("GRPC_TIMEOUT_SECS") + .unwrap_or(defaults.timeout_secs), + tcp_keepalive_secs: read_string_from_env_then_parse::("GRPC_TCP_KEEPALIVE_SECS") + .unwrap_or(defaults.tcp_keepalive_secs), + req_concurrency_limit: read_string_from_env_then_parse::( + "GRPC_REQ_CONCURRENCY_LIMIT", + ) + .unwrap_or(defaults.req_concurrency_limit), + ratelimit_req_count: read_string_from_env_then_parse::( + "GRPC_RATELIMIT_REQ_COUNT", + ) + .unwrap_or(defaults.ratelimit_req_count), + ratelimit_req_interval_secs: read_string_from_env_then_parse::( + "GRPC_RATELIMIT_REQ_INTERVAL_SECS", + ) + .unwrap_or(defaults.ratelimit_req_interval_secs), + }) + } +} + +impl BridgeConfig { + pub fn from_env() -> Result { + let verifier_endpoints = + std::env::var("VERIFIER_ENDPOINTS") + .ok() + 
.map(|verifier_endpoints| { + verifier_endpoints + .split(",") + .collect::>() + .iter() + .map(|x| x.to_string()) + .collect::>() + }); + let operator_endpoints = + std::env::var("OPERATOR_ENDPOINTS") + .ok() + .map(|operator_endpoints| { + operator_endpoints + .split(",") + .collect::>() + .iter() + .map(|x| x.to_string()) + .collect::>() + }); + + let winternitz_secret_key = if let Ok(sk) = std::env::var("WINTERNITZ_SECRET_KEY") { + Some(sk.parse::().map_err(|e| { + BridgeError::EnvVarMalformed("WINTERNITZ_SECRET_KEY", e.to_string()) + })?) + } else { + None + }; + + let operator_withdrawal_fee_sats = if let Ok(operator_withdrawal_fee_sats) = + std::env::var("OPERATOR_WITHDRAWAL_FEE_SATS") + { + Some(Amount::from_sat( + operator_withdrawal_fee_sats.parse::().map_err(|e| { + BridgeError::EnvVarMalformed("OPERATOR_WITHDRAWAL_FEE_SATS", e.to_string()) + })?, + )) + } else { + None + }; + + let header_chain_proof_path = + if let Ok(header_chain_proof_path) = std::env::var("HEADER_CHAIN_PROOF_PATH") { + Some(PathBuf::from(header_chain_proof_path)) + } else { + None + }; + + let operator_reimbursement_address = if let Ok(operator_reimbursement_address) = + std::env::var("OPERATOR_REIMBURSEMENT_ADDRESS") + { + Some( + operator_reimbursement_address + .parse::>() + .map_err(|e| { + BridgeError::EnvVarMalformed( + "OPERATOR_REIMBURSEMENT_ADDRESS", + e.to_string(), + ) + })?, + ) + } else { + None + }; + + let operator_collateral_funding_outpoint = if let Ok(operator_collateral_funding_outpoint) = + std::env::var("OPERATOR_COLLATERAL_FUNDING_OUTPOINT") + { + Some( + operator_collateral_funding_outpoint + .parse::() + .map_err(|e| { + BridgeError::EnvVarMalformed( + "OPERATOR_COLLATERAL_FUNDING_OUTPOINT", + e.to_string(), + ) + })?, + ) + } else { + None + }; + + let aggregator_verification_address = std::env::var("AGGREGATOR_VERIFICATION_ADDRESS") + .ok() + .map(|addr| { + addr.parse::() + .wrap_err("Failed to parse AGGREGATOR_VERIFICATION_ADDRESS") + }) + .transpose()?; 
+ + // TLS certificate and key paths + let server_cert_path = read_string_from_env("SERVER_CERT_PATH").map(PathBuf::from)?; + let server_key_path = read_string_from_env("SERVER_KEY_PATH").map(PathBuf::from)?; + let client_cert_path = read_string_from_env("CLIENT_CERT_PATH").map(PathBuf::from)?; + let ca_cert_path = read_string_from_env("CA_CERT_PATH").map(PathBuf::from)?; + let client_key_path = read_string_from_env("CLIENT_KEY_PATH").map(PathBuf::from)?; + let aggregator_cert_path = + read_string_from_env("AGGREGATOR_CERT_PATH").map(PathBuf::from)?; + let client_verification = + read_string_from_env("CLIENT_VERIFICATION").is_ok_and(|s| s == "true" || s == "1"); + + let security_council_string = read_string_from_env("SECURITY_COUNCIL")?; + + let security_council = SecurityCouncil::from_str(&security_council_string)?; + + let citrea_request_timeout = std::env::var("CITREA_REQUEST_TIMEOUT") + .ok() + .and_then(|timeout| timeout.parse::().ok()) + .map(Duration::from_secs); + + let config = BridgeConfig { + // Protocol paramset's source is independently defined + protocol_paramset: Default::default(), + host: read_string_from_env("HOST")?, + port: read_string_from_env_then_parse::("PORT")?, + secret_key: read_string_from_env_then_parse::("SECRET_KEY")?, + winternitz_secret_key, + operator_withdrawal_fee_sats, + operator_reimbursement_address, + operator_collateral_funding_outpoint, + bitcoin_rpc_url: read_string_from_env("BITCOIN_RPC_URL")?, + bitcoin_rpc_user: read_string_from_env("BITCOIN_RPC_USER")?.into(), + bitcoin_rpc_password: read_string_from_env("BITCOIN_RPC_PASSWORD")?.into(), + mempool_api_host: read_string_from_env("MEMPOOL_API_HOST").ok(), + mempool_api_endpoint: read_string_from_env("MEMPOOL_API_ENDPOINT").ok(), + db_host: read_string_from_env("DB_HOST")?, + db_port: read_string_from_env_then_parse::("DB_PORT")?, + db_user: read_string_from_env("DB_USER")?.into(), + db_password: read_string_from_env("DB_PASSWORD")?.into(), + db_name: 
read_string_from_env("DB_NAME")?, + citrea_rpc_url: read_string_from_env("CITREA_RPC_URL")?, + citrea_light_client_prover_url: read_string_from_env("CITREA_LIGHT_CLIENT_PROVER_URL")?, + citrea_chain_id: read_string_from_env_then_parse::("CITREA_CHAIN_ID")?, + citrea_request_timeout, + bridge_contract_address: read_string_from_env("BRIDGE_CONTRACT_ADDRESS")?, + header_chain_proof_path, + verifier_endpoints, + operator_endpoints, + security_council, + aggregator_verification_address, + client_verification, + server_cert_path, + server_key_path, + ca_cert_path, + client_cert_path, + client_key_path, + aggregator_cert_path, + emergency_stop_encryption_public_key: read_string_from_env( + "EMERGENCY_STOP_ENCRYPTION_PUBLIC_KEY", + ) + .ok() + .map(|key| { + hex::decode(key) + .expect("valid hex") + .try_into() + .expect("valid key") + }), + + telemetry: TelemetryConfig::from_env().ok(), + grpc: GrpcLimits::from_env()?, + + #[cfg(test)] + test_params: super::TestParams::default(), + }; + + tracing::debug!("BridgeConfig from env: {:?}", config); + Ok(config) + } +} + +#[cfg(test)] +mod tests { + use secrecy::ExposeSecret; + + use crate::config::{ + protocol::{ProtocolParamset, REGTEST_PARAMSET}, + BridgeConfig, + }; + + #[test] + #[serial_test::serial] + fn get_config_from_env_vars() { + let default_config = BridgeConfig::default(); + + std::env::set_var("HOST", &default_config.host); + std::env::set_var("PORT", default_config.port.to_string()); + std::env::set_var( + "SECRET_KEY", + default_config.secret_key.display_secret().to_string(), + ); + if let Some(ref winternitz_secret_key) = default_config.winternitz_secret_key { + std::env::set_var( + "WINTERNITZ_SECRET_KEY", + winternitz_secret_key.display_secret().to_string(), + ); + } + if let Some(ref operator_withdrawal_fee_sats) = default_config.operator_withdrawal_fee_sats + { + std::env::set_var( + "OPERATOR_WITHDRAWAL_FEE_SATS", + operator_withdrawal_fee_sats.to_sat().to_string(), + ); + } + 
std::env::set_var("BITCOIN_RPC_URL", &default_config.bitcoin_rpc_url); + std::env::set_var( + "BITCOIN_RPC_USER", + default_config.bitcoin_rpc_user.expose_secret(), + ); + std::env::set_var( + "BITCOIN_RPC_PASSWORD", + default_config.bitcoin_rpc_password.expose_secret(), + ); + std::env::set_var("DB_HOST", default_config.db_host.clone()); + std::env::set_var("DB_PORT", default_config.db_port.to_string()); + std::env::set_var("DB_USER", default_config.db_user.expose_secret()); + std::env::set_var("DB_PASSWORD", default_config.db_password.expose_secret()); + std::env::set_var("DB_NAME", &default_config.db_name); + std::env::set_var("CITREA_RPC_URL", &default_config.citrea_rpc_url); + std::env::set_var( + "CITREA_LIGHT_CLIENT_PROVER_URL", + &default_config.citrea_light_client_prover_url, + ); + std::env::set_var( + "CITREA_CHAIN_ID", + default_config.citrea_chain_id.to_string(), + ); + std::env::set_var( + "BRIDGE_CONTRACT_ADDRESS", + &default_config.bridge_contract_address, + ); + std::env::set_var( + "AGGREGATOR_CERT_PATH", + default_config.aggregator_cert_path.clone(), + ); + std::env::set_var("CLIENT_CERT_PATH", default_config.client_cert_path.clone()); + std::env::set_var("CLIENT_KEY_PATH", default_config.client_key_path.clone()); + std::env::set_var("SERVER_CERT_PATH", default_config.server_cert_path.clone()); + std::env::set_var("SERVER_KEY_PATH", default_config.server_key_path.clone()); + std::env::set_var("CA_CERT_PATH", default_config.ca_cert_path.clone()); + std::env::set_var( + "CLIENT_VERIFICATION", + default_config.client_verification.to_string(), + ); + + std::env::set_var( + "SECURITY_COUNCIL", + default_config.security_council.to_string(), + ); + + if let Some(ref header_chain_proof_path) = default_config.header_chain_proof_path { + std::env::set_var("HEADER_CHAIN_PROOF_PATH", header_chain_proof_path); + } + if let Some(ref verifier_endpoints) = default_config.verifier_endpoints { + std::env::set_var("VERIFIER_ENDPOINTS", 
verifier_endpoints.join(",")); + } + if let Some(ref operator_endpoints) = default_config.operator_endpoints { + std::env::set_var("OPERATOR_ENDPOINTS", operator_endpoints.join(",")); + } + + if let Some(ref operator_reimbursement_address) = + default_config.operator_reimbursement_address + { + std::env::set_var( + "OPERATOR_REIMBURSEMENT_ADDRESS", + operator_reimbursement_address + .to_owned() + .assume_checked() + .to_string(), + ); + } + + if let Some(ref operator_collateral_funding_outpoint) = + default_config.operator_collateral_funding_outpoint + { + std::env::set_var( + "OPERATOR_COLLATERAL_FUNDING_OUTPOINT", + operator_collateral_funding_outpoint.to_string(), + ); + } + + std::env::set_var( + "TELEMETRY_HOST", + default_config.telemetry.as_ref().unwrap().host.clone(), + ); + std::env::set_var( + "TELEMETRY_PORT", + default_config.telemetry.as_ref().unwrap().port.to_string(), + ); + + std::env::set_var( + "GRPC_MAX_MESSAGE_SIZE", + default_config.grpc.max_message_size.to_string(), + ); + std::env::set_var( + "GRPC_TIMEOUT_SECS", + default_config.grpc.timeout_secs.to_string(), + ); + std::env::set_var( + "GRPC_TCP_KEEPALIVE_SECS", + default_config.grpc.tcp_keepalive_secs.to_string(), + ); + std::env::set_var( + "GRPC_CONCURRENCY_LIMIT", + default_config.grpc.req_concurrency_limit.to_string(), + ); + std::env::set_var( + "GRPC_RATELIMIT_REQ_COUNT", + default_config.grpc.ratelimit_req_count.to_string(), + ); + if let Some(ref aggregator_verification_address) = + default_config.aggregator_verification_address + { + std::env::set_var( + "AGGREGATOR_VERIFICATION_ADDRESS", + aggregator_verification_address.to_string(), + ); + } + + if let Some(ref emergency_stop_encryption_public_key) = + default_config.emergency_stop_encryption_public_key + { + std::env::set_var( + "EMERGENCY_STOP_ENCRYPTION_PUBLIC_KEY", + hex::encode(emergency_stop_encryption_public_key), + ); + } + + assert_eq!(super::BridgeConfig::from_env().unwrap(), default_config); + } + + #[test] + 
#[serial_test::serial] + fn get_protocol_paramset_from_env_vars() { + let default_config = REGTEST_PARAMSET; + + std::env::set_var("NETWORK", default_config.network.to_string()); + std::env::set_var("NUM_ROUND_TXS", default_config.num_round_txs.to_string()); + std::env::set_var( + "NUM_KICKOFFS_PER_ROUND", + default_config.num_kickoffs_per_round.to_string(), + ); + std::env::set_var( + "NUM_SIGNED_KICKOFFS", + default_config.num_signed_kickoffs.to_string(), + ); + std::env::set_var( + "BRIDGE_AMOUNT", + default_config.bridge_amount.to_sat().to_string(), + ); + std::env::set_var( + "KICKOFF_AMOUNT", + default_config.kickoff_amount.to_sat().to_string(), + ); + std::env::set_var( + "OPERATOR_CHALLENGE_AMOUNT", + default_config + .operator_challenge_amount + .to_sat() + .to_string(), + ); + std::env::set_var( + "COLLATERAL_FUNDING_AMOUNT", + default_config + .collateral_funding_amount + .to_sat() + .to_string(), + ); + std::env::set_var( + "KICKOFF_BLOCKHASH_COMMIT_LENGTH", + default_config.kickoff_blockhash_commit_length.to_string(), + ); + std::env::set_var( + "WATCHTOWER_CHALLENGE_BYTES", + default_config.watchtower_challenge_bytes.to_string(), + ); + std::env::set_var( + "WINTERNITZ_LOG_D", + default_config.winternitz_log_d.to_string(), + ); + std::env::set_var( + "USER_TAKES_AFTER", + default_config.user_takes_after.to_string(), + ); + std::env::set_var( + "OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK", + default_config + .operator_challenge_timeout_timelock + .to_string(), + ); + std::env::set_var( + "OPERATOR_CHALLENGE_NACK_TIMELOCK", + default_config.operator_challenge_nack_timelock.to_string(), + ); + std::env::set_var( + "DISPROVE_TIMEOUT_TIMELOCK", + default_config.disprove_timeout_timelock.to_string(), + ); + std::env::set_var( + "ASSERT_TIMEOUT_TIMELOCK", + default_config.assert_timeout_timelock.to_string(), + ); + std::env::set_var( + "OPERATOR_REIMBURSE_TIMELOCK", + default_config.operator_reimburse_timelock.to_string(), + ); + std::env::set_var( + 
"WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK", + default_config + .watchtower_challenge_timeout_timelock + .to_string(), + ); + std::env::set_var( + "TIME_TO_SEND_WATCHTOWER_CHALLENGE", + default_config.time_to_send_watchtower_challenge.to_string(), + ); + std::env::set_var("FINALITY_DEPTH", default_config.finality_depth.to_string()); + std::env::set_var("START_HEIGHT", default_config.start_height.to_string()); + std::env::set_var("GENESIS_HEIGHT", default_config.genesis_height.to_string()); + std::env::set_var( + "GENESIS_CHAIN_STATE_HASH", + hex::encode(default_config.genesis_chain_state_hash), + ); + std::env::set_var( + "LATEST_BLOCKHASH_TIMEOUT_TIMELOCK", + default_config.latest_blockhash_timeout_timelock.to_string(), + ); + std::env::set_var( + "HEADER_CHAIN_PROOF_BATCH_SIZE", + default_config.header_chain_proof_batch_size.to_string(), + ); + + std::env::set_var( + "BRIDGE_NONSTANDARD", + default_config.bridge_nonstandard.to_string(), + ); + + assert_eq!(ProtocolParamset::from_env().unwrap(), default_config); + } +} diff --git a/core/src/config/mod.rs b/core/src/config/mod.rs new file mode 100644 index 000000000..d3e175909 --- /dev/null +++ b/core/src/config/mod.rs @@ -0,0 +1,440 @@ +//! # Configuration Options +//! +//! This module defines configuration options. +//! +//! This module is base for `cli` module and not dependent on it. Therefore, +//! this module can be used independently. +//! +//! ## Configuration File +//! +//! Configuration options can be read from a TOML file. File contents are +//! described in `BridgeConfig` struct. 
+ +use crate::config::env::{read_string_from_env, read_string_from_env_then_parse}; +use crate::deposit::SecurityCouncil; +use crate::errors::BridgeError; +use bitcoin::address::NetworkUnchecked; +use bitcoin::secp256k1::SecretKey; +use bitcoin::{Address, Amount, OutPoint, XOnlyPublicKey}; +use protocol::ProtocolParamset; +use secrecy::SecretString; +use serde::Deserialize; +use std::str::FromStr; +use std::time::Duration; +use std::{fs::File, io::Read, path::PathBuf}; + +pub mod env; +pub mod protocol; + +#[cfg(test)] +mod test; + +#[cfg(test)] +pub use test::*; + +/// Configuration options for any Clementine target (tests, binaries etc.). +#[derive(Debug, Clone, Deserialize)] +pub struct BridgeConfig { + /// Protocol paramset + /// + /// Sourced from either a file or the environment, is set to REGTEST_PARAMSET in tests + /// + /// Skipped in deserialization and replaced by either file/environment source. See [`crate::cli::get_cli_config`] + #[serde(skip)] + pub protocol_paramset: &'static ProtocolParamset, + /// Host of the operator or the verifier + pub host: String, + /// Port of the operator or the verifier + pub port: u16, + /// Secret key for the operator or the verifier. + pub secret_key: SecretKey, + /// Additional secret key that will be used for creating Winternitz one time signature. + pub winternitz_secret_key: Option<SecretKey>, + /// Operator's fee for withdrawal, in satoshis. + pub operator_withdrawal_fee_sats: Option<Amount>, + /// Bitcoin remote procedure call URL. + pub bitcoin_rpc_url: String, + /// Bitcoin RPC user. + pub bitcoin_rpc_user: SecretString, + /// Bitcoin RPC user password. + pub bitcoin_rpc_password: SecretString, + /// mempool.space API host for retrieving the fee rate. If None, Bitcoin Core RPC will be used. + pub mempool_api_host: Option<String>, + /// mempool.space API endpoint for retrieving the fee rate. If None, Bitcoin Core RPC will be used. + pub mempool_api_endpoint: Option<String>, + + /// PostgreSQL database host address. 
+ pub db_host: String, + /// PostgreSQL database port. + pub db_port: usize, + /// PostgreSQL database user name. + pub db_user: SecretString, + /// PostgreSQL database user password. + pub db_password: SecretString, + /// PostgreSQL database name. + pub db_name: String, + /// Citrea RPC URL. + pub citrea_rpc_url: String, + /// Citrea light client prover RPC URL. + pub citrea_light_client_prover_url: String, + /// Citrea's EVM Chain ID. + pub citrea_chain_id: u32, + /// Timeout in seconds for Citrea RPC calls. + pub citrea_request_timeout: Option<Duration>, + /// Bridge contract address. + pub bridge_contract_address: String, + // Initial header chain proof receipt's file path. + pub header_chain_proof_path: Option<PathBuf>, + + /// Security council. + pub security_council: SecurityCouncil, + + /// Verifier endpoints. For the aggregator only + pub verifier_endpoints: Option<Vec<String>>, + /// Operator endpoint. For the aggregator only + pub operator_endpoints: Option<Vec<String>>, + + /// Own operator's reimbursement address. + pub operator_reimbursement_address: Option<Address<NetworkUnchecked>>, + + /// Own operator's collateral funding outpoint. + pub operator_collateral_funding_outpoint: Option<OutPoint>, + + // TLS certificates + /// Path to the server certificate file. + /// + /// Required for all entities. + pub server_cert_path: PathBuf, + /// Path to the server key file. + pub server_key_path: PathBuf, + + /// Path to the client certificate file. (used to communicate with other gRPC services) + /// + /// Required for all entities. This is used to authenticate requests. + /// Aggregator's client certificate should match the expected aggregator + /// certificate in other entities. + /// + /// Aggregator needs this to call other entities, other entities need this + /// to call their own internal endpoints. + pub client_cert_path: PathBuf, + /// Path to the client key file. + pub client_key_path: PathBuf, + + /// Path to the CA certificate file which is used to verify client + /// certificates. 
+ pub ca_cert_path: PathBuf, + + /// Whether client certificates should be restricted to Aggregator and Self certificates. + /// + /// Client certificates are always validated against the CA certificate + /// according to mTLS regardless of this setting. + pub client_verification: bool, + + /// Path to the aggregator certificate file. (used to authenticate requests from aggregator) + /// + /// Aggregator's client cert should be equal to the this certificate. + pub aggregator_cert_path: PathBuf, + + /// Telemetry configuration + pub telemetry: Option<TelemetryConfig>, + + /// The ECDSA address of the citrea/aggregator that will sign the withdrawal params + /// after manual verification of the optimistic payout and operator's withdrawal. + /// Used for both an extra verification of aggregator's identity and to force citrea + /// to check withdrawal params manually during some time after launch. + pub aggregator_verification_address: Option<alloy::primitives::Address>, + + /// The X25519 public key that will be used to encrypt the emergency stop message. + pub emergency_stop_encryption_public_key: Option<[u8; 32]>, + + #[cfg(test)] + #[serde(skip)] + pub test_params: test::TestParams, + + /// gRPC client/server limits + #[serde(default = "default_grpc_limits")] + pub grpc: GrpcLimits, +} + +#[derive(Debug, Clone, Deserialize, PartialEq)] +pub struct GrpcLimits { + pub max_message_size: usize, + pub timeout_secs: u64, + pub tcp_keepalive_secs: u64, + pub req_concurrency_limit: usize, + pub ratelimit_req_count: usize, + pub ratelimit_req_interval_secs: u64, +} + +fn default_grpc_limits() -> GrpcLimits { + GrpcLimits { + max_message_size: 4 * 1024 * 1024, + timeout_secs: 12 * 60 * 60, // 12 hours + tcp_keepalive_secs: 60, + req_concurrency_limit: 300, // 100 deposits at the same time + ratelimit_req_count: 1000, + ratelimit_req_interval_secs: 60, + } +} + +impl BridgeConfig { + /// Create a new `BridgeConfig` with default values. 
+ pub fn new() -> Self { + BridgeConfig { + ..Default::default() + } + } + + /// Get the protocol paramset defined by the paramset name. + pub fn protocol_paramset(&self) -> &'static ProtocolParamset { + self.protocol_paramset + } + + /// Read contents of a TOML file and generate a `BridgeConfig`. + pub fn try_parse_file(path: PathBuf) -> Result<Self, BridgeError> { + let mut contents = String::new(); + + let mut file = match File::open(path.clone()) { + Ok(f) => f, + Err(e) => return Err(BridgeError::ConfigError(e.to_string())), + }; + + if let Err(e) = file.read_to_string(&mut contents) { + return Err(BridgeError::ConfigError(e.to_string())); + } + + tracing::trace!("Using configuration file: {:?}", path); + + BridgeConfig::try_parse_from(contents) + } + + /// Try to parse a `BridgeConfig` from given TOML formatted string and + /// generate a `BridgeConfig`. + pub fn try_parse_from(input: String) -> Result<Self, BridgeError> { + match toml::from_str::<BridgeConfig>(&input) { + Ok(c) => Ok(c), + Err(e) => Err(BridgeError::ConfigError(e.to_string())), + } + } +} + +// only needed for one test +#[cfg(test)] +impl PartialEq for BridgeConfig { + fn eq(&self, other: &Self) -> bool { + use secrecy::ExposeSecret; + + let all_eq = self.protocol_paramset == other.protocol_paramset + && self.host == other.host + && self.port == other.port + && self.secret_key == other.secret_key + && self.winternitz_secret_key == other.winternitz_secret_key + && self.operator_withdrawal_fee_sats == other.operator_withdrawal_fee_sats + && self.bitcoin_rpc_url == other.bitcoin_rpc_url + && self.bitcoin_rpc_user.expose_secret() == other.bitcoin_rpc_user.expose_secret() + && self.bitcoin_rpc_password.expose_secret() + == other.bitcoin_rpc_password.expose_secret() + && self.db_host == other.db_host + && self.db_port == other.db_port + && self.db_user.expose_secret() == other.db_user.expose_secret() + && self.db_password.expose_secret() == other.db_password.expose_secret() + && self.db_name == other.db_name + && self.citrea_rpc_url == 
other.citrea_rpc_url + && self.citrea_light_client_prover_url == other.citrea_light_client_prover_url + && self.citrea_chain_id == other.citrea_chain_id + && self.bridge_contract_address == other.bridge_contract_address + && self.header_chain_proof_path == other.header_chain_proof_path + && self.security_council == other.security_council + && self.verifier_endpoints == other.verifier_endpoints + && self.operator_endpoints == other.operator_endpoints + && self.operator_reimbursement_address == other.operator_reimbursement_address + && self.operator_collateral_funding_outpoint + == other.operator_collateral_funding_outpoint + && self.server_cert_path == other.server_cert_path + && self.server_key_path == other.server_key_path + && self.client_cert_path == other.client_cert_path + && self.client_key_path == other.client_key_path + && self.ca_cert_path == other.ca_cert_path + && self.client_verification == other.client_verification + && self.aggregator_cert_path == other.aggregator_cert_path + && self.test_params == other.test_params + && self.grpc == other.grpc; + + all_eq + } +} + +impl Default for BridgeConfig { + fn default() -> Self { + Self { + protocol_paramset: Default::default(), + host: "127.0.0.1".to_string(), + port: 17000, + + secret_key: SecretKey::from_str( + "1111111111111111111111111111111111111111111111111111111111111111", + ) + .expect("known valid input"), + + operator_withdrawal_fee_sats: Some(Amount::from_sat(100000)), + + bitcoin_rpc_url: "http://127.0.0.1:18443/wallet/admin".to_string(), + bitcoin_rpc_user: "admin".to_string().into(), + bitcoin_rpc_password: "admin".to_string().into(), + mempool_api_host: None, + mempool_api_endpoint: None, + + db_host: "127.0.0.1".to_string(), + db_port: 5432, + db_user: "clementine".to_string().into(), + db_password: "clementine".to_string().into(), + db_name: "clementine".to_string(), + + citrea_rpc_url: "".to_string(), + citrea_light_client_prover_url: "".to_string(), + citrea_chain_id: 5655, + 
bridge_contract_address: "3100000000000000000000000000000000000002".to_string(), + citrea_request_timeout: None, + + header_chain_proof_path: None, + + operator_reimbursement_address: None, + operator_collateral_funding_outpoint: None, + + security_council: SecurityCouncil { + pks: vec![ + XOnlyPublicKey::from_str( + "9ac20335eb38768d2052be1dbbc3c8f6178407458e51e6b4ad22f1d91758895b", + ) + .expect("valid xonly"), + XOnlyPublicKey::from_str( + "5ab4689e400a4a160cf01cd44730845a54768df8547dcdf073d964f109f18c30", + ) + .expect("valid xonly"), + ], + threshold: 1, + }, + + winternitz_secret_key: Some( + SecretKey::from_str( + "2222222222222222222222222222222222222222222222222222222222222222", + ) + .expect("known valid input"), + ), + verifier_endpoints: None, + operator_endpoints: None, + + server_cert_path: PathBuf::from("certs/server/server.pem"), + server_key_path: PathBuf::from("certs/server/server.key"), + client_cert_path: PathBuf::from("certs/client/client.pem"), + client_key_path: PathBuf::from("certs/client/client.key"), + ca_cert_path: PathBuf::from("certs/ca/ca.pem"), + aggregator_cert_path: PathBuf::from("certs/aggregator/aggregator.pem"), + client_verification: true, + aggregator_verification_address: Some( + alloy::primitives::Address::from_str("0x242fbec93465ce42b3d7c0e1901824a2697193fd") + .expect("valid address"), + ), + emergency_stop_encryption_public_key: Some( + hex::decode("025d32d10ec7b899df4eeb4d80918b7f0a1f2a28f6af24f71aa2a59c69c0d531") + .expect("valid hex") + .try_into() + .expect("valid key"), + ), + + telemetry: Some(TelemetryConfig::default()), + + #[cfg(test)] + test_params: test::TestParams::default(), + + // New hardening parameters, optional so they don't break existing configs. 
+ grpc: default_grpc_limits(), + } + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct TelemetryConfig { + pub host: String, + pub port: u16, +} + +impl Default for TelemetryConfig { + fn default() -> Self { + Self { + host: "0.0.0.0".to_string(), + port: 8081, + } + } +} + +impl TelemetryConfig { + pub fn from_env() -> Result<Self, BridgeError> { + let host = read_string_from_env("TELEMETRY_HOST")?; + let port = read_string_from_env_then_parse::<u16>("TELEMETRY_PORT")?; + Ok(Self { host, port }) + } +} + +#[cfg(test)] +mod tests { + use super::BridgeConfig; + use std::{ + fs::{self, File}, + io::Write, + }; + + #[test] + fn parse_from_string() { + // In case of a incorrect file content, we should receive an error. + let content = "brokenfilecontent"; + assert!(BridgeConfig::try_parse_from(content.to_string()).is_err()); + } + + #[test] + fn parse_from_file() { + let file_name = "parse_from_file"; + let content = "invalid file content"; + let mut file = File::create(file_name).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + + assert!(BridgeConfig::try_parse_file(file_name.into()).is_err()); + + // Read first example test file use for this test. 
+ let base_path = env!("CARGO_MANIFEST_DIR"); + let config_path = format!("{}/src/test/data/bridge_config.toml", base_path); + let content = fs::read_to_string(config_path).unwrap(); + let mut file = File::create(file_name).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + + BridgeConfig::try_parse_file(file_name.into()).unwrap(); + + fs::remove_file(file_name).unwrap(); + } + + #[test] + fn parse_from_file_with_invalid_headers() { + let file_name = "parse_from_file_with_invalid_headers"; + let content = "[header1] + num_verifiers = 4 + + [header2] + confirmation_threshold = 1 + network = \"regtest\" + bitcoin_rpc_url = \"http://localhost:18443\" + bitcoin_rpc_user = \"admin\" + bitcoin_rpc_password = \"admin\"\n"; + let mut file = File::create(file_name).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + + assert!(BridgeConfig::try_parse_file(file_name.into()).is_err()); + + fs::remove_file(file_name).unwrap(); + } + + #[test] + fn test_test_config_parseable() { + let content = include_str!("../test/data/bridge_config.toml"); + BridgeConfig::try_parse_from(content.to_string()).unwrap(); + } +} diff --git a/core/src/config/protocol.rs b/core/src/config/protocol.rs new file mode 100644 index 000000000..5d4245914 --- /dev/null +++ b/core/src/config/protocol.rs @@ -0,0 +1,549 @@ +use crate::config::env::read_string_from_env_then_parse; +use crate::constants::{MIN_TAPROOT_AMOUNT, NON_EPHEMERAL_ANCHOR_AMOUNT}; +use crate::errors::BridgeError; +use bitcoin::{Amount, Network}; +use bridge_circuit_host::utils::is_dev_mode; +use circuits_lib::bridge_circuit::constants::{ + DEVNET_LC_IMAGE_ID, MAINNET_LC_IMAGE_ID, REGTEST_LC_IMAGE_ID, TESTNET_LC_IMAGE_ID, +}; +use eyre::Context; +use serde::{Deserialize, Serialize}; +use std::fmt::Display; +use std::fs; +use std::path::Path; +use std::str::FromStr; + +pub const BLOCKS_PER_HOUR: u16 = 6; + +pub const BLOCKS_PER_DAY: u16 = BLOCKS_PER_HOUR * 24; + +pub const BLOCKS_PER_WEEK: u16 = BLOCKS_PER_DAY * 7; + +/// 
This is the log_d used across the codebase. +/// +/// All protocol paramsets should use this value since it's used in the BitVM static. +pub const WINTERNITZ_LOG_D: u32 = 4; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// A pre-defined paramset name that can be converted into a +/// [`ProtocolParamset`] reference. +/// +/// See: [`REGTEST_PARAMSET`] +pub enum ProtocolParamsetName { + // Pre-defined paramsets + Regtest, +} + +impl FromStr for ProtocolParamsetName { + type Err = BridgeError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "regtest" => Ok(ProtocolParamsetName::Regtest), + _ => Err(BridgeError::ConfigError(format!( + "Unknown paramset name: {}", + s + ))), + } + } +} + +impl Display for ProtocolParamsetName { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ProtocolParamsetName::Regtest => write!(f, "regtest"), + } + } +} + +impl From<ProtocolParamsetName> for &'static ProtocolParamset { + fn from(name: ProtocolParamsetName) -> Self { + match name { + ProtocolParamsetName::Regtest => &REGTEST_PARAMSET, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +/// Protocol parameters that affect the transactions in the contract (which also +/// change the pre-calculated txids and sighashes). +/// +/// These parameters are used when generating the transactions and changing them +/// will break compatibility between actors, making deposits impossible. A +/// paramset is chosen by the actor by choosing a ParamsetName inside the +/// [`crate::config::BridgeConfig`]. +pub struct ProtocolParamset { + /// Bitcoin network to work on (mainnet, testnet, regtest). + pub network: Network, + /// Number of round transactions that the operator will create. + pub num_round_txs: usize, + /// Number of kickoff UTXOs per round transaction. + pub num_kickoffs_per_round: usize, + /// Number of kickoffs that are signed per round and deposit. 
+ /// There are num_kickoffs_per_round utxo's, but only num_signed_kickoffs are signed. + pub num_signed_kickoffs: usize, + /// Bridge deposit amount that users can deposit. + pub bridge_amount: Amount, + /// Amount allocated for each kickoff UTXO. + pub kickoff_amount: Amount, + /// Amount allocated for operator challenge transactions. + pub operator_challenge_amount: Amount, + /// Collateral funding amount for operators used to fund the round transaction chain. + pub collateral_funding_amount: Amount, + /// Length of the blockhash commitment in kickoff transactions. + pub kickoff_blockhash_commit_length: u32, + /// Total number of bytes of a watchtower challenge. + pub watchtower_challenge_bytes: usize, + /// Winternitz derivation log_d (shared for all WOTS commitments) + /// Currently used in statics and thus cannot be different from [`WINTERNITZ_LOG_D`]. + pub winternitz_log_d: u32, + /// Number of blocks after which user can take deposit back if deposit request fails. + pub user_takes_after: u16, + /// Number of blocks for operator challenge timeout timelock (currently BLOCKS_PER_WEEK) + pub operator_challenge_timeout_timelock: u16, + /// Number of blocks for operator challenge NACK timelock (currently BLOCKS_PER_WEEK * 3) + pub operator_challenge_nack_timelock: u16, + /// Number of blocks for disprove timeout timelock (currently BLOCKS_PER_WEEK * 5) + pub disprove_timeout_timelock: u16, + /// Number of blocks for assert timeout timelock (currently BLOCKS_PER_WEEK * 4) + pub assert_timeout_timelock: u16, + /// Number of blocks for latest blockhash timeout timelock (currently BLOCKS_PER_WEEK * 2.5) + pub latest_blockhash_timeout_timelock: u16, + /// Number of blocks for operator reimburse timelock (currently BLOCKS_PER_DAY * 2) + /// Timelocks operator from sending the next Round Tx after the Ready to Reimburse Tx. 
+ pub operator_reimburse_timelock: u16, + /// Number of blocks for watchtower challenge timeout timelock (currently BLOCKS_PER_WEEK * 2) + pub watchtower_challenge_timeout_timelock: u16, + /// Time to wait after a kickoff to send a watchtower challenge + pub time_to_send_watchtower_challenge: u16, + /// Amount of depth a block should have from the current head to be considered finalized + pub finality_depth: u32, + /// start height to sync the chain from, i.e. the height bridge was deployed + pub start_height: u32, + /// Genesis height to sync the header chain proofs from + pub genesis_height: u32, + /// Genesis chain state hash + pub genesis_chain_state_hash: [u8; 32], + /// Batch size of the header chain proofs + pub header_chain_proof_batch_size: u32, + /// Denotes if the bridge is non-standard, i.e. uses 0 sat outputs for round tx (except collateral) and kickoff outputs + pub bridge_nonstandard: bool, +} + +impl ProtocolParamset { + pub fn from_toml_file(path: &Path) -> Result<Self, BridgeError> { + let contents = fs::read_to_string(path).wrap_err("Failed to read config file")?; + + let paramset: Self = toml::from_str(&contents).wrap_err("Failed to parse TOML")?; + + Ok(paramset) + } + pub fn from_env() -> Result<Self, BridgeError> { + let config = ProtocolParamset { + network: read_string_from_env_then_parse::<Network>("NETWORK")?, + num_round_txs: read_string_from_env_then_parse::<usize>("NUM_ROUND_TXS")?, + num_kickoffs_per_round: read_string_from_env_then_parse::<usize>( + "NUM_KICKOFFS_PER_ROUND", + )?, + num_signed_kickoffs: read_string_from_env_then_parse::<usize>("NUM_SIGNED_KICKOFFS")?, + bridge_amount: Amount::from_sat(read_string_from_env_then_parse::<u64>( + "BRIDGE_AMOUNT", + )?), + kickoff_amount: Amount::from_sat(read_string_from_env_then_parse::<u64>( + "KICKOFF_AMOUNT", + )?), + operator_challenge_amount: Amount::from_sat(read_string_from_env_then_parse::<u64>( + "OPERATOR_CHALLENGE_AMOUNT", + )?), + collateral_funding_amount: Amount::from_sat(read_string_from_env_then_parse::<u64>( + "COLLATERAL_FUNDING_AMOUNT", + )?), + 
kickoff_blockhash_commit_length: read_string_from_env_then_parse::<u32>( + "KICKOFF_BLOCKHASH_COMMIT_LENGTH", + )?, + watchtower_challenge_bytes: read_string_from_env_then_parse::<usize>( + "WATCHTOWER_CHALLENGE_BYTES", + )?, + winternitz_log_d: read_string_from_env_then_parse::<u32>("WINTERNITZ_LOG_D")?, + user_takes_after: read_string_from_env_then_parse::<u16>("USER_TAKES_AFTER")?, + operator_challenge_timeout_timelock: read_string_from_env_then_parse::<u16>( + "OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK", + )?, + operator_challenge_nack_timelock: read_string_from_env_then_parse::<u16>( + "OPERATOR_CHALLENGE_NACK_TIMELOCK", + )?, + disprove_timeout_timelock: read_string_from_env_then_parse::<u16>( + "DISPROVE_TIMEOUT_TIMELOCK", + )?, + assert_timeout_timelock: read_string_from_env_then_parse::<u16>( + "ASSERT_TIMEOUT_TIMELOCK", + )?, + operator_reimburse_timelock: read_string_from_env_then_parse::<u16>( + "OPERATOR_REIMBURSE_TIMELOCK", + )?, + watchtower_challenge_timeout_timelock: read_string_from_env_then_parse::<u16>( + "WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK", + )?, + time_to_send_watchtower_challenge: read_string_from_env_then_parse::<u16>( + "TIME_TO_SEND_WATCHTOWER_CHALLENGE", + )?, + finality_depth: read_string_from_env_then_parse::<u32>("FINALITY_DEPTH")?, + start_height: read_string_from_env_then_parse::<u32>("START_HEIGHT")?, + genesis_height: read_string_from_env_then_parse::<u32>("GENESIS_HEIGHT")?, + genesis_chain_state_hash: convert_hex_string_to_bytes( + &read_string_from_env_then_parse::<String>("GENESIS_CHAIN_STATE_HASH")?, + )?, + header_chain_proof_batch_size: read_string_from_env_then_parse::<u32>( + "HEADER_CHAIN_PROOF_BATCH_SIZE", + )?, + latest_blockhash_timeout_timelock: read_string_from_env_then_parse::<u16>( + "LATEST_BLOCKHASH_TIMEOUT_TIMELOCK", + )?, + bridge_nonstandard: read_string_from_env_then_parse::<bool>("BRIDGE_NONSTANDARD")?, + }; + + Ok(config) + } + + pub fn default_utxo_amount(&self) -> Amount { + if self.bridge_nonstandard { + Amount::from_sat(0) + } else { + MIN_TAPROOT_AMOUNT + } + } + + pub fn anchor_amount(&self) -> 
Amount { + if self.bridge_nonstandard { + Amount::from_sat(0) + } else { + NON_EPHEMERAL_ANCHOR_AMOUNT + } + } + + pub fn bridge_circuit_constant(&self) -> Result<&[u8; 32], BridgeError> { + match self.network { + Network::Regtest => { + if is_dev_mode() { + Ok(&REGTEST_TEST_BRIDGE_CIRCUIT_CONSTANT) + } else { + Ok(&REGTEST_BRIDGE_CIRCUIT_CONSTANT) + } + } + Network::Bitcoin => Ok(&MAINNET_BRIDGE_CIRCUIT_CONSTANT), + Network::Testnet4 => Ok(&TESTNET4_BRIDGE_CIRCUIT_CONSTANT), + Network::Signet => Ok(&SIGNET_BRIDGE_CIRCUIT_CONSTANT), + _ => Err(BridgeError::UnsupportedNetwork), + } + } + + /// Get the light client proof image id for the network. + pub fn get_lcp_image_id(&self) -> Result<[u8; 32], BridgeError> { + Ok(match self.network { + bitcoin::Network::Bitcoin => MAINNET_LC_IMAGE_ID, + bitcoin::Network::Testnet4 => TESTNET_LC_IMAGE_ID, + bitcoin::Network::Signet => DEVNET_LC_IMAGE_ID, + bitcoin::Network::Regtest => REGTEST_LC_IMAGE_ID, + _ => return Err(eyre::eyre!("Unsupported Bitcoin network").into()), + }) + } +} + +fn convert_hex_string_to_bytes(hex: &str) -> Result<[u8; 32], BridgeError> { + let hex_decode = hex::decode(hex).wrap_err("Failed to decode hex string")?; + let hex_bytes: [u8; 32] = hex_decode + .as_slice() + .try_into() + .wrap_err("Hex string is not 32 bytes")?; + Ok(hex_bytes) +} + +impl Default for ProtocolParamset { + fn default() -> Self { + REGTEST_PARAMSET + } +} +impl Default for &'static ProtocolParamset { + fn default() -> Self { + &REGTEST_PARAMSET + } +} + +pub const REGTEST_PARAMSET: ProtocolParamset = ProtocolParamset { + network: Network::Regtest, + num_round_txs: 2, + num_kickoffs_per_round: 10, + num_signed_kickoffs: 2, + bridge_amount: Amount::from_sat(1_000_000_000), + kickoff_amount: Amount::from_sat(0), + operator_challenge_amount: Amount::from_sat(200_000_000), + collateral_funding_amount: Amount::from_sat(99_000_000), + watchtower_challenge_bytes: 144, + kickoff_blockhash_commit_length: 40, + winternitz_log_d: WINTERNITZ_LOG_D, + 
user_takes_after: 200, + operator_challenge_timeout_timelock: 4 * BLOCKS_PER_HOUR, + operator_challenge_nack_timelock: 4 * BLOCKS_PER_HOUR * 3, + disprove_timeout_timelock: 4 * BLOCKS_PER_HOUR * 5, + assert_timeout_timelock: 4 * BLOCKS_PER_HOUR * 4, + operator_reimburse_timelock: 2, + watchtower_challenge_timeout_timelock: 4 * BLOCKS_PER_HOUR * 2, + time_to_send_watchtower_challenge: 4 * BLOCKS_PER_HOUR * 3 / 2, + latest_blockhash_timeout_timelock: 4 * BLOCKS_PER_HOUR * 5 / 2, + finality_depth: 5, // citrea e2e finality depth + start_height: 190, + genesis_height: 0, + genesis_chain_state_hash: [ + 95, 115, 2, 173, 22, 200, 189, 158, 242, 243, 190, 0, 200, 25, 154, 134, 249, 224, 186, + 134, 20, 132, 171, 180, 175, 95, 126, 69, 127, 140, 34, 22, + ], + header_chain_proof_batch_size: 100, + bridge_nonstandard: true, +}; + +pub const TESTNET4_TEST_PARAMSET: ProtocolParamset = ProtocolParamset { + network: Network::Testnet4, + num_round_txs: 2, + num_kickoffs_per_round: 10, + num_signed_kickoffs: 2, + bridge_amount: Amount::from_sat(1_000_000), + kickoff_amount: Amount::from_sat(0), + operator_challenge_amount: Amount::from_sat(200_000), + collateral_funding_amount: Amount::from_sat(99_000), + watchtower_challenge_bytes: 144, + kickoff_blockhash_commit_length: 40, + winternitz_log_d: WINTERNITZ_LOG_D, + user_takes_after: 200, + operator_challenge_timeout_timelock: 4 * BLOCKS_PER_HOUR, + operator_challenge_nack_timelock: 4 * BLOCKS_PER_HOUR * 3, + disprove_timeout_timelock: 4 * BLOCKS_PER_HOUR * 5, + assert_timeout_timelock: 4 * BLOCKS_PER_HOUR * 4, + operator_reimburse_timelock: 2, + watchtower_challenge_timeout_timelock: 4 * BLOCKS_PER_HOUR * 2, + time_to_send_watchtower_challenge: 4 * BLOCKS_PER_HOUR * 3 / 2, + latest_blockhash_timeout_timelock: 4 * BLOCKS_PER_HOUR * 5 / 2, + finality_depth: 1, + start_height: 92700, + genesis_height: 92700, + genesis_chain_state_hash: [ + 0xe4, 0xe1, 0x28, 0xa8, 0x99, 0xaf, 0xee, 0xb1, 0x85, 0x5b, 0x4a, 0xb7, 0x2e, 0x4d, 0x88, + 
0x50, 0xab, 0x35, 0x1b, 0xde, 0xf9, 0x4f, 0xc2, 0x78, 0xe8, 0x5c, 0x13, 0x11, 0xe2, 0x72, + 0xfe, 0x6a, + ], + header_chain_proof_batch_size: 10000, + bridge_nonstandard: true, +}; + +pub const REGTEST_TEST_BRIDGE_CIRCUIT_CONSTANT: [u8; 32] = [ + 191, 41, 245, 76, 47, 243, 175, 215, 219, 221, 93, 163, 232, 132, 93, 27, 3, 251, 171, 32, 120, + 230, 199, 203, 123, 174, 113, 106, 70, 185, 3, 52, +]; + +pub const REGTEST_BRIDGE_CIRCUIT_CONSTANT: [u8; 32] = [ + 18, 34, 120, 86, 134, 81, 152, 68, 107, 120, 50, 144, 102, 218, 1, 50, 193, 184, 244, 176, 250, + 99, 34, 217, 11, 101, 238, 187, 119, 235, 11, 151, +]; + +pub const SIGNET_BRIDGE_CIRCUIT_CONSTANT: [u8; 32] = [ + 11, 11, 164, 223, 65, 47, 193, 97, 48, 121, 43, 219, 141, 45, 132, 241, 237, 185, 34, 46, 87, + 93, 148, 53, 5, 212, 234, 57, 173, 153, 91, 112, +]; +pub const MAINNET_BRIDGE_CIRCUIT_CONSTANT: [u8; 32] = [ + 183, 151, 179, 224, 163, 237, 236, 35, 211, 80, 80, 124, 24, 149, 248, 213, 7, 0, 139, 82, 196, + 135, 166, 9, 43, 148, 68, 42, 8, 91, 181, 212, +]; +pub const TESTNET4_BRIDGE_CIRCUIT_CONSTANT: [u8; 32] = [ + 136, 222, 235, 50, 115, 169, 44, 20, 138, 181, 161, 199, 16, 200, 72, 54, 94, 72, 165, 23, 123, + 178, 74, 175, 179, 104, 63, 255, 208, 234, 165, 189, +]; + +#[cfg(test)] +mod tests { + use bridge_circuit_host::{ + bridge_circuit_host::{ + MAINNET_BRIDGE_CIRCUIT_ELF, REGTEST_BRIDGE_CIRCUIT_ELF, SIGNET_BRIDGE_CIRCUIT_ELF, + TESTNET4_BRIDGE_CIRCUIT_ELF, + }, + utils::calculate_succinct_output_prefix, + }; + use circuits_lib::{ + bridge_circuit::constants::{ + MAINNET_WORK_ONLY_METHOD_ID, REGTEST_WORK_ONLY_METHOD_ID, SIGNET_WORK_ONLY_METHOD_ID, + TESTNET4_WORK_ONLY_METHOD_ID, + }, + common::constants::{ + MAINNET_HEADER_CHAIN_METHOD_ID, REGTEST_HEADER_CHAIN_METHOD_ID, + SIGNET_HEADER_CHAIN_METHOD_ID, TESTNET4_HEADER_CHAIN_METHOD_ID, + }, + }; + use risc0_zkvm::compute_image_id; + + use bridge_circuit_host::bridge_circuit_host::{ + MAINNET_HEADER_CHAIN_ELF, MAINNET_WORK_ONLY_ELF, 
REGTEST_HEADER_CHAIN_ELF, + REGTEST_WORK_ONLY_ELF, SIGNET_HEADER_CHAIN_ELF, SIGNET_WORK_ONLY_ELF, + TESTNET4_HEADER_CHAIN_ELF, TESTNET4_WORK_ONLY_ELF, + }; + + use super::*; + + #[test] + fn test_regtest_test_bridge_circuit_constant() { + let regtest_bridge_elf = + include_bytes!("../../../risc0-circuits/elfs/test-regtest-bridge-circuit-guest.bin"); + let regtest_bridge_circuit_method_id = + compute_image_id(regtest_bridge_elf).expect("should compute image id"); + let calculated_regtest_bridge_circuit_constant = + calculate_succinct_output_prefix(regtest_bridge_circuit_method_id.as_bytes()); + + let regtest_bridge_circuit_constant = REGTEST_TEST_BRIDGE_CIRCUIT_CONSTANT; + assert_eq!( + calculated_regtest_bridge_circuit_constant, + regtest_bridge_circuit_constant, + "You forgot to update regtest-(test) bridge_circuit_constant with the new method id. Please change it in these places: core/src/config/protocol.rs. The expected value is: {:?}, hex format: {:?}", + calculated_regtest_bridge_circuit_constant, + hex::encode(calculated_regtest_bridge_circuit_constant) + ); + } + + #[test] + fn test_regtest_bridge_circuit_constant() { + let regtest_bridge_elf = REGTEST_BRIDGE_CIRCUIT_ELF; + let regtest_bridge_circuit_method_id = + compute_image_id(regtest_bridge_elf).expect("should compute image id"); + let calculated_regtest_bridge_circuit_constant = + calculate_succinct_output_prefix(regtest_bridge_circuit_method_id.as_bytes()); + + let regtest_bridge_circuit_constant = REGTEST_BRIDGE_CIRCUIT_CONSTANT; + assert_eq!( + calculated_regtest_bridge_circuit_constant, + regtest_bridge_circuit_constant, + "You forgot to update regtest bridge_circuit_constant with the new method id. Please change it in these places: core/src/config/protocol.rs. 
The expected value is: {:?}, hex format: {:?}", + calculated_regtest_bridge_circuit_constant, + hex::encode(calculated_regtest_bridge_circuit_constant) + ); + } + + #[test] + fn test_mainnet_bridge_circuit_constant() { + let mainnet_bridge_elf = MAINNET_BRIDGE_CIRCUIT_ELF; + let mainnet_bridge_circuit_method_id = + compute_image_id(mainnet_bridge_elf).expect("should compute image id"); + let calculated_mainnet_bridge_circuit_constant = + calculate_succinct_output_prefix(mainnet_bridge_circuit_method_id.as_bytes()); + + let mainnet_bridge_circuit_constant = MAINNET_BRIDGE_CIRCUIT_CONSTANT; + assert_eq!( + calculated_mainnet_bridge_circuit_constant, + mainnet_bridge_circuit_constant, + "You forgot to update mainnet bridge_circuit_constant with the new method id. Please change it in these places: core/src/config/protocol.rs. The expected value is: {:?}, hex format: {:?}", + calculated_mainnet_bridge_circuit_constant, + hex::encode(calculated_mainnet_bridge_circuit_constant) + ); + } + + #[test] + fn test_testnet4_bridge_circuit_constant() { + let testnet4_bridge_elf = TESTNET4_BRIDGE_CIRCUIT_ELF; + let testnet4_bridge_circuit_method_id = + compute_image_id(testnet4_bridge_elf).expect("should compute image id"); + let calculated_testnet4_bridge_circuit_constant = + calculate_succinct_output_prefix(testnet4_bridge_circuit_method_id.as_bytes()); + + let testnet4_bridge_circuit_constant = TESTNET4_BRIDGE_CIRCUIT_CONSTANT; + assert_eq!( + calculated_testnet4_bridge_circuit_constant, + testnet4_bridge_circuit_constant, + "You forgot to update testnet4 bridge_circuit_constant with the new method id. Please change it in these places: core/src/config/protocol.rs. 
The expected value is: {:?}, hex format: {:?}", + calculated_testnet4_bridge_circuit_constant, + hex::encode(calculated_testnet4_bridge_circuit_constant) + ); + } + + #[test] + fn test_signet_bridge_circuit_constant() { + let signet_bridge_elf = SIGNET_BRIDGE_CIRCUIT_ELF; + let signet_bridge_circuit_method_id = + compute_image_id(signet_bridge_elf).expect("should compute image id"); + let calculated_signet_bridge_circuit_constant = + calculate_succinct_output_prefix(signet_bridge_circuit_method_id.as_bytes()); + + let signet_bridge_circuit_constant = SIGNET_BRIDGE_CIRCUIT_CONSTANT; + assert_eq!( + calculated_signet_bridge_circuit_constant, + signet_bridge_circuit_constant, + "You forgot to update signet bridge_circuit_constant with the new method id. Please change it in these places: core/src/config/protocol.rs. The expected value is: {:?}, hex format: {:?}", + calculated_signet_bridge_circuit_constant, + hex::encode(calculated_signet_bridge_circuit_constant) + ); + } + + #[test] + fn test_header_chain_method_ids() { + let networks = [ + ( + MAINNET_HEADER_CHAIN_ELF, + MAINNET_HEADER_CHAIN_METHOD_ID, + "mainnet", + ), + ( + TESTNET4_HEADER_CHAIN_ELF, + TESTNET4_HEADER_CHAIN_METHOD_ID, + "testnet4", + ), + ( + SIGNET_HEADER_CHAIN_ELF, + SIGNET_HEADER_CHAIN_METHOD_ID, + "signet", + ), + ( + REGTEST_HEADER_CHAIN_ELF, + REGTEST_HEADER_CHAIN_METHOD_ID, + "regtest", + ), + ]; + + for (elf, method_id, network) in networks.into_iter() { + let header_chain_circuit_method_id = compute_image_id(elf); + assert_eq!( + header_chain_circuit_method_id.expect("should compute image id").as_words(), + method_id, + "Header chain method ID mismatch for {network}, please update the constant here: circuits-lib/src/common/constants.rs", + ); + } + } + + #[test] + fn test_work_only_method_ids() { + let networks = [ + ( + MAINNET_WORK_ONLY_ELF, + MAINNET_WORK_ONLY_METHOD_ID, + "mainnet", + ), + ( + TESTNET4_WORK_ONLY_ELF, + TESTNET4_WORK_ONLY_METHOD_ID, + "testnet4", + ), + 
(SIGNET_WORK_ONLY_ELF, SIGNET_WORK_ONLY_METHOD_ID, "signet"), + ( + REGTEST_WORK_ONLY_ELF, + REGTEST_WORK_ONLY_METHOD_ID, + "regtest", + ), + ]; + + for (elf, method_id, network) in networks.into_iter() { + let work_only_circuit_method_id = + compute_image_id(elf).expect("should compute image id"); + let current_method_id = work_only_circuit_method_id.as_bytes(); + assert_eq!( + current_method_id, + method_id, + "Work only method ID mismatch for {network}, please update the constant here: circuits-lib/src/bridge_circuit/constants.rs. Hex format of correct value: {:?}", + hex::encode(current_method_id) + ); + } + } +} diff --git a/core/src/config/test.rs b/core/src/config/test.rs new file mode 100644 index 000000000..38e55aefc --- /dev/null +++ b/core/src/config/test.rs @@ -0,0 +1,543 @@ +use crate::builder::transaction::output::UnspentTxOut; +use crate::builder::transaction::TxHandlerBuilder; +use crate::constants::MIN_TAPROOT_AMOUNT; +use crate::deposit::DepositData; +use crate::header_chain_prover::HeaderChainProver; +use bitcoin::blockdata::block::BlockHash; +use bitcoin::secp256k1::PublicKey; +use bitcoin::secp256k1::SecretKey; +use bitcoin::ScriptBuf; +use bitcoin::TxOut; +use bitvm::chunk::api::Assertions; +use risc0_zkvm::Receipt; +use serde::{Deserialize, Serialize}; +use std::str::FromStr; + +#[derive(Debug, Clone, PartialEq)] +pub struct TestParams { + /// Controls whether the state manager component is initialized and run as part of the test setup. + /// Allows for testing components in isolation from the state manager. + pub should_run_state_manager: bool, + + /// Contains the secret keys for all simulated verifier nodes in the test environment. + pub all_verifiers_secret_keys: Vec, + + /// Contains the secret keys for all simulated operator nodes in the test. + pub all_operators_secret_keys: Vec, + + /// A fault injection flag. If true, an operator will intentionally commit to an incorrect latest block hash. 
+ /// This is used to test if verifiers can correctly detect and handle this invalid commitment. + pub disrupt_latest_block_hash_commit: bool, + + /// A fault injection flag. If true, simulates an operator committing to an invalid block hash + /// for the payout transaction. + pub disrupt_payout_tx_block_hash_commit: bool, + + /// A fault injection flag for challenge sending watchtowers detection. When enabled, simulates an operator + /// sending a corrupted or invalid commitment to watchtowers who has sent challenges. + pub disrupt_challenge_sending_watchtowers_commit: bool, + + /// Simulates a scenario where an operator fails to include a watchtower, who has sent a challenge, + pub operator_forgot_watchtower_challenge: bool, + + /// A flag to introduce intentionally inconsistent or invalid data into the BitVM assertions. + pub corrupted_asserts: bool, + + /// A flag to indicate whether the public input for the BitVM challenge is corrupted. + pub corrupted_public_input: bool, + + /// A flag to generate blocks to the address of the wallet. + pub generate_to_address: bool, + + /// A flag to indicate whether to use small annexes in the watchtower challenge transactions. + pub use_small_annex: bool, + + /// A flag to indicate whether to use large annexes in the watchtower challenge transactions. + pub use_large_annex: bool, + + /// A flag to indicate whether to use large outputs in the watchtower challenge transactions. + pub use_large_output: bool, + + /// A flag to indicate whether to use large annexes and outputs in the watchtower challenge transactions. + pub use_large_annex_and_output: bool, + + /// A list of verifier indexes that should not attempt to send disprove transactions. + pub verifier_do_not_send_disprove_indexes: Option>, + + /// A flag to enable data generation for bridge circuit tests (diverse total works). 
+ pub generate_varying_total_works_insufficient_total_work: bool, + + pub generate_varying_total_works: bool, + + pub generate_varying_total_works_first_two_valid: bool, + + /// A secret key for generating signatures of optimistic payout verification. + /// It should match the aggregator_verification_address in BridgeConfig. + pub aggregator_verification_secret_key: Option, + + /// Secret keys belonging to the security council. + /// Should match the xonly public keys in the security council of config, otherwise + /// some tests will fail. + pub sec_council_secret_keys: Vec, + + /// A flag to enable mining of 0-fee transactions. It's used so that we do not need to CPFP for no-automation to make tests easier. + pub mine_0_fee_txs: bool, + + pub timeout_params: TimeoutTestParams, +} + +impl TestParams { + /// Returns true if the verifier should attempt to send a disprove transaction, false otherwise. + pub fn should_disprove( + &self, + verifier_pk: &PublicKey, + deposit_data: &DepositData, + ) -> eyre::Result { + let verifier_idx = deposit_data.get_verifier_index(verifier_pk)?; + Ok(self + .verifier_do_not_send_disprove_indexes + .as_ref() + .is_none_or(|indexes| !indexes.contains(&verifier_idx))) + } + + pub fn maybe_corrupt_asserts(&self, asserts: Assertions) -> Assertions { + use rand::Rng; + let mut asserts = asserts; + if self.corrupted_asserts { + let mut rng = rand::thread_rng(); + + if rng.gen_bool(0.5) { + let i = rng.gen_range(0..asserts.1.len()); + let j = rng.gen_range(0..asserts.1[i].len()); + tracing::info!("Disrupting asserts commit 1 with i: {}, j: {}", i, j); + + asserts.1[i][j] ^= 0x01; + } else { + let i = rng.gen_range(0..asserts.2.len()); + let j = rng.gen_range(0..asserts.2[i].len()); + tracing::info!("Disrupting asserts commit 2 with i: {}, j: {}", i, j); + + asserts.2[i][j] ^= 0x01; + } + } else if self.corrupted_public_input { + let mut rng = rand::thread_rng(); + let j = rng.gen_range(1..asserts.0[0].len()); + + tracing::info!("Disrupting 
public input with i: 0, j: {}", j); + asserts.0[0][j] ^= 0x01; + } + asserts + } + + pub fn maybe_override_blockhashes_serialized( + &self, + blockhashes_serialized: Vec<[u8; 32]>, + payout_block_height: u32, + genesis_height: u32, + total_works: Vec<[u8; 16]>, + ) -> Vec<[u8; 32]> { + if self.generate_varying_total_works_insufficient_total_work { + let take_count = (payout_block_height + 1 - genesis_height) as usize; + tracing::info!( + "Overriding blockhashes: insufficient total work mode with {} blocks", + take_count + ); + return blockhashes_serialized + .iter() + .take(take_count) + .cloned() + .collect(); + } + + if self.generate_varying_total_works_first_two_valid { + let highest_valid_wt_index = self.highest_valid_wt_index(total_works).unwrap(); + + tracing::info!( + "Overriding blockhashes: first two valid mode with {} blocks", + highest_valid_wt_index + ); + return blockhashes_serialized + .iter() + .take(highest_valid_wt_index) + .cloned() + .collect(); + } + + blockhashes_serialized.to_vec() + } + + pub async fn maybe_override_current_hcp( + &self, + current_hcp: Receipt, + payout_block_hash: BlockHash, + block_hashes: &[(BlockHash, impl Sized)], + header_chain_prover: &HeaderChainProver, + total_works: Vec<[u8; 16]>, + ) -> eyre::Result { + if self.generate_varying_total_works_insufficient_total_work { + let (hcp, _) = header_chain_prover + .prove_till_hash(payout_block_hash) + .await?; + return Ok(hcp); + } + + if self.generate_varying_total_works_first_two_valid { + let highest_valid_wt_index = self.highest_valid_wt_index(total_works).unwrap(); + let target_blockhash = block_hashes.get(highest_valid_wt_index).ok_or_else(|| { + eyre::eyre!("Missing blockhash at index {}", highest_valid_wt_index) + })?; + + let (hcp, _) = header_chain_prover + .prove_till_hash(target_blockhash.0) + .await?; + return Ok(hcp); + } + + Ok(current_hcp) + } + + fn highest_valid_wt_index(&self, total_works: Vec<[u8; 16]>) -> eyre::Result { + if total_works.len() < 2 { + 
return Err(eyre::eyre!( + "Expected at least two total works for first two valid mode" + )); + } + + let mut total_works = total_works; + total_works.sort(); + + let second_lowest_total_work = &total_works[1]; + let second_lowest_total_work_index = usize::from_be_bytes( + second_lowest_total_work[8..16] + .try_into() + .expect("Expected 8 bytes for index conversion"), + ); + + Ok(second_lowest_total_work_index / 2 - 1) + } + + pub fn maybe_disrupt_block_hash(&self, block_hash: [u8; 32]) -> [u8; 32] { + if self.disrupt_latest_block_hash_commit { + tracing::info!("Disrupting block hash commitment for testing purposes"); + tracing::info!("Original block hash: {:?}", block_hash); + let mut disrupted = block_hash; + disrupted[31] ^= 0x01; + return disrupted; + } + + block_hash + } + + pub fn maybe_disrupt_commit_data_for_total_work( + &self, + commit_data: &mut [u8], + wt_index: usize, + ) { + if self.generate_varying_total_works_first_two_valid { + let ref_wt_index = 1; + if ref_wt_index < wt_index { + commit_data[0] ^= 0x01; + tracing::info!( + "Flipping first byte of commit data to generate varying total work. 
Wt index: {}", + wt_index + ); + } + } + } + + pub fn maybe_disrupt_payout_tx_block_hash_commit( + &self, + payout_tx_blockhash: [u8; 20], + ) -> [u8; 20] { + if self.disrupt_payout_tx_block_hash_commit { + tracing::info!( + "Disrupting payout transaction block hash commitment for testing purposes" + ); + let mut disrupted = payout_tx_blockhash; + disrupted[19] ^= 0x01; + return disrupted; + } + + payout_tx_blockhash + } + + pub fn maybe_disrupt_latest_block_hash_commit(&self, latest_block_hash: [u8; 20]) -> [u8; 20] { + if self.disrupt_latest_block_hash_commit { + tracing::info!("Disrupting latest block hash commitment for testing purposes"); + let mut disrupted = latest_block_hash; + disrupted[19] ^= 0x01; + return disrupted; + } + + latest_block_hash + } + + pub fn maybe_add_large_test_outputs( + &self, + mut builder: TxHandlerBuilder, + ) -> eyre::Result { + // Returns the modified builder + // Check if the large annex and output scenario is enabled + if self.use_large_annex_and_output { + for i in 0..2300 { + let mut test_taproot_address: [u8; 32] = [0; 32]; + let num_to_use: u32 = 30000 + i; + let num_to_use_bytes = num_to_use.to_le_bytes(); + // Last 4 bytes of test_taproot_address will be used to differentiate the outputs + test_taproot_address[28..32].copy_from_slice(&num_to_use_bytes); + let mut additional_taproot_script_vec = vec![0x51, 0x20]; + additional_taproot_script_vec.extend_from_slice(&test_taproot_address); + let additional_taproot_script = + ScriptBuf::from_bytes(additional_taproot_script_vec); + let additional_taproot_txout = TxOut { + value: MIN_TAPROOT_AMOUNT, + script_pubkey: additional_taproot_script, + }; + // Reassign the result of add_output back to builder + builder = builder.add_output(UnspentTxOut::from_partial(additional_taproot_txout)); + } + tracing::warn!("Using large annex and output"); + } else if self.use_large_output { + for i in 0..2300 { + let mut test_taproot_address: [u8; 32] = [0; 32]; + let num_to_use: u32 = 30000 + i; 
+ let num_to_use_bytes = num_to_use.to_le_bytes(); + // Last 4 bytes of test_taproot_address will be used to differentiate the outputs + test_taproot_address[28..32].copy_from_slice(&num_to_use_bytes); + let mut additional_taproot_script_vec = vec![0x51, 0x20]; + additional_taproot_script_vec.extend_from_slice(&test_taproot_address); + let additional_taproot_script = + ScriptBuf::from_bytes(additional_taproot_script_vec); + let additional_taproot_txout = TxOut { + value: MIN_TAPROOT_AMOUNT, + script_pubkey: additional_taproot_script, + }; + // Reassign the result of add_output back to builder + builder = builder.add_output(UnspentTxOut::from_partial(additional_taproot_txout)); + } + tracing::warn!("Using large output"); + } + Ok(builder) + } + + pub fn maybe_dump_bridge_circuit_params_to_file( + &self, + bridge_circuit_host_params: &impl borsh::BorshSerialize, + ) -> eyre::Result<()> { + use std::path::PathBuf; + + let cases = [ + ( + self.use_small_annex, + "../bridge-circuit-host/bin-files/bch_params_challenge_tx_with_annex.bin", + ), + ( + self.use_large_annex, + "../bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex.bin", + ), + ( + self.use_large_output, + "../bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_output.bin", + ), + ( + self.use_large_annex_and_output, + "../bridge-circuit-host/bin-files/bch_params_challenge_tx_with_large_annex_and_output.bin", + ), + ( + self.generate_varying_total_works, + "../bridge-circuit-host/bin-files/bch_params_varying_total_works.bin", + ), + ( + self.generate_varying_total_works_insufficient_total_work, + "../bridge-circuit-host/bin-files/bch_params_varying_total_works_insufficient_total_work.bin", + ), + ( + self.generate_varying_total_works_first_two_valid, + "../bridge-circuit-host/bin-files/bch_params_varying_total_works_first_two_valid.bin", + ), + ]; + + let active_cases: Vec<_> = cases.iter().filter(|(cond, _)| *cond).collect(); + + if active_cases.len() > 1 { + panic!("Multiple 
conflicting bridge circuit dump conditions are enabled"); + } + + if let Some((_, file_path)) = active_cases.first() { + let path = PathBuf::from(file_path); + std::fs::create_dir_all(path.parent().unwrap()) + .map_err(|e| eyre::eyre!("Failed to create directory for output file: {}", e))?; + let serialized_params = borsh::to_vec(bridge_circuit_host_params).map_err(|e| { + eyre::eyre!("Failed to serialize bridge circuit host params: {}", e) + })?; + std::fs::write(&path, serialized_params).map_err(|e| { + eyre::eyre!("Failed to write bridge circuit host params to file: {}", e) + })?; + tracing::info!("Bridge circuit host params written to {:?}", &path); + } + + Ok(()) + } +} + +#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Default)] +pub struct TimeoutTestParams { + /// Verifier index that should time out during key distribution. + pub key_distribution_verifier_idx: Option, + /// Operator index that should time out during key distribution. + pub key_collection_operator_idx: Option, + /// Verifier index that should time out during nonce stream creation. + pub nonce_stream_creation_verifier_idx: Option, + /// Verifier index that should time out during partial signature stream creation. + pub partial_sig_stream_creation_verifier_idx: Option, + /// Operator index that should time out during operator signature collection. + pub operator_sig_collection_operator_idx: Option, + /// Verifier index that should time out during deposit finalization. 
+ pub deposit_finalize_verifier_idx: Option, +} + +impl TimeoutTestParams { + pub fn any_timeout(&self) -> bool { + self.key_distribution_verifier_idx.is_some() + || self.key_collection_operator_idx.is_some() + || self.nonce_stream_creation_verifier_idx.is_some() + || self.partial_sig_stream_creation_verifier_idx.is_some() + || self.operator_sig_collection_operator_idx.is_some() + } + + pub async fn hook_timeout_key_distribution_verifier(&self, idx: usize) { + if self.key_distribution_verifier_idx == Some(idx) { + use tokio::time::sleep; + tokio::time::pause(); + sleep(crate::constants::KEY_DISTRIBUTION_TIMEOUT + std::time::Duration::from_secs(1)) + .await; + } + } + + pub async fn hook_timeout_key_collection_operator(&self, idx: usize) { + if self.key_collection_operator_idx == Some(idx) { + use tokio::time::sleep; + tokio::time::pause(); + sleep(crate::constants::KEY_DISTRIBUTION_TIMEOUT + std::time::Duration::from_secs(1)) + .await; + } + } + + pub async fn hook_timeout_nonce_stream_creation_verifier(&self, idx: usize) { + if self.nonce_stream_creation_verifier_idx == Some(idx) { + use tokio::time::sleep; + tokio::time::pause(); + sleep( + crate::constants::NONCE_STREAM_CREATION_TIMEOUT + std::time::Duration::from_secs(1), + ) + .await; + } + } + + pub async fn hook_timeout_partial_sig_stream_creation_verifier(&self, idx: usize) { + if self.partial_sig_stream_creation_verifier_idx == Some(idx) { + use tokio::time::sleep; + tokio::time::pause(); + sleep( + crate::constants::PARTIAL_SIG_STREAM_CREATION_TIMEOUT + + std::time::Duration::from_secs(1), + ) + .await; + } + } + + pub async fn hook_timeout_operator_sig_collection_operator(&self, idx: usize) { + if self.operator_sig_collection_operator_idx == Some(idx) { + use tokio::time::sleep; + tokio::time::pause(); + sleep( + crate::constants::OPERATOR_SIGS_STREAM_CREATION_TIMEOUT + + std::time::Duration::from_secs(1), + ) + .await; + } + } + + pub async fn hook_timeout_deposit_finalize_verifier(&self, idx: usize) { 
+ if self.deposit_finalize_verifier_idx == Some(idx) { + use tokio::time::sleep; + tokio::time::pause(); + sleep( + crate::constants::DEPOSIT_FINALIZATION_TIMEOUT + std::time::Duration::from_secs(1), + ) + .await; + } + } +} + +impl Default for TestParams { + fn default() -> Self { + Self { + should_run_state_manager: true, + all_verifiers_secret_keys: vec![ + SecretKey::from_str( + "1111111111111111111111111111111111111111111111111111111111111111", + ) + .expect("known valid input"), + SecretKey::from_str( + "2222222222222222222222222222222222222222222222222222222222222222", + ) + .expect("known valid input"), + SecretKey::from_str( + "3333333333333333333333333333333333333333333333333333333333333333", + ) + .expect("known valid input"), + SecretKey::from_str( + "4444444444444444444444444444444444444444444444444444444444444444", + ) + .expect("known valid input"), + ], + all_operators_secret_keys: vec![ + SecretKey::from_str( + "1111111111111111111111111111111111111111111111111111111111111111", + ) + .expect("known valid input"), + SecretKey::from_str( + "2222222222222222222222222222222222222222222222222222222222222222", + ) + .expect("known valid input"), + ], + disrupt_latest_block_hash_commit: false, + disrupt_payout_tx_block_hash_commit: false, + disrupt_challenge_sending_watchtowers_commit: false, + operator_forgot_watchtower_challenge: false, + corrupted_asserts: false, + corrupted_public_input: false, + use_small_annex: false, + use_large_annex: false, + use_large_output: false, + use_large_annex_and_output: false, + timeout_params: TimeoutTestParams::default(), + verifier_do_not_send_disprove_indexes: None, + generate_to_address: true, + generate_varying_total_works_insufficient_total_work: false, + generate_varying_total_works: false, + generate_varying_total_works_first_two_valid: false, + aggregator_verification_secret_key: Some( + alloy::signers::k256::ecdsa::SigningKey::from_slice( + &hex::decode( + 
"7ee82330d90423649d065f2c31f342a323c0d7b29a72eff10d88a9b8b00bed87", + ) + .expect("valid hex"), + ) + .expect("valid secret key"), + ), + sec_council_secret_keys: vec![ + SecretKey::from_str( + "5555555555555555555555555555555555555555555555555555555555555555", + ) + .expect("known valid input"), + SecretKey::from_str( + "6666666666666666666666666666666666666666666666666666666666666666", + ) + .expect("known valid input"), + ], + mine_0_fee_txs: false, + } + } +} diff --git a/core/src/constants.rs b/core/src/constants.rs new file mode 100644 index 000000000..9283003d6 --- /dev/null +++ b/core/src/constants.rs @@ -0,0 +1,87 @@ +use bitcoin::{transaction::Version, Address, Amount, ScriptBuf}; + +/// The amount of the non-ephemeral P2A anchor output. +pub const NON_EPHEMERAL_ANCHOR_AMOUNT: Amount = Amount::from_sat(240); + +/// The minimum possible amount that a UTXO can have when created into a Taproot address. +pub const MIN_TAPROOT_AMOUNT: Amount = Amount::from_sat(330); + +pub const TEN_MINUTES_IN_SECS: u32 = 600; + +pub const DEFAULT_CHANNEL_SIZE: usize = 1280; + +/// The maximum number of nonces that can be generated in a single nonce generation session. +/// A single nonce takes 132 (musig2 secret nonce) bytes. We calculate NUM_NONCES so that a nonce +/// session takes at maximum 150MB. +pub const NUM_NONCES_LIMIT: u32 = 150 * 1_000_000 / MUSIG_SECNONCE_LEN as u32; + +/// The maximum number of bytes that can be used by all nonce sessions. +/// If it exceeds this limit, the verifier will delete the oldest nonce sessions. +/// This limit is approximate, because it doesn't take into account the internal extra bytes used in +/// HashMap and VecDeque used in the AllSessions. It only takes into account bytes used for the secnonces. +pub const MAX_ALL_SESSIONS_BYTES: usize = 2_000_000_000; + +/// The maximum number of nonce sessions that can be stored in the verifier. +/// It is used so that the allsessions do not store too many small (1 nonce) sessions. 
+pub const MAX_NUM_SESSIONS: usize = 2000; + +use secp256k1::ffi::MUSIG_SECNONCE_LEN; +/// The maximum number of Winternitz digits per key. +/// This is used to limit the size of the Winternitz public keys in the protocol +/// to prevent excessive memory usage and ensure efficient processing. +/// This value is achieved when signing a 32-byte message with a Winternitz key, +/// resulting in a maximum of 64 + 4 digits per key, where the last 4 digits are used for +/// the sum-check operation. +pub const MAX_WINTERNITZ_DIGITS_PER_KEY: usize = 68; + +/// The maximum number of script replacement operations allowed in a single BitVM operation. +/// This is a safeguard to prevent excessive resource usage and ensure that the BitVM protocol +/// remains efficient and manageable. +/// The limit is set to 100,000 operations, which is a reasonable upper bound for +/// script replacement operations in the context of BitVM, which is normally a constant +/// equal to 47544. +pub const MAX_SCRIPT_REPLACEMENT_OPERATIONS: usize = 100_000; + +/// The maximum number of bytes per Winternitz key. 
+pub const MAX_BYTES_PER_WINTERNITZ_KEY: usize = MAX_WINTERNITZ_DIGITS_PER_KEY * 20; + +pub use timeout::*; + +mod timeout { + use std::time::Duration; + + pub const OVERALL_DEPOSIT_TIMEOUT: Duration = Duration::from_secs(7200); // 2 hours + + pub const KEY_DISTRIBUTION_TIMEOUT: Duration = Duration::from_secs(1200); // 20 minutes + pub const OPERATOR_GET_KEYS_TIMEOUT: Duration = Duration::from_secs(600); // 10 minutes + pub const VERIFIER_SEND_KEYS_TIMEOUT: Duration = Duration::from_secs(600); // 10 minutes + + pub const NONCE_STREAM_CREATION_TIMEOUT: Duration = Duration::from_secs(300); // 5 minutes + pub const PARTIAL_SIG_STREAM_CREATION_TIMEOUT: Duration = Duration::from_secs(300); // 5 minutes + pub const OPERATOR_SIGS_STREAM_CREATION_TIMEOUT: Duration = Duration::from_secs(300); // 5 minutes + pub const DEPOSIT_FINALIZE_STREAM_CREATION_TIMEOUT: Duration = Duration::from_secs(300); // 5 minutes + + pub const PIPELINE_COMPLETION_TIMEOUT: Duration = Duration::from_secs(3600); // 60 minutes + pub const OPERATOR_SIGS_TIMEOUT: Duration = Duration::from_secs(1200); // 20 minutes + pub const SEND_OPERATOR_SIGS_TIMEOUT: Duration = Duration::from_secs(600); // 10 minutes + pub const DEPOSIT_FINALIZATION_TIMEOUT: Duration = Duration::from_secs(2400); // 40 minutes + + pub const RESTART_BACKGROUND_TASKS_TIMEOUT: Duration = Duration::from_secs(60); + + pub const ENTITY_STATUS_POLL_TIMEOUT: Duration = Duration::from_secs(120); + + pub const PUBLIC_KEY_COLLECTION_TIMEOUT: Duration = Duration::from_secs(30); + + pub const WITHDRAWAL_TIMEOUT: Duration = Duration::from_secs(120); // 2 minutes +} + +pub const NON_STANDARD_V3: Version = Version(3); + +lazy_static::lazy_static! 
{ + pub static ref BURN_SCRIPT: ScriptBuf = ("1111111111111111111114oLvT2") + .parse::>() + .expect("valid burn address") + .assume_checked() + .script_pubkey(); + +} diff --git a/core/src/database/aggregator.rs b/core/src/database/aggregator.rs new file mode 100644 index 000000000..66902841d --- /dev/null +++ b/core/src/database/aggregator.rs @@ -0,0 +1,158 @@ +//! # Verifier Related Database Operations +//! +//! This module includes database functions which are mainly used by a verifier. + +use super::{wrapper::TxidDB, Database, DatabaseTransaction}; +use crate::{errors::BridgeError, execute_query_with_tx}; +use bitcoin::Txid; +use eyre; +use sqlx::QueryBuilder; + +impl Database { + /// Sets a signed emergency stop transaction for a given move transaction ID + pub async fn insert_signed_emergency_stop_tx_if_not_exists( + &self, + tx: Option>, + move_txid: &Txid, + encrypted_emergency_stop_tx: &[u8], + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO emergency_stop_sigs (move_txid, emergency_stop_tx) VALUES ($1, $2) + ON CONFLICT (move_txid) DO NOTHING;", + ) + .bind(TxidDB(*move_txid)) + .bind(encrypted_emergency_stop_tx); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + /// Gets emergency stop transactions for a list of move transaction IDs + pub async fn get_emergency_stop_txs( + &self, + tx: Option>, + move_txids: Vec, + ) -> Result)>, BridgeError> { + if move_txids.is_empty() { + return Ok(Vec::new()); + } + + let mut query_builder = QueryBuilder::new( + "SELECT move_txid, emergency_stop_tx FROM emergency_stop_sigs WHERE move_txid IN (", + ); + + let mut separated = query_builder.separated(", "); + for txid in &move_txids { + separated.push_bind(TxidDB(*txid)); + } + query_builder.push(")"); + + let query = query_builder.build_query_as::<(TxidDB, Vec)>(); + + let results: Vec<(TxidDB, Vec)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + Ok(results + .into_iter() + 
.map(|(txid, tx_data)| Ok((txid.0, tx_data))) + .collect::>()?) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + builder::transaction::{TransactionType, TxHandlerBuilder}, + test::common::*, + }; + use bitcoin::{ + consensus::{self}, + hashes::Hash, + Transaction, Txid, + }; + fn create_test_transaction() -> Transaction { + let tx_handler = TxHandlerBuilder::new(TransactionType::Dummy).finalize(); + tx_handler.get_cached_tx().clone() + } + + #[tokio::test] + async fn test_set_get_emergency_stop_tx() { + let config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + + let move_txid = Txid::from_byte_array([1u8; 32]); + let emergency_stop_tx = create_test_transaction(); + database + .insert_signed_emergency_stop_tx_if_not_exists( + None, + &move_txid, + &consensus::serialize(&emergency_stop_tx), + ) + .await + .unwrap(); + + let results = database + .get_emergency_stop_txs(None, vec![move_txid]) + .await + .unwrap(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].0, move_txid); + assert_eq!(results[0].1, consensus::serialize(&emergency_stop_tx)); + + // Test getting non-existent tx + let non_existent_txid = Txid::from_byte_array([2u8; 32]); + let results = database + .get_emergency_stop_txs(None, vec![non_existent_txid]) + .await + .unwrap(); + assert!(results.is_empty()); + + // Test getting multiple txs + let move_txid2 = Txid::from_byte_array([3u8; 32]); + let emergency_stop_tx2 = create_test_transaction(); + database + .insert_signed_emergency_stop_tx_if_not_exists( + None, + &move_txid2, + &consensus::serialize(&emergency_stop_tx2), + ) + .await + .unwrap(); + + let results = database + .get_emergency_stop_txs(None, vec![move_txid, move_txid2]) + .await + .unwrap(); + + assert_eq!(results.len(), 2); + let mut results = results; + results.sort_by(|a, b| a.0.cmp(&b.0)); + assert_eq!(results[0].0, move_txid); + assert_eq!(results[0].1, consensus::serialize(&emergency_stop_tx)); + 
assert_eq!(results[1].0, move_txid2); + assert_eq!(results[1].1, consensus::serialize(&emergency_stop_tx2)); + + // Re-inserting for an existing move_txid is a no-op (ON CONFLICT DO NOTHING); + // the stored tx stays unchanged (all dummy test txs serialize identically here) + let updated_tx = create_test_transaction(); + database + .insert_signed_emergency_stop_tx_if_not_exists( + None, + &move_txid, + &consensus::serialize(&updated_tx), + ) + .await + .unwrap(); + + let results = database + .get_emergency_stop_txs(None, vec![move_txid]) + .await + .unwrap(); + + assert_eq!(results.len(), 1); + assert_eq!(results[0].0, move_txid); + assert_eq!(results[0].1, consensus::serialize(&updated_tx)); + } +} diff --git a/core/src/database/bitcoin_syncer.rs b/core/src/database/bitcoin_syncer.rs new file mode 100644 index 000000000..3c15aa767 --- /dev/null +++ b/core/src/database/bitcoin_syncer.rs @@ -0,0 +1,965 @@ +use super::{ + wrapper::{BlockHashDB, TxidDB}, + Database, DatabaseTransaction, +}; +use crate::{ + bitcoin_syncer::BitcoinSyncerEvent, config::protocol::ProtocolParamset, errors::BridgeError, + execute_query_with_tx, +}; +use bitcoin::{BlockHash, OutPoint, Txid}; +use eyre::Context; +use std::ops::DerefMut; + +impl Database { + /// # Returns + /// + /// - [`u32`]: Database entry id, later to be used while referring block + pub async fn insert_block_info( + &self, + tx: Option>, + block_hash: &BlockHash, + prev_block_hash: &BlockHash, + block_height: u32, + ) -> Result { + let query = sqlx::query_scalar( + "INSERT INTO bitcoin_syncer (blockhash, prev_blockhash, height) VALUES ($1, $2, $3) RETURNING id", + ) + .bind(BlockHashDB(*block_hash)) + .bind(BlockHashDB(*prev_block_hash)) + .bind(i32::try_from(block_height).wrap_err(BridgeError::IntConversionError)?); + + let id: i32 = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + + u32::try_from(id) + .wrap_err(BridgeError::IntConversionError) + .map_err(Into::into) + } + + /// Sets the block with given block hash as canonical if it exists in the database + /// Returns the block id if the block was found and set as canonical, None
otherwise + pub async fn update_block_as_canonical( + &self, + tx: Option>, + block_hash: BlockHash, + ) -> Result, BridgeError> { + let query = sqlx::query_scalar( + "UPDATE bitcoin_syncer SET is_canonical = true WHERE blockhash = $1 RETURNING id", + ) + .bind(BlockHashDB(block_hash)); + + let id: Option = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + id.map(|id| u32::try_from(id).wrap_err(BridgeError::IntConversionError)) + .transpose() + .map_err(Into::into) + } + + /// # Returns + /// + /// [`Some`] if the block exists in the database, [`None`] otherwise: + /// + /// - [`BlockHash`]: Previous block hash + /// - [`u32`]: Height of the block + pub async fn get_block_info_from_hash( + &self, + tx: Option>, + block_hash: BlockHash, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT prev_blockhash, height FROM bitcoin_syncer WHERE blockhash = $1 AND is_canonical = true", + ) + .bind(BlockHashDB(block_hash)); + + let ret: Option<(BlockHashDB, i32)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + ret.map( + |(prev_hash, height)| -> Result<(BlockHash, u32), BridgeError> { + let height = u32::try_from(height).wrap_err(BridgeError::IntConversionError)?; + Ok((prev_hash.0, height)) + }, + ) + .transpose() + } + + /// Gets block hash and height from block id (internal id used in bitcoin_syncer) + pub async fn get_block_info_from_id( + &self, + tx: Option>, + block_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as("SELECT blockhash, height FROM bitcoin_syncer WHERE id = $1") + .bind(i32::try_from(block_id).wrap_err(BridgeError::IntConversionError)?); + + let ret: Option<(BlockHashDB, i32)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + ret.map( + |(block_hash, height)| -> Result<(BlockHash, u32), BridgeError> { + let height = u32::try_from(height).wrap_err(BridgeError::IntConversionError)?; + Ok((block_hash.0, height)) + }, + ) + .transpose() + } + + 
/// Stores the full block in bytes in the database, with its height and hash + pub async fn upsert_full_block( + &self, + tx: Option>, + block: &bitcoin::Block, + block_height: u32, + ) -> Result<(), BridgeError> { + let block_bytes = bitcoin::consensus::serialize(block); + let query = sqlx::query( + "INSERT INTO bitcoin_blocks (height, block_data, block_hash) VALUES ($1, $2, $3) + ON CONFLICT (height) DO UPDATE SET block_data = $2, block_hash = $3", + ) + .bind(i32::try_from(block_height).wrap_err(BridgeError::IntConversionError)?) + .bind(&block_bytes) + .bind(BlockHashDB(block.header.block_hash())); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + /// Gets the full block from the database, given the block height + pub async fn get_full_block( + &self, + tx: Option>, + block_height: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as("SELECT block_data FROM bitcoin_blocks WHERE height = $1") + .bind(i32::try_from(block_height).wrap_err(BridgeError::IntConversionError)?); + + let block_data: Option<(Vec,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match block_data { + Some((bytes,)) => { + let block = bitcoin::consensus::deserialize(&bytes) + .wrap_err(BridgeError::IntConversionError)?; + Ok(Some(block)) + } + None => Ok(None), + } + } + + /// Gets the full block and its height from the database, given the block hash + pub async fn get_full_block_from_hash( + &self, + tx: Option>, + block_hash: BlockHash, + ) -> Result, BridgeError> { + let query = + sqlx::query_as("SELECT height, block_data FROM bitcoin_blocks WHERE block_hash = $1") + .bind(BlockHashDB(block_hash)); + + let block_data: Option<(i32, Vec)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match block_data { + Some((height_i32, bytes)) => { + let height = u32::try_from(height_i32).wrap_err(BridgeError::IntConversionError)?; + let block = bitcoin::consensus::deserialize(&bytes) + 
.wrap_err(BridgeError::IntConversionError)?; + Ok(Some((height, block))) + } + None => Ok(None), + } + } + + /// Gets the maximum height of the canonical blocks in the bitcoin_syncer database + pub async fn get_max_height( + &self, + tx: Option>, + ) -> Result, BridgeError> { + let query = + sqlx::query_as("SELECT height FROM bitcoin_syncer WHERE is_canonical = true ORDER BY height DESC LIMIT 1"); + let result: Option<(i32,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + result + .map(|(height,)| u32::try_from(height).wrap_err(BridgeError::IntConversionError)) + .transpose() + .map_err(Into::into) + } + + /// Marks blocks with height bigger than the given height as non-canonical. + /// The rows are kept in the table; only their `is_canonical` flag is cleared. + /// + /// # Parameters + /// + /// - `tx`: Optional transaction to use for the query. + /// - `height`: Height to start marking blocks as such (not inclusive). + /// + /// # Returns + /// + /// - [`Vec`]: List of block ids that were marked as non-canonical in + /// ascending order.
+ pub async fn update_non_canonical_block_hashes( + &self, + tx: Option>, + height: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "WITH deleted AS ( + UPDATE bitcoin_syncer + SET is_canonical = false + WHERE height > $1 + RETURNING id + ) SELECT id FROM deleted", + ) + .bind(i32::try_from(height).wrap_err(BridgeError::IntConversionError)?); + + let block_ids: Vec<(i32,)> = execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + block_ids + .into_iter() + .map(|(block_id,)| u32::try_from(block_id).wrap_err(BridgeError::IntConversionError)) + .collect::, eyre::Report>>() + .map_err(Into::into) + } + + /// Gets the block id of the canonical block at the given height + pub async fn get_canonical_block_id_from_height( + &self, + tx: Option>, + height: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT id FROM bitcoin_syncer WHERE height = $1 AND is_canonical = true", + ) + .bind(i32::try_from(height).wrap_err(BridgeError::IntConversionError)?); + + let block_id: Option<(i32,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + block_id + .map(|(block_id,)| u32::try_from(block_id).wrap_err(BridgeError::IntConversionError)) + .transpose() + .map_err(Into::into) + } + + /// Saves the txid with the id of the block that contains it to the database + pub async fn insert_txid_to_block( + &self, + tx: DatabaseTransaction<'_, '_>, + block_id: u32, + txid: &bitcoin::Txid, + ) -> Result<(), BridgeError> { + let query = sqlx::query("INSERT INTO bitcoin_syncer_txs (block_id, txid) VALUES ($1, $2)") + .bind(i32::try_from(block_id).wrap_err(BridgeError::IntConversionError)?) 
+ .bind(super::wrapper::TxidDB(*txid)); + + execute_query_with_tx!(self.connection, Some(tx), query, execute)?; + + Ok(()) + } + + /// Gets all the txids that are contained in the block with the given id + pub async fn get_block_txids( + &self, + tx: Option>, + block_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as("SELECT txid FROM bitcoin_syncer_txs WHERE block_id = $1") + .bind(i32::try_from(block_id).wrap_err(BridgeError::IntConversionError)?); + + let txids: Vec<(TxidDB,)> = execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + Ok(txids.into_iter().map(|(txid,)| txid.0).collect()) + } + + /// Inserts a spent utxo into the database, with the block id that contains it, the spending txid and the vout + pub async fn insert_spent_utxo( + &self, + tx: DatabaseTransaction<'_, '_>, + block_id: u32, + spending_txid: &bitcoin::Txid, + txid: &bitcoin::Txid, + vout: i64, + ) -> Result<(), BridgeError> { + sqlx::query( + "INSERT INTO bitcoin_syncer_spent_utxos (block_id, spending_txid, txid, vout) VALUES ($1, $2, $3, $4)", + ) + .bind(block_id as i32) + .bind(super::wrapper::TxidDB(*spending_txid)) + .bind(super::wrapper::TxidDB(*txid)) + .bind(vout) + .execute(tx.deref_mut()) + .await?; + Ok(()) + } + + /// For a given outpoint, gets the block height of the canonical block that spent it. + /// Returns None if the outpoint is not spent. 
+ pub async fn get_block_height_of_spending_txid( + &self, + tx: Option>, + outpoint: OutPoint, + ) -> Result, BridgeError> { + let query = sqlx::query_scalar::<_, i32>( + "SELECT bs.height FROM bitcoin_syncer_spent_utxos bspu + INNER JOIN bitcoin_syncer bs ON bspu.block_id = bs.id + WHERE bspu.txid = $1 AND bspu.vout = $2 AND bs.is_canonical = true", + ) + .bind(super::wrapper::TxidDB(outpoint.txid)) + .bind(outpoint.vout as i64); + + let result: Option = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + result + .map(|height| u32::try_from(height).wrap_err(BridgeError::IntConversionError)) + .transpose() + .map_err(Into::into) + } + + /// Checks if the utxo is spent, if so checks if the spending tx is finalized + /// Returns true if the utxo is spent and the spending tx is finalized, false otherwise + pub async fn check_if_utxo_spending_tx_is_finalized( + &self, + tx: Option>, + outpoint: OutPoint, + current_chain_height: u32, + finality_depth: u32, + ) -> Result { + let spending_tx_height = self.get_block_height_of_spending_txid(tx, outpoint).await?; + match spending_tx_height { + Some(spending_tx_height) => { + if spending_tx_height > current_chain_height + || current_chain_height - spending_tx_height < finality_depth + { + return Ok(false); + } + Ok(true) + } + None => Ok(false), + } + } + + /// Gets all the spent utxos for a given txid + pub async fn get_spent_utxos_for_txid( + &self, + tx: Option>, + txid: Txid, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT block_id, txid, vout FROM bitcoin_syncer_spent_utxos WHERE spending_txid = $1", + ) + .bind(TxidDB(txid)); + + let spent_utxos: Vec<(i64, TxidDB, i64)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + spent_utxos + .into_iter() + .map( + |(block_id, txid, vout)| -> Result<(i64, OutPoint), BridgeError> { + let vout = u32::try_from(vout).wrap_err(BridgeError::IntConversionError)?; + Ok((block_id, OutPoint { txid: txid.0, vout })) + }, + 
) + .collect::, BridgeError>>() + } + + /// Adds a bitcoin syncer event to the database. These events can currently be new block or reorged block. + pub async fn insert_event( + &self, + tx: Option>, + event_type: BitcoinSyncerEvent, + ) -> Result<(), BridgeError> { + let query = match event_type { + BitcoinSyncerEvent::NewBlock(block_id) => sqlx::query( + "INSERT INTO bitcoin_syncer_events (block_id, event_type) VALUES ($1, 'new_block'::bitcoin_syncer_event_type)", + ) + .bind(i32::try_from(block_id).wrap_err(BridgeError::IntConversionError)?), + BitcoinSyncerEvent::ReorgedBlock(block_id) => sqlx::query( + "INSERT INTO bitcoin_syncer_events (block_id, event_type) VALUES ($1, 'reorged_block'::bitcoin_syncer_event_type)", + ) + .bind(i32::try_from(block_id).wrap_err(BridgeError::IntConversionError)?), + }; + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + /// Returns the last processed Bitcoin Syncer event's block height for given consumer. + /// If the last processed event is missing, i.e. there are no processed events for the consumer, returns `None`. 
+ pub async fn get_last_processed_event_block_height( + &self, + tx: Option>, + consumer_handle: &str, + ) -> Result, BridgeError> { + let query = sqlx::query_scalar::<_, i32>( + r#"SELECT bs.height + FROM bitcoin_syncer_event_handlers bseh + INNER JOIN bitcoin_syncer_events bse ON bseh.last_processed_event_id = bse.id + INNER JOIN bitcoin_syncer bs ON bse.block_id = bs.id + WHERE bseh.consumer_handle = $1"#, + ) + .bind(consumer_handle); + + let result: Option = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + result + .map(|h| { + u32::try_from(h) + .wrap_err(BridgeError::IntConversionError) + .map_err(BridgeError::from) + }) + .transpose() + } + + /// Gets the last processed event id for a given consumer + pub async fn get_last_processed_event_id( + &self, + tx: DatabaseTransaction<'_, '_>, + consumer_handle: &str, + ) -> Result { + // Step 1: Insert the consumer_handle if it doesn't exist + sqlx::query( + r#" + INSERT INTO bitcoin_syncer_event_handlers (consumer_handle, last_processed_event_id) + VALUES ($1, 0) + ON CONFLICT (consumer_handle) DO NOTHING + "#, + ) + .bind(consumer_handle) + .execute(tx.deref_mut()) + .await?; + + // Step 2: Get the last processed event ID for this consumer + let last_processed_event_id: i32 = sqlx::query_scalar( + r#" + SELECT last_processed_event_id + FROM bitcoin_syncer_event_handlers + WHERE consumer_handle = $1 + "#, + ) + .bind(consumer_handle) + .fetch_one(tx.deref_mut()) + .await?; + + Ok(last_processed_event_id) + } + + /// Returns the maximum block height of the blocks that have been processed by the given consumer. + /// If the last processed event is missing, i.e. there are no processed events for the consumer, returns `None`. 
+ pub async fn get_max_processed_block_height( + &self, + tx: Option>, + consumer_handle: &str, + ) -> Result, BridgeError> { + let query = sqlx::query_scalar::<_, Option>( + r#"SELECT MAX(bs.height) + FROM bitcoin_syncer_events bse + INNER JOIN bitcoin_syncer bs ON bse.block_id = bs.id + WHERE bse.id <= ( + SELECT last_processed_event_id + FROM bitcoin_syncer_event_handlers + WHERE consumer_handle = $1 + )"#, + ) + .bind(consumer_handle); + + let result: Option = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + + result + .map(|h| { + u32::try_from(h) + .wrap_err(BridgeError::IntConversionError) + .map_err(BridgeError::from) + }) + .transpose() + } + + /// Returns the next finalized block height that should be processed by the given consumer. + /// If there are no processed events, returns the paramset start height. + /// Next height is the max height of the processed block - finality depth + 1. + pub async fn get_next_finalized_block_height_for_consumer( + &self, + tx: Option>, + consumer_handle: &str, + paramset: &'static ProtocolParamset, + ) -> Result { + let max_processed_block_height = self + .get_max_processed_block_height(tx, consumer_handle) + .await?; + + let max_processed_finalized_block_height = match max_processed_block_height { + Some(max_processed_block_height) => { + max_processed_block_height.checked_sub(paramset.finality_depth) + } + None => None, + }; + + let next_height = max_processed_finalized_block_height + .map(|h| h + 1) + .unwrap_or(paramset.start_height); + + Ok(std::cmp::max(next_height, paramset.start_height)) + } + + /// Fetches the next bitcoin syncer event for a given consumer + /// This function is used to fetch the next event that hasn't been processed yet + /// It will return the event which includes the event type and the block id + /// The last updated event id is also updated to the id that is returned + /// If there are no more events to fetch, None is returned + pub async fn fetch_next_bitcoin_syncer_evt( + 
&self, + tx: DatabaseTransaction<'_, '_>, + consumer_handle: &str, + ) -> Result, BridgeError> { + // Get the last processed event ID for this consumer + let last_processed_event_id = self + .get_last_processed_event_id(tx, consumer_handle) + .await?; + + // Retrieve the next event that hasn't been processed yet + let event = sqlx::query_as::<_, (i32, i32, String)>( + r#" + SELECT id, block_id, event_type::text + FROM bitcoin_syncer_events + WHERE id > $1 + ORDER BY id ASC + LIMIT 1 + "#, + ) + .bind(last_processed_event_id) + .fetch_optional(tx.deref_mut()) + .await?; + + if event.is_none() { + return Ok(None); + } + + let event = event.expect("should exist since we checked is_none()"); + let event_id = event.0; + let event_type: BitcoinSyncerEvent = (event.2, event.1).try_into()?; + + // Update last_processed_event_id for this consumer + sqlx::query( + r#" + UPDATE bitcoin_syncer_event_handlers + SET last_processed_event_id = $1 + WHERE consumer_handle = $2 + "#, + ) + .bind(event_id) + .bind(consumer_handle) + .execute(tx.deref_mut()) + .await?; + + Ok(Some(event_type)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::database::Database; + use crate::test::common::*; + use bitcoin::hashes::Hash; + use bitcoin::{BlockHash, CompactTarget}; + + async fn setup_test_db() -> Database { + let config = create_test_config_with_thread_name().await; + Database::new(&config).await.unwrap() + } + + #[tokio::test] + async fn test_event_handling() { + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // Create a test block + let prev_block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x1F; 32])); + let block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x45; 32])); + let height = 0x45; + + let block_id = db + .insert_block_info(Some(&mut dbtx), &block_hash, &prev_block_hash, height) + .await + .unwrap(); + + // Add new block event + db.insert_event(Some(&mut dbtx), 
BitcoinSyncerEvent::NewBlock(block_id)) + .await + .unwrap(); + + // Test event consumption + let consumer_handle = "test_consumer"; + let event = db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, consumer_handle) + .await + .unwrap(); + + assert!(matches!(event, Some(BitcoinSyncerEvent::NewBlock(id)) if id == block_id)); + + // Test that the same event is not returned twice + let event = db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, consumer_handle) + .await + .unwrap(); + assert!(event.is_none()); + + // Add reorg event + db.insert_event(Some(&mut dbtx), BitcoinSyncerEvent::ReorgedBlock(block_id)) + .await + .unwrap(); + + // Test that new event is received + let event = db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, consumer_handle) + .await + .unwrap(); + assert!(matches!(event, Some(BitcoinSyncerEvent::ReorgedBlock(id)) if id == block_id)); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn test_store_and_get_block() { + let db = setup_test_db().await; + let block_height = 123u32; + + // Create a dummy block + let dummy_header = bitcoin::block::Header { + version: bitcoin::block::Version::TWO, + prev_blockhash: BlockHash::from_raw_hash(Hash::from_byte_array([0x42; 32])), + merkle_root: bitcoin::TxMerkleNode::all_zeros(), + time: 1_000_000, + bits: CompactTarget::from_consensus(0), + nonce: 12345, + }; + + let dummy_txs = vec![bitcoin::Transaction { + version: bitcoin::blockdata::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![], + output: vec![], + }]; + + let dummy_block = bitcoin::Block { + header: dummy_header, + txdata: dummy_txs.clone(), + }; + + let dummy_block_hash = dummy_block.block_hash(); + + // Store the block + db.upsert_full_block(None, &dummy_block, block_height) + .await + .unwrap(); + + // Retrieve the block + let retrieved_block = db + .get_full_block(None, block_height) + .await + .unwrap() + .unwrap(); + + // Verify block fields match + assert_eq!(retrieved_block, dummy_block); + + // 
Retrieve the block + let retrieved_block_from_hash = db + .get_full_block_from_hash(None, dummy_block_hash) + .await + .unwrap() + .unwrap() + .1; + + // Verify block fields match + assert_eq!(retrieved_block_from_hash, dummy_block); + + // Non-existent block should return None + assert!(db.get_full_block(None, 999).await.unwrap().is_none()); + + // Overwrite the block + let updated_dummy_header = bitcoin::block::Header { + version: bitcoin::block::Version::ONE, // Changed version + ..dummy_header + }; + let updated_dummy_block = bitcoin::Block { + header: updated_dummy_header, + txdata: dummy_txs.clone(), + }; + + let updated_dummy_block_hash = updated_dummy_block.block_hash(); + + db.upsert_full_block(None, &updated_dummy_block, block_height) + .await + .unwrap(); + + // Verify the update worked + let retrieved_updated_block = db + .get_full_block(None, block_height) + .await + .unwrap() + .unwrap(); + assert_eq!(updated_dummy_block, retrieved_updated_block); + + let retrieved_updated_block_from_hash = db + .get_full_block_from_hash(None, updated_dummy_block_hash) + .await + .unwrap() + .unwrap() + .1; + assert_eq!(updated_dummy_block, retrieved_updated_block_from_hash); + } + + #[tokio::test] + async fn test_multiple_event_consumers() { + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // Create a test block + let prev_block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x1F; 32])); + let block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x45; 32])); + let height = 0x45; + + let block_id = db + .insert_block_info(Some(&mut dbtx), &block_hash, &prev_block_hash, height) + .await + .unwrap(); + + // Add events + db.insert_event(Some(&mut dbtx), BitcoinSyncerEvent::NewBlock(block_id)) + .await + .unwrap(); + db.insert_event(Some(&mut dbtx), BitcoinSyncerEvent::ReorgedBlock(block_id)) + .await + .unwrap(); + + // Test with multiple consumers + let consumer1 = "consumer1"; + let consumer2 = "consumer2"; + 
+ // First consumer gets both events in order + let event1 = db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, consumer1) + .await + .unwrap(); + assert!(matches!(event1, Some(BitcoinSyncerEvent::NewBlock(id)) if id == block_id)); + + let event2 = db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, consumer1) + .await + .unwrap(); + assert!(matches!(event2, Some(BitcoinSyncerEvent::ReorgedBlock(id)) if id == block_id)); + + // Second consumer also gets both events independently + let event1 = db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, consumer2) + .await + .unwrap(); + assert!(matches!(event1, Some(BitcoinSyncerEvent::NewBlock(id)) if id == block_id)); + + let event2 = db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, consumer2) + .await + .unwrap(); + assert!(matches!(event2, Some(BitcoinSyncerEvent::ReorgedBlock(id)) if id == block_id)); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn test_non_canonical_blocks() { + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // Create a chain of blocks + let prev_block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x1F; 32])); + let heights = [1, 2, 3, 4, 5]; + let mut last_hash = prev_block_hash; + + // Save some initial blocks. + let mut block_ids = Vec::new(); + for height in heights { + let block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([height as u8; 32])); + let block_id = db + .insert_block_info(Some(&mut dbtx), &block_hash, &last_hash, height) + .await + .unwrap(); + block_ids.push(block_id); + last_hash = block_hash; + } + + // Mark blocks above height 2 as non-canonical. 
+ let non_canonical_blocks = db + .update_non_canonical_block_hashes(Some(&mut dbtx), 2) + .await + .unwrap(); + assert_eq!(non_canonical_blocks.len(), 3); + assert_eq!(non_canonical_blocks, vec![3, 4, 5]); + + // Verify blocks above height 2 are not returned + for height in heights { + let block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([height as u8; 32])); + let block_info = db + .get_block_info_from_hash(Some(&mut dbtx), block_hash) + .await + .unwrap(); + + if height <= 2 { + assert!(block_info.is_some()); + } else { + assert!(block_info.is_none()); + } + } + + // Verify max height is now 2 + let max_height = db.get_max_height(Some(&mut dbtx)).await.unwrap().unwrap(); + assert_eq!(max_height, 2); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn add_get_block_info() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + let prev_block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x1F; 32])); + let block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x45; 32])); + let height = 0x45; + + assert!(db + .get_block_info_from_hash(None, block_hash) + .await + .unwrap() + .is_none()); + + db.insert_block_info(None, &block_hash, &prev_block_hash, height) + .await + .unwrap(); + let block_info = db + .get_block_info_from_hash(None, block_hash) + .await + .unwrap() + .unwrap(); + let max_height = db.get_max_height(None).await.unwrap().unwrap(); + assert_eq!(block_info.0, prev_block_hash); + assert_eq!(block_info.1, height); + assert_eq!(max_height, height); + + db.insert_block_info( + None, + &BlockHash::from_raw_hash(Hash::from_byte_array([0x1; 32])), + &prev_block_hash, + height - 1, + ) + .await + .unwrap(); + let max_height = db.get_max_height(None).await.unwrap().unwrap(); + assert_eq!(max_height, height); + + db.insert_block_info( + None, + &BlockHash::from_raw_hash(Hash::from_byte_array([0x2; 32])), + &prev_block_hash, + height + 1, + ) + .await + 
.unwrap(); + let max_height = db.get_max_height(None).await.unwrap().unwrap(); + assert_ne!(max_height, height); + assert_eq!(max_height, height + 1); + } + + #[tokio::test] + async fn add_and_get_txids_from_block() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let mut dbtx = db.begin_transaction().await.unwrap(); + + assert!(db + .insert_txid_to_block(&mut dbtx, 0, &Txid::all_zeros()) + .await + .is_err()); + let mut dbtx = db.begin_transaction().await.unwrap(); + + let prev_block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x1F; 32])); + let block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x45; 32])); + let height = 0x45; + let block_id = db + .insert_block_info(Some(&mut dbtx), &block_hash, &prev_block_hash, height) + .await + .unwrap(); + + let txids = vec![ + Txid::from_raw_hash(Hash::from_byte_array([0x1; 32])), + Txid::from_raw_hash(Hash::from_byte_array([0x2; 32])), + Txid::from_raw_hash(Hash::from_byte_array([0x3; 32])), + ]; + for txid in &txids { + db.insert_txid_to_block(&mut dbtx, block_id, txid) + .await + .unwrap(); + } + + let txids_from_db = db.get_block_txids(Some(&mut dbtx), block_id).await.unwrap(); + assert_eq!(txids_from_db, txids); + + assert!(db + .get_block_txids(Some(&mut dbtx), block_id + 1) + .await + .unwrap() + .is_empty()); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn insert_get_spent_utxos() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let mut dbtx = db.begin_transaction().await.unwrap(); + + let prev_block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x1F; 32])); + let block_hash = BlockHash::from_raw_hash(Hash::from_byte_array([0x45; 32])); + let height = 0x45; + let block_id = db + .insert_block_info(Some(&mut dbtx), &block_hash, &prev_block_hash, height) + .await + .unwrap(); + + let spending_txid = 
Txid::from_raw_hash(Hash::from_byte_array([0x2; 32])); + let txid = Txid::from_raw_hash(Hash::from_byte_array([0x1; 32])); + let vout = 0; + db.insert_txid_to_block(&mut dbtx, block_id, &spending_txid) + .await + .unwrap(); + + assert_eq!( + db.get_spent_utxos_for_txid(Some(&mut dbtx), txid) + .await + .unwrap() + .len(), + 0 + ); + + db.insert_spent_utxo(&mut dbtx, block_id, &spending_txid, &txid, vout) + .await + .unwrap(); + + let spent_utxos = db + .get_spent_utxos_for_txid(Some(&mut dbtx), spending_txid) + .await + .unwrap(); + assert_eq!(spent_utxos.len(), 1); + assert_eq!(spent_utxos[0].0, block_id as i64); + assert_eq!( + spent_utxos[0].1, + bitcoin::OutPoint { + txid, + vout: vout as u32, + } + ); + + dbtx.commit().await.unwrap(); + } +} diff --git a/core/src/database/header_chain_prover.rs b/core/src/database/header_chain_prover.rs new file mode 100644 index 000000000..88bdf1ee7 --- /dev/null +++ b/core/src/database/header_chain_prover.rs @@ -0,0 +1,900 @@ +//! # Header Chain Prover Related Database Operations +//! +//! This module includes database functions which are mainly used by the header +//! chain prover. + +use super::{ + wrapper::{BlockHashDB, BlockHeaderDB}, + Database, DatabaseTransaction, +}; +use crate::{errors::BridgeError, execute_query_with_tx, extended_bitcoin_rpc::ExtendedBitcoinRpc}; +use bitcoin::{ + block::{self, Header}, + BlockHash, +}; +use eyre::Context; +use risc0_zkvm::Receipt; + +impl Database { + /// Adds a new finalized block to the database, later to be updated with a + /// proof. 
+ pub async fn save_unproven_finalized_block( + &self, + tx: Option>, + block_hash: block::BlockHash, + block_header: block::Header, + block_height: u64, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO header_chain_proofs (block_hash, block_header, prev_block_hash, height) VALUES ($1, $2, $3, $4) + ON CONFLICT (block_hash) DO NOTHING", + ) + .bind(BlockHashDB(block_hash)).bind(BlockHeaderDB(block_header)).bind(BlockHashDB(block_header.prev_blockhash)).bind(block_height as i64); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + /// Collect block info from rpc and save it to hcp table. + async fn save_block_infos_within_range( + &self, + rpc: &ExtendedBitcoinRpc, + height_start: u32, + height_end: u32, + ) -> Result<(), BridgeError> { + const BATCH_SIZE: u32 = 100; + + for batch_start in (height_start..=height_end).step_by(BATCH_SIZE as usize) { + let batch_end = std::cmp::min(batch_start + BATCH_SIZE - 1, height_end); + + // Collect all block headers in this batch + let mut block_infos = Vec::with_capacity((batch_end - batch_start + 1) as usize); + for height in batch_start..=batch_end { + let (block_hash, block_header) = + rpc.get_block_info_by_height(height as u64).await?; + block_infos.push((block_hash, block_header, height)); + } + + // Save all blocks in this batch + let mut db_tx = self.begin_transaction().await?; + for (block_hash, block_header, height) in block_infos { + self.save_unproven_finalized_block( + Some(&mut db_tx), + block_hash, + block_header, + height as u64, + ) + .await?; + } + db_tx.commit().await?; + } + Ok(()) + } + + /// This function assumes there are no blocks or some contiguous blocks starting from 0 already in the table. + /// Saves the block hashes and headers until given height(exclusive) + /// as they are needed for spv and hcp proofs. 
+ pub async fn fetch_and_save_missing_blocks( + &self, + rpc: &ExtendedBitcoinRpc, + genesis_height: u32, + until_height: u32, + ) -> Result<(), BridgeError> { + if until_height == 0 { + return Ok(()); + } + let max_height = self.get_latest_finalized_block_height(None).await?; + if let Some(max_height) = max_height { + if max_height < until_height as u64 { + self.save_block_infos_within_range(rpc, max_height as u32 + 1, until_height - 1) + .await?; + } + } else { + tracing::debug!("Saving blocks from start until {}", until_height); + self.save_block_infos_within_range(rpc, genesis_height, until_height - 1) + .await?; + } + Ok(()) + } + + /// Returns block hash and header for a given range of heights. Ranges are + /// inclusive on both ends. + pub async fn get_block_info_from_range( + &self, + tx: Option>, + start_height: u64, + end_height: u64, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT block_hash, block_header + FROM header_chain_proofs + WHERE height >= $1 AND height <= $2 + ORDER BY height ASC;", + ) + .bind(start_height as i64) + .bind(end_height as i64); + + let result: Vec<(BlockHashDB, BlockHeaderDB)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + let result = result + .iter() + .map(|result| (result.0 .0, result.1 .0)) + .collect::>(); + + Ok(result) + } + + /// Returns the previous block hash and header for a given block hash. + /// + /// # Returns + /// + /// Returns `None` if the block hash is not found. 
+ /// + /// - [`BlockHash`] - Previous block's hash + /// - [`Header`] - Block's header + /// - [`u32`] - Block's height + pub async fn get_block_info_from_hash_hcp( + &self, + tx: Option>, + block_hash: BlockHash, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT prev_block_hash, block_header, height FROM header_chain_proofs WHERE block_hash = $1", + ) + .bind(BlockHashDB(block_hash)); + let result: Option<(BlockHashDB, BlockHeaderDB, i64)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + result + .map(|result| -> Result<(BlockHash, Header, u32), BridgeError> { + let height = result.2.try_into().wrap_err("Can't convert i64 to u32")?; + Ok((result.0 .0, result.1 .0, height)) + }) + .transpose() + } + + /// Returns latest finalized blocks height from the database. + pub async fn get_latest_finalized_block_height( + &self, + tx: Option>, + ) -> Result, BridgeError> { + let query = + sqlx::query_as("SELECT height FROM header_chain_proofs ORDER BY height DESC LIMIT 1;"); + + let result: Option<(i64,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + Ok(result.map(|height| height.0 as u64)) + } + + /// Gets the first finalized block after the latest proven block (i.e. proof != null). + /// This block will be the candidate block for the prover. + /// + /// # Returns + /// + /// Returns `None` if either no proved blocks are exists or blockchain tip + /// is already proven. + /// + /// - [`BlockHash`] - Hash of the block + /// - [`Header`] - Header of the block + /// - [`u64`] - Height of the block + /// - [`Receipt`] - Previous block's proof + pub async fn get_next_unproven_block( + &self, + mut tx: Option>, + ) -> Result, BridgeError> { + let latest_proven_block_height = self + .get_latest_proven_block_info(tx.as_deref_mut()) + .await? 
+ .map(|(_, _, height)| height); + + let query = sqlx::query_as( + "SELECT h1.block_hash, + h1.block_header, + h1.height, + h2.proof + FROM header_chain_proofs h1 + JOIN header_chain_proofs h2 ON h1.prev_block_hash = h2.block_hash + WHERE h2.proof IS NOT NULL AND h1.proof IS NULL + ORDER BY h1.height DESC + LIMIT 1", + ); + + let result: Option<(BlockHashDB, BlockHeaderDB, i64, Vec)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + let result = match result { + Some(result) => { + let receipt: Receipt = + borsh::from_slice(&result.3).wrap_err(BridgeError::BorshError)?; + let height: u64 = result.2.try_into().wrap_err("Can't convert i64 to u64")?; + Some((result.0 .0, result.1 .0, height, receipt)) + } + None => None, + }; + + // If the latest block is already proven, return None instead of the old + // unproven block. + if let (Some((_, _, height, _)), Some(latest_proven_block_height)) = + (&result, latest_proven_block_height) + { + if *height < latest_proven_block_height { + return Ok(None); + } + } + + Ok(result) + } + + /// Gets the newest n number of block's info that their previous block has + /// proven before. These blocks will be the candidate blocks for the prover. + /// + /// # Returns + /// + /// Returns `None` if either no proved blocks are exists or blockchain tip + /// is already proven. + /// + /// - [`BlockHash`] - Hash of last block in the batch + /// - [`Header`] - Headers of the blocks + /// - [`u64`] - Height of the last block in the batch + /// - [`Receipt`] - Previous block's proof + pub async fn get_next_n_non_proven_block( + &self, + count: u32, + ) -> Result, Receipt)>, BridgeError> { + let Some(next_non_proven_block) = self.get_next_unproven_block(None).await? 
else { + return Ok(None); + }; + + let query = sqlx::query_as( + "SELECT block_hash, + block_header, + height + FROM header_chain_proofs + WHERE height >= $1 + ORDER BY height ASC + LIMIT $2;", + ) + .bind(next_non_proven_block.2 as i64) + .bind(count as i64); + let result: Vec<(BlockHashDB, BlockHeaderDB, i64)> = execute_query_with_tx!( + self.connection, + None::, + query, + fetch_all + )?; + + let blocks = result + .iter() + .map(|result| { + let height = result.2.try_into().wrap_err("Can't convert i64 to u64")?; + + Ok((result.0 .0, result.1 .0, height)) + }) + .collect::, BridgeError>>()?; + + // If not yet enough entries are found, return `None`. + if blocks.len() != count as usize { + tracing::error!( + "Non proven block count: {}, required count: {}", + blocks.len(), + count + ); + return Ok(None); + } + + Ok(Some((blocks, next_non_proven_block.3))) + } + + /// Gets the latest block's info that it's proven. + /// + /// # Returns + /// + /// Returns `None` if no block is proven. + /// + /// - [`BlockHash`] - Hash of the block + /// - [`Header`] - Header of the block + /// - [`u64`] - Height of the block + pub async fn get_latest_proven_block_info( + &self, + tx: Option>, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT block_hash, block_header, height + FROM header_chain_proofs + WHERE proof IS NOT NULL + ORDER BY height DESC + LIMIT 1;", + ); + + let result: Option<(BlockHashDB, BlockHeaderDB, i64)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + let result = match result { + Some(result) => { + let height = result.2.try_into().wrap_err("Can't convert i64 to u64")?; + Some((result.0 .0, result.1 .0, height)) + } + None => None, + }; + + Ok(result) + } + + /// Gets the latest block's info that it's proven and has height less than or equal to the given height. + /// + /// # Returns + /// + /// Returns `None` if no block is proven. 
+ /// + /// - [`BlockHash`] - Hash of the block + /// - [`Header`] - Header of the block + /// - [`u64`] - Height of the block + pub async fn get_latest_proven_block_info_until_height( + &self, + tx: Option>, + height: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT block_hash, block_header, height + FROM header_chain_proofs + WHERE proof IS NOT NULL AND height <= $1 + ORDER BY height DESC + LIMIT 1;", + ) + .bind(height as i64); + + let result: Option<(BlockHashDB, BlockHeaderDB, i64)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + let result = match result { + Some(result) => { + let height = result.2.try_into().wrap_err("Can't convert i64 to u64")?; + Some((result.0 .0, result.1 .0, height)) + } + None => None, + }; + + Ok(result) + } + + /// Sets an existing block's (in database) proof by referring to it by it's + /// hash. + pub async fn set_block_proof( + &self, + tx: Option>, + hash: block::BlockHash, + proof: Receipt, + ) -> Result<(), BridgeError> { + let proof = borsh::to_vec(&proof).wrap_err(BridgeError::BorshError)?; + + let query = sqlx::query("UPDATE header_chain_proofs SET proof = $1 WHERE block_hash = $2") + .bind(proof) + .bind(BlockHashDB(hash)); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + /// Gets a block's proof by referring to it by it's hash. 
+ pub async fn get_block_proof_by_hash( + &self, + tx: Option>, + hash: block::BlockHash, + ) -> Result, BridgeError> { + let query = sqlx::query_as("SELECT proof FROM header_chain_proofs WHERE block_hash = $1") + .bind(BlockHashDB(hash)); + + let receipt: (Option>,) = + execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + let receipt = match receipt.0 { + Some(r) => r, + None => return Ok(None), + }; + + let receipt: Receipt = borsh::from_slice(&receipt).wrap_err(BridgeError::BorshError)?; + + Ok(Some(receipt)) + } +} + +#[cfg(test)] +mod tests { + use crate::database::Database; + use crate::test::common::*; + use bitcoin::block::{self, Header, Version}; + use bitcoin::hashes::Hash; + use bitcoin::{BlockHash, CompactTarget, TxMerkleNode}; + use borsh::BorshDeserialize; + use risc0_zkvm::Receipt; + + #[tokio::test] + async fn save_get_new_block() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + assert!(db + .get_latest_finalized_block_height(None) + .await + .unwrap() + .is_none()); + + // Set first block, so that get_non_proven_block won't return error. 
+ let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0, + bits: CompactTarget::default(), + nonce: 0, + }, + txdata: vec![], + }; + let block_hash = block.block_hash(); + let height = 1; + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + assert_eq!( + db.get_latest_finalized_block_height(None) + .await + .unwrap() + .unwrap(), + height + ); + let receipt = Receipt::try_from_slice(include_bytes!("../test/data/first_1.bin")).unwrap(); + db.set_block_proof(None, block_hash, receipt).await.unwrap(); + let latest_proven_block = db + .get_latest_proven_block_info(None) + .await + .unwrap() + .unwrap(); + assert_eq!(latest_proven_block.0, block_hash); + assert_eq!(latest_proven_block.1, block.header); + assert_eq!(latest_proven_block.2, height); + + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: block_hash, + merkle_root: TxMerkleNode::all_zeros(), + time: 1, + bits: CompactTarget::default(), + nonce: 1, + }, + txdata: vec![], + }; + let block_hash = block.block_hash(); + let height = 2; + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + + let (read_block_hash, read_block_header, _, _) = + db.get_next_unproven_block(None).await.unwrap().unwrap(); + assert_eq!(block_hash, read_block_hash); + assert_eq!(block.header, read_block_header); + } + + #[tokio::test] + pub async fn save_get_block_proof() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + // Save dummy block. 
+ let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45, + }, + txdata: vec![], + }; + let block_hash = block.block_hash(); + let height = 0x45; + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + + // Requesting proof for an existing block without a proof should + // return `None`. + let read_receipt = db.get_block_proof_by_hash(None, block_hash).await.unwrap(); + assert!(read_receipt.is_none()); + + // Update it with a proof. + let receipt = Receipt::try_from_slice(include_bytes!("../test/data/first_1.bin")).unwrap(); + db.set_block_proof(None, block_hash, receipt.clone()) + .await + .unwrap(); + + let read_receipt = db + .get_block_proof_by_hash(None, block_hash) + .await + .unwrap() + .unwrap(); + assert_eq!(receipt.journal, read_receipt.journal); + assert_eq!(receipt.metadata, read_receipt.metadata); + } + + #[tokio::test] + pub async fn get_non_proven_block() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + assert!(db.get_next_unproven_block(None).await.unwrap().is_none()); + assert!(db + .get_latest_proven_block_info(None) + .await + .unwrap() + .is_none()); + + let base_height = 0x45; + + // Save initial block without a proof. 
+ let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45, + }, + txdata: vec![], + }; + let block_hash = block.block_hash(); + let height = base_height; + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + assert!(db.get_next_unproven_block(None).await.unwrap().is_none()); + assert!(db + .get_latest_proven_block_info(None) + .await + .unwrap() + .is_none()); + + // Save second block with a proof. + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: block_hash, + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45 + 1, + }, + txdata: vec![], + }; + let block_hash1 = block.block_hash(); + let height1 = base_height + 1; + db.save_unproven_finalized_block(None, block_hash1, block.header, height1) + .await + .unwrap(); + let receipt = Receipt::try_from_slice(include_bytes!("../test/data/first_1.bin")).unwrap(); + db.set_block_proof(None, block_hash1, receipt.clone()) + .await + .unwrap(); + assert!(db.get_next_unproven_block(None).await.unwrap().is_none()); + let latest_proven_block = db + .get_latest_proven_block_info(None) + .await + .unwrap() + .unwrap(); + assert_eq!(latest_proven_block.0, block_hash1); + assert_eq!(latest_proven_block.1, block.header); + assert_eq!(latest_proven_block.2 as u64, height1); + + // Save third block without a proof. 
+ let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: block_hash1, + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45 + 3, + }, + txdata: vec![], + }; + let block_hash2 = block.block_hash(); + let height2 = base_height + 2; + db.save_unproven_finalized_block(None, block_hash2, block.header, height2) + .await + .unwrap(); + + // This time, `get_non_proven_block` should return third block's details. + let res = db.get_next_unproven_block(None).await.unwrap().unwrap(); + assert_eq!(res.0, block_hash2); + assert_eq!(res.2 as u64, height2); + + // Save fourth block with a proof. + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: block_hash1, + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45 + 4, + }, + txdata: vec![], + }; + let block_hash3 = block.block_hash(); + let height3 = base_height + 3; + db.save_unproven_finalized_block(None, block_hash3, block.header, height3) + .await + .unwrap(); + db.set_block_proof(None, block_hash3, receipt.clone()) + .await + .unwrap(); + + // This time, `get_non_proven_block` shouldn't return any block because latest is proved. + assert!(db.get_next_unproven_block(None).await.unwrap().is_none()); + + // Save fifth block without a proof. + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: block_hash1, + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45 + 5, + }, + txdata: vec![], + }; + let block_hash4 = block.block_hash(); + let height4 = base_height + 4; + db.save_unproven_finalized_block(None, block_hash4, block.header, height4) + .await + .unwrap(); + + // This time, `get_non_proven_block` should return fifth block's details. 
+ let res = db.get_next_unproven_block(None).await.unwrap().unwrap(); + assert_eq!(res.2 as u64, height4); + assert_eq!(res.0, block_hash4); + } + + #[tokio::test] + pub async fn get_non_proven_blocks() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + let batch_size = config.protocol_paramset().header_chain_proof_batch_size; + + assert!(db + .get_next_n_non_proven_block(batch_size) + .await + .unwrap() + .is_none()); + assert!(db.get_next_unproven_block(None).await.unwrap().is_none()); + assert!(db + .get_latest_proven_block_info(None) + .await + .unwrap() + .is_none()); + + let mut height = 0x45; + + // Save initial block without a proof. + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45, + }, + txdata: vec![], + }; + let block_hash = block.block_hash(); + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + assert!(db + .get_next_n_non_proven_block(batch_size) + .await + .unwrap() + .is_none()); + assert!(db.get_next_unproven_block(None).await.unwrap().is_none()); + assert!(db + .get_latest_proven_block_info(None) + .await + .unwrap() + .is_none()); + + // Save second block with a proof. 
+ let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: block_hash, + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45 + 1, + }, + txdata: vec![], + }; + let block_hash1 = block.block_hash(); + height += 1; + db.save_unproven_finalized_block(None, block_hash1, block.header, height) + .await + .unwrap(); + let receipt = Receipt::try_from_slice(include_bytes!("../test/data/first_1.bin")).unwrap(); + db.set_block_proof(None, block_hash1, receipt.clone()) + .await + .unwrap(); + assert!(db + .get_next_n_non_proven_block(batch_size) + .await + .unwrap() + .is_none()); + assert!(db.get_next_unproven_block(None).await.unwrap().is_none()); + let latest_proven_block = db + .get_latest_proven_block_info(None) + .await + .unwrap() + .unwrap(); + assert_eq!(latest_proven_block.0, block_hash1); + assert_eq!(latest_proven_block.1, block.header); + assert_eq!(latest_proven_block.2 as u64, height); + + // Save next blocks without a proof. + let mut blocks: Vec<(BlockHash, u32)> = Vec::new(); + let mut prev_block_hash = block_hash1; + for i in 0..batch_size { + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: prev_block_hash, + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45 + 2 + i, + }, + txdata: vec![], + }; + let block_hash = block.block_hash(); + + height += 1; + prev_block_hash = block_hash; + + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + + blocks.push((block_hash, height.try_into().unwrap())); + } + + // This time, `get_non_proven_block` should return third block's details. 
+ let res = db + .get_next_n_non_proven_block(batch_size) + .await + .unwrap() + .unwrap(); + assert_eq!(res.0.len(), batch_size as usize); + for i in 0..batch_size { + let i = i as usize; + assert_eq!(res.0[i].2, blocks[i].1 as u64); + assert_eq!(res.0[i].0, blocks[i].0); + } + } + + #[tokio::test] + async fn get_block_info_from_range() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + let start_height = 0x45; + let end_height = 0x55; + assert!(db + .get_block_info_from_range(None, start_height, end_height) + .await + .unwrap() + .is_empty()); + + let mut infos = Vec::new(); + + for height in start_height..end_height { + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: height as u32, + }, + txdata: vec![], + }; + let block_hash = block.block_hash(); + + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + infos.push((block_hash, block.header)); + + let res = db + .get_block_info_from_range(None, start_height, height) + .await + .unwrap(); + assert_eq!(res.len() as u64, height - start_height + 1); + assert_eq!(infos, res); + } + } + + #[tokio::test] + async fn get_latest_proven_block_info() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + let proof = Receipt::try_from_slice(include_bytes!("../test/data/first_1.bin")).unwrap(); + + assert!(db + .get_latest_proven_block_info(None) + .await + .unwrap() + .is_none()); + + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45, + }, + txdata: vec![], + }; + let mut block_hash = block.block_hash(); + let mut height = 0x45; + 
db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + assert!(db + .get_latest_proven_block_info(None) + .await + .unwrap() + .is_none()); + + for i in 0..3 { + let block = block::Block { + header: Header { + version: Version::TWO, + prev_blockhash: block_hash, + merkle_root: TxMerkleNode::all_zeros(), + time: 0x1F, + bits: CompactTarget::default(), + nonce: 0x45 + i, + }, + txdata: vec![], + }; + block_hash = block.block_hash(); + height += 1; + + db.save_unproven_finalized_block(None, block_hash, block.header, height) + .await + .unwrap(); + db.set_block_proof(None, block_hash, proof.clone()) + .await + .unwrap(); + + let latest_proven_block = db + .get_latest_proven_block_info(None) + .await + .unwrap() + .unwrap(); + assert_eq!(latest_proven_block.0, block_hash); + assert_eq!(latest_proven_block.1, block.header); + assert_eq!(latest_proven_block.2, height); + } + } +} diff --git a/core/src/database/mod.rs b/core/src/database/mod.rs new file mode 100644 index 000000000..0a21e3616 --- /dev/null +++ b/core/src/database/mod.rs @@ -0,0 +1,239 @@ +//! # Database Operations +//! +//! Database crate provides functions that adds/reads values from PostgreSQL +//! database. +//! +//! **Warning:** This crate won't configure PostgreSQL itself and excepts admin +//! privileges to create/drop databases. + +use std::time::Duration; + +use crate::{config::BridgeConfig, errors::BridgeError}; +use alloy::transports::http::reqwest::Url; +use eyre::Context; +use secrecy::ExposeSecret; +use sqlx::postgres::PgConnectOptions; +use sqlx::ConnectOptions; +use sqlx::{Pool, Postgres}; + +mod aggregator; +mod bitcoin_syncer; +mod header_chain_prover; +mod operator; +#[cfg(feature = "automation")] +mod state_machine; +#[cfg(all(test, feature = "automation"))] +mod test; +#[cfg(feature = "automation")] +mod tx_sender; +mod verifier; +mod wrapper; + +#[cfg(test)] +pub use wrapper::*; + +/// PostgreSQL database connection details. 
+#[derive(Clone, Debug)] +pub struct Database { + connection: Pool, +} + +/// Database transaction for Postgres. +pub type DatabaseTransaction<'a, 'b> = &'a mut sqlx::Transaction<'b, Postgres>; + +/// Executes a query with a transaction if it is provided. +/// +/// # Parameters +/// +/// - `$conn`: Database connection. +/// - `$tx`: Optional database transaction +/// - `$query`: Query to execute. +/// - `$method`: Method to execute on the query. +#[macro_export] +macro_rules! execute_query_with_tx { + ($conn:expr, $tx:expr, $query:expr, $method:ident) => { + match $tx { + Some(tx) => $query.$method(&mut **tx).await, + None => $query.$method(&$conn).await, + } + }; +} + +impl Database { + /// Establishes a new connection to a PostgreSQL database with given + /// configuration. + /// + /// # Errors + /// + /// Returns a [`BridgeError`] if database is not accessible. + pub async fn new(config: &BridgeConfig) -> Result { + let url = Database::get_postgresql_database_url(config); + let url = Url::parse(&url).wrap_err("Failed to parse database URL")?; + let mut opt = PgConnectOptions::from_url(&url).map_err(BridgeError::DatabaseError)?; + // Change default sqlx warnings from Warn to Debug + // These logs really clutter our CI logs, and they were never useful. + // But in the future if we fix slow statements (if they are actually a problem?), we can revert this. 
+ opt = opt.log_slow_statements(log::LevelFilter::Debug, Duration::from_secs(3)); + + let opts = sqlx::postgres::PgPoolOptions::new().acquire_slow_level(log::LevelFilter::Debug); + + #[cfg(test)] + let opts = if config.test_params.timeout_params.any_timeout() { + // increase timeout for pool connections beyond any other to avoid flakiness + opts.acquire_timeout(Duration::from_secs(10000)) + .acquire_slow_threshold(Duration::from_secs(10000)) + } else { + opts + }; + + let connection = opts + .connect_with(opt) + .await + .map_err(BridgeError::DatabaseError)?; + + Ok(Self { connection }) + } + + /// Closes database connection. + pub async fn close(&self) { + self.connection.close().await; + } + + pub fn get_pool(&self) -> Pool { + self.connection.clone() + } + + pub async fn is_pgmq_installed( + &self, + tx: Option>, + ) -> Result { + let query = sqlx::query_as::<_, (i64,)>( + "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'pgmq' AND table_name = 'meta'" + ); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + + Ok(result.0 > 0) + } + + /// Runs the schema script on a database for the given configuration. + /// + /// # Errors + /// + /// Will return [`BridgeError`] if there was a problem with database + /// connection. + pub async fn run_schema_script( + config: &BridgeConfig, + is_verifier: bool, + ) -> Result<(), BridgeError> { + let database = Database::new(config).await?; + + sqlx::raw_sql(include_str!("schema.sql")) + .execute(&database.connection) + .await?; + if is_verifier { + // Check if PGMQ schema already exists + let is_pgmq_installed = database.is_pgmq_installed(None).await?; + + // Only execute PGMQ setup if it doesn't exist + if !is_pgmq_installed { + sqlx::raw_sql(include_str!("pgmq.sql")) + .execute(&database.connection) + .await?; + } + } + + database.close().await; + Ok(()) + } + + /// Prepares a valid PostgreSQL URL. 
+ /// + /// URL contains user, password, host and port fields, which are picked from + /// the given configuration. + pub fn get_postgresql_url(config: &BridgeConfig) -> String { + "postgresql://".to_owned() + + config.db_user.expose_secret() + + ":" + + config.db_password.expose_secret() + + "@" + + &config.db_host + + ":" + + &config.db_port.to_string() + } + + /// Prepares a valid PostgreSQL URL to a specific database. + /// + /// URL contains user, password, host, port and database name fields, which + /// are picked from the given configuration. + pub fn get_postgresql_database_url(config: &BridgeConfig) -> String { + Database::get_postgresql_url(config) + "/" + &config.db_name + } + + /// Starts a database transaction. + /// + /// Return value can be used for committing changes. If not committed, + /// database will rollback every operation done after that call. + pub async fn begin_transaction( + &self, + ) -> Result, BridgeError> { + Ok(self.connection.begin().await?) + } +} + +#[cfg(test)] +mod tests { + use crate::test::common::*; + use crate::{config::BridgeConfig, database::Database}; + + #[tokio::test] + async fn valid_database_connection() { + let config = create_test_config_with_thread_name().await; + + Database::new(&config).await.unwrap(); + } + + #[tokio::test] + #[should_panic] + async fn invalid_database_connection() { + let mut config = BridgeConfig::new(); + config.db_host = "nonexistinghost".to_string(); + config.db_name = "nonexistingpassword".to_string(); + config.db_user = "nonexistinguser".to_string().into(); + config.db_password = "nonexistingpassword".to_string().into(); + config.db_port = 123; + + Database::new(&config).await.unwrap(); + } + + #[test] + fn get_postgresql_url() { + let mut config = BridgeConfig::new(); + + config.db_password = "sofun".to_string().into(); + config.db_port = 45; + config.db_user = "iam".to_string().into(); + config.db_host = "parties".to_string(); + + assert_eq!( + &Database::get_postgresql_url(&config), 
+ "postgresql://iam:sofun@parties:45" + ); + } + + #[test] + fn get_postgresql_database_url() { + let mut config = BridgeConfig::new(); + + config.db_name = "times".to_string(); + config.db_password = "funnier".to_string().into(); + config.db_port = 45; + config.db_user = "butyouare".to_string().into(); + config.db_host = "parties".to_string(); + + assert_eq!( + &Database::get_postgresql_database_url(&config), + "postgresql://butyouare:funnier@parties:45/times" + ); + } +} diff --git a/core/src/database/operator.rs b/core/src/database/operator.rs new file mode 100644 index 000000000..00646620a --- /dev/null +++ b/core/src/database/operator.rs @@ -0,0 +1,1536 @@ +//! # Operator Related Database Operations +//! +//! This module includes database functions which are mainly used by an operator. + +use super::{ + wrapper::{ + AddressDB, DepositParamsDB, OutPointDB, ReceiptDB, SignaturesDB, TxidDB, XOnlyPublicKeyDB, + }, + Database, DatabaseTransaction, +}; +use crate::{ + builder::transaction::create_move_to_vault_txhandler, + config::protocol::ProtocolParamset, + deposit::{DepositData, KickoffData, OperatorData}, + operator::RoundIndex, +}; +use crate::{ + errors::BridgeError, + execute_query_with_tx, + operator::PublicHash, + rpc::clementine::{DepositSignatures, TaggedSignature}, +}; +use bitcoin::{OutPoint, Txid, XOnlyPublicKey}; +use bitvm::signatures::winternitz; +use bitvm::signatures::winternitz::PublicKey as WinternitzPublicKey; +use eyre::{eyre, Context}; +use risc0_zkvm::Receipt; +use std::str::FromStr; + +pub type RootHash = [u8; 32]; +//pub type PublicInputWots = Vec<[u8; 20]>; +pub type AssertTxHash = Vec<[u8; 32]>; + +pub type BitvmSetup = (AssertTxHash, RootHash, RootHash); + +impl Database { + /// Sets the operator details to the db. + /// This function additionally checks if the operator data already exists in the db. 
+ /// As we don't want to overwrite operator data on the db, as it can prevent us slash malicious operators that signed + /// previous deposits. This function should give an error if an operator changed its data. + pub async fn insert_operator_if_not_exists( + &self, + mut tx: Option>, + xonly_pubkey: XOnlyPublicKey, + wallet_address: &bitcoin::Address, + collateral_funding_outpoint: OutPoint, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO operators (xonly_pk, wallet_reimburse_address, collateral_funding_outpoint) + VALUES ($1, $2, $3) + ON CONFLICT (xonly_pk) DO NOTHING", + ) + .bind(XOnlyPublicKeyDB(xonly_pubkey)) + .bind(AddressDB(wallet_address.as_unchecked().clone())) + .bind(OutPointDB(collateral_funding_outpoint)); + + let result = execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, execute)?; + + // If no rows were affected, data already exists - check if it matches + if result.rows_affected() == 0 { + let existing = self.get_operator(tx, xonly_pubkey).await?; + if let Some(op) = existing { + if op.reimburse_addr != *wallet_address + || op.collateral_funding_outpoint != collateral_funding_outpoint + { + return Err(BridgeError::OperatorDataMismatch(xonly_pubkey)); + } + } + } + + Ok(()) + } + + pub async fn get_operators( + &self, + tx: Option>, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT xonly_pk, wallet_reimburse_address, collateral_funding_outpoint FROM operators;" + ); + + let operators: Vec<(XOnlyPublicKeyDB, AddressDB, OutPointDB)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + // Convert the result to the desired format + let data = operators + .into_iter() + .map(|(pk, addr, outpoint_db)| { + let xonly_pk = pk.0; + let addr = addr.0.assume_checked(); + let outpoint = outpoint_db.0; // Extract the Txid from TxidDB + Ok((xonly_pk, addr, outpoint)) + }) + .collect::, BridgeError>>()?; + Ok(data) + } + + pub async fn get_operator( + &self, + tx: Option>, + 
operator_xonly_pk: XOnlyPublicKey, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT xonly_pk, wallet_reimburse_address, collateral_funding_outpoint FROM operators WHERE xonly_pk = $1;" + ).bind(XOnlyPublicKeyDB(operator_xonly_pk)); + + let result: Option<(String, String, OutPointDB)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + None => Ok(None), + Some((_, addr, outpoint_db)) => { + // Convert the result to the desired format + let addr = bitcoin::Address::from_str(&addr) + .wrap_err("Invalid Address")? + .assume_checked(); + let outpoint = outpoint_db.0; // Extract the Txid from TxidDB + Ok(Some(OperatorData { + xonly_pk: operator_xonly_pk, + reimburse_addr: addr, + collateral_funding_outpoint: outpoint, + })) + } + } + } + + /// Sets the unspent kickoff sigs received from operators during initial setup. + /// Sigs of each round are stored together in the same row. + /// On conflict, do not update the existing sigs. Although technically, as long as kickoff winternitz keys + /// and operator data(collateral funding outpoint and reimburse address) are not changed, the sigs are still valid + /// even if they are changed. + pub async fn insert_unspent_kickoff_sigs_if_not_exist( + &self, + tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + round_idx: RoundIndex, + signatures: Vec, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO unspent_kickoff_signatures (xonly_pk, round_idx, signatures) VALUES ($1, $2, $3) + ON CONFLICT (xonly_pk, round_idx) DO NOTHING;", + ).bind(XOnlyPublicKeyDB(operator_xonly_pk)).bind(round_idx.to_index() as i32).bind(SignaturesDB(DepositSignatures{signatures})); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + /// Get unspent kickoff sigs for a specific operator and round. 
+ pub async fn get_unspent_kickoff_sigs( + &self, + tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + round_idx: RoundIndex, + ) -> Result>, BridgeError> { + let query = sqlx::query_as::<_, (SignaturesDB,)>("SELECT signatures FROM unspent_kickoff_signatures WHERE xonly_pk = $1 AND round_idx = $2") + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(round_idx.to_index() as i32); + + let result: Result<(SignaturesDB,), sqlx::Error> = + execute_query_with_tx!(self.connection, tx, query, fetch_one); + + match result { + Ok((SignaturesDB(signatures),)) => Ok(Some(signatures.signatures)), + Err(sqlx::Error::RowNotFound) => Ok(None), + Err(e) => Err(BridgeError::DatabaseError(e)), + } + } + + /// Sets Winternitz public keys for bitvm related inputs of an operator. + pub async fn insert_operator_bitvm_keys_if_not_exist( + &self, + mut tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + deposit_outpoint: OutPoint, + winternitz_public_key: Vec, + ) -> Result<(), BridgeError> { + let wpk = borsh::to_vec(&winternitz_public_key).wrap_err(BridgeError::BorshError)?; + let deposit_id = self + .get_deposit_id(tx.as_deref_mut(), deposit_outpoint) + .await?; + let query = sqlx::query( + "INSERT INTO operator_bitvm_winternitz_public_keys (xonly_pk, deposit_id, bitvm_winternitz_public_keys) VALUES ($1, $2, $3) + ON CONFLICT DO NOTHING;", + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?) + .bind(wpk); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + /// Gets Winternitz public keys for bitvm related inputs of an operator. 
+ pub async fn get_operator_bitvm_keys( + &self, + mut tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + deposit_outpoint: OutPoint, + ) -> Result, BridgeError> { + let deposit_id = self + .get_deposit_id(tx.as_deref_mut(), deposit_outpoint) + .await?; + let query = sqlx::query_as( + "SELECT bitvm_winternitz_public_keys FROM operator_bitvm_winternitz_public_keys WHERE xonly_pk = $1 AND deposit_id = $2;" + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?); + + let winternitz_pks: (Vec,) = + execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + + { + let operator_winternitz_pks: Vec = + borsh::from_slice(&winternitz_pks.0).wrap_err(BridgeError::BorshError)?; + Ok(operator_winternitz_pks) + } + } + + /// Sets Winternitz public keys (only for kickoff blockhash commit) for an operator. + /// On conflict, do not update the existing keys. This is very important, as otherwise the txids of + /// operators round tx's will change. 
+ pub async fn insert_operator_kickoff_winternitz_public_keys_if_not_exist( + &self, + mut tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + winternitz_public_key: Vec, + ) -> Result<(), BridgeError> { + let wpk = borsh::to_vec(&winternitz_public_key).wrap_err(BridgeError::BorshError)?; + + let query = sqlx::query( + "INSERT INTO operator_winternitz_public_keys (xonly_pk, winternitz_public_keys) + VALUES ($1, $2) + ON CONFLICT (xonly_pk) DO NOTHING", + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(wpk); + + let result = execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, execute)?; + + // If no rows were affected, data already exists - check if it matches + if result.rows_affected() == 0 { + let existing = self + .get_operator_kickoff_winternitz_public_keys(tx, operator_xonly_pk) + .await?; + if existing != winternitz_public_key { + return Err(BridgeError::OperatorWinternitzPublicKeysMismatch( + operator_xonly_pk, + )); + } + } + + Ok(()) + } + + /// Gets Winternitz public keys for every sequential collateral tx of an + /// operator and a watchtower. + pub async fn get_operator_kickoff_winternitz_public_keys( + &self, + tx: Option>, + op_xonly_pk: XOnlyPublicKey, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT winternitz_public_keys FROM operator_winternitz_public_keys WHERE xonly_pk = $1;", + ) + .bind(XOnlyPublicKeyDB(op_xonly_pk)); + + let wpks: (Vec,) = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + + let operator_winternitz_pks: Vec = + borsh::from_slice(&wpks.0).wrap_err(BridgeError::BorshError)?; + + Ok(operator_winternitz_pks) + } + + /// Sets public hashes for a specific operator, sequential collateral tx and + /// kickoff index combination. If there is hashes for given indexes, they + /// will be overwritten by the new hashes. 
+ pub async fn insert_operator_challenge_ack_hashes_if_not_exist( + &self, + mut tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + deposit_outpoint: OutPoint, + public_hashes: &Vec<[u8; 20]>, + ) -> Result<(), BridgeError> { + let deposit_id = self + .get_deposit_id(tx.as_deref_mut(), deposit_outpoint) + .await?; + let query = sqlx::query( + "INSERT INTO operators_challenge_ack_hashes (xonly_pk, deposit_id, public_hashes) + VALUES ($1, $2, $3) + ON CONFLICT (xonly_pk, deposit_id) DO NOTHING;", + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?) + .bind(public_hashes); + + let result = execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, execute)?; + + // If no rows were affected, data already exists - check if it matches + if result.rows_affected() == 0 { + let existing = self + .get_operators_challenge_ack_hashes(tx, operator_xonly_pk, deposit_outpoint) + .await?; + if let Some(existing_hashes) = existing { + if existing_hashes != *public_hashes { + return Err(BridgeError::OperatorChallengeAckHashesMismatch( + operator_xonly_pk, + deposit_outpoint, + )); + } + } + } + + Ok(()) + } + + /// Retrieves public hashes for a specific operator, sequential collateral + /// tx and kickoff index combination. 
+ pub async fn get_operators_challenge_ack_hashes( + &self, + mut tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + deposit_outpoint: OutPoint, + ) -> Result>, BridgeError> { + let deposit_id = self + .get_deposit_id(tx.as_deref_mut(), deposit_outpoint) + .await?; + let query = sqlx::query_as::<_, (Vec>,)>( + "SELECT public_hashes + FROM operators_challenge_ack_hashes + WHERE xonly_pk = $1 AND deposit_id = $2;", + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + Some((public_hashes,)) => { + let mut converted_hashes = Vec::new(); + for hash in public_hashes { + match hash.try_into() { + Ok(public_hash) => converted_hashes.push(public_hash), + Err(err) => { + tracing::error!("Failed to convert hash: {:?}", err); + return Err(eyre::eyre!("Failed to convert public hash").into()); + } + } + } + Ok(Some(converted_hashes)) + } + None => Ok(None), // If no result is found, return Ok(None) + } + } + + /// Saves deposit infos, and returns the deposit_id + /// This function additionally checks if the deposit data already exists in the db. + /// As we don't want to overwrite deposit data on the db, this function should give an error if deposit data is changed. + pub async fn insert_deposit_data_if_not_exists( + &self, + mut tx: Option>, + deposit_data: &mut DepositData, + paramset: &'static ProtocolParamset, + ) -> Result { + // compute move to vault txid + let move_to_vault_txid = create_move_to_vault_txhandler(deposit_data, paramset)? 
+ .get_cached_tx() + .compute_txid(); + + let query = sqlx::query_as::<_, (i32,)>( + "INSERT INTO deposits (deposit_outpoint, deposit_params, move_to_vault_txid) + VALUES ($1, $2, $3) + ON CONFLICT (deposit_outpoint) DO NOTHING + RETURNING deposit_id", + ) + .bind(OutPointDB(deposit_data.get_deposit_outpoint())) + .bind(DepositParamsDB(deposit_data.clone().into())) + .bind(TxidDB(move_to_vault_txid)); + + let result = + execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, fetch_optional)?; + + // If we got a deposit_id back, that means we successfully inserted new data + if let Some((deposit_id,)) = result { + return Ok(u32::try_from(deposit_id).wrap_err("Failed to convert deposit id to u32")?); + } + + // If no rows were returned, data already exists - check if it matches + let existing_query = sqlx::query_as::<_, (i32, DepositParamsDB, TxidDB)>( + "SELECT deposit_id, deposit_params, move_to_vault_txid FROM deposits WHERE deposit_outpoint = $1" + ) + .bind(OutPointDB(deposit_data.get_deposit_outpoint())); + + let (existing_deposit_id, existing_deposit_params, existing_move_txid): ( + i32, + DepositParamsDB, + TxidDB, + ) = execute_query_with_tx!(self.connection, tx, existing_query, fetch_one)?; + + let existing_deposit_data: DepositData = existing_deposit_params + .0 + .try_into() + .map_err(|e| eyre::eyre!("Invalid deposit params {e}"))?; + + if existing_deposit_data != *deposit_data { + tracing::error!( + "Deposit data mismatch: Existing {:?}, New {:?}", + existing_deposit_data, + deposit_data + ); + return Err(BridgeError::DepositDataMismatch( + deposit_data.get_deposit_outpoint(), + )); + } + + if existing_move_txid.0 != move_to_vault_txid { + // This should never happen, only a sanity check + tracing::error!( + "Move to vault txid mismatch in set_deposit_data: Existing {:?}, New {:?}", + existing_move_txid.0, + move_to_vault_txid + ); + return Err(BridgeError::DepositDataMismatch( + deposit_data.get_deposit_outpoint(), + )); + } + + // If data 
matches, return the existing deposit_id + Ok(u32::try_from(existing_deposit_id).wrap_err("Failed to convert deposit id to u32")?) + } + + pub async fn get_deposit_data_with_move_tx( + &self, + tx: Option>, + move_to_vault_txid: Txid, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (DepositParamsDB,)>( + "SELECT deposit_params FROM deposits WHERE move_to_vault_txid = $1;", + ) + .bind(TxidDB(move_to_vault_txid)); + + let result: Option<(DepositParamsDB,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + Some((deposit_params,)) => Ok(Some( + deposit_params + .0 + .try_into() + .map_err(|e| eyre::eyre!("Invalid deposit params {e}"))?, + )), + None => Ok(None), + } + } + + pub async fn get_deposit_data( + &self, + tx: Option>, + deposit_outpoint: OutPoint, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT deposit_id, deposit_params FROM deposits WHERE deposit_outpoint = $1;", + ) + .bind(OutPointDB(deposit_outpoint)); + + let result: Option<(i32, DepositParamsDB)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + Some((deposit_id, deposit_params)) => Ok(Some(( + u32::try_from(deposit_id).wrap_err("Failed to convert deposit id to u32")?, + deposit_params + .0 + .try_into() + .map_err(|e| eyre::eyre!("Invalid deposit params {e}"))?, + ))), + None => Ok(None), + } + } + + /// Saves the deposit signatures to the database for a single operator. + /// The signatures array is identified by the deposit_outpoint and operator_idx. + /// For the order of signatures, please check [`crate::builder::sighash::create_nofn_sighash_stream`] + /// which determines the order of the sighashes that are signed. 
+ #[allow(clippy::too_many_arguments)] + pub async fn insert_deposit_signatures_if_not_exist( + &self, + mut tx: Option>, + deposit_outpoint: OutPoint, + operator_xonly_pk: XOnlyPublicKey, + round_idx: RoundIndex, + kickoff_idx: usize, + kickoff_txid: Txid, + signatures: Vec, + ) -> Result<(), BridgeError> { + let deposit_id = self + .get_deposit_id(tx.as_deref_mut(), deposit_outpoint) + .await?; + + // First check if the entry already exists. + let query = sqlx::query_as( + "SELECT kickoff_txid FROM deposit_signatures + WHERE deposit_id = $1 AND operator_xonly_pk = $2 AND round_idx = $3 AND kickoff_idx = $4;", + ) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(round_idx.to_index() as i32) + .bind(kickoff_idx as i32); + let txid_and_signatures: Option<(TxidDB,)> = + execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, fetch_optional)?; + + if let Some((existing_kickoff_txid,)) = txid_and_signatures { + if existing_kickoff_txid.0 == kickoff_txid { + return Ok(()); + } else { + return Err(eyre!("Kickoff txid or signatures already set!").into()); + } + } + // On conflict, the previous signatures are already valid. Signatures only depend on deposit_outpoint (which depends on nofn pk) and + // operator_xonly_pk (also depends on nofn_pk, as each operator is also a verifier and nofn_pk depends on verifiers pk) + // Additionally operator collateral outpoint and reimbursement addr should be unchanged which we ensure in relevant db fns. + // We add on conflict clause so it doesn't fail if the signatures are already set. + // Why do we need to do this? If deposit fails somehow just at the end because movetx + // signature fails to be collected, we might need to do a deposit again. Technically we can only collect movetx signature, not + // do the full deposit. 
+ + let query = sqlx::query( + "INSERT INTO deposit_signatures (deposit_id, operator_xonly_pk, round_idx, kickoff_idx, kickoff_txid, signatures) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT DO NOTHING;" + ) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(round_idx.to_index() as i32) + .bind(kickoff_idx as i32) + .bind(TxidDB(kickoff_txid)) + .bind(SignaturesDB(DepositSignatures{signatures: signatures.clone()})); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + /// Gets a unique int for a deposit outpoint + pub async fn get_deposit_id( + &self, + tx: Option>, + deposit_outpoint: OutPoint, + ) -> Result { + let query = sqlx::query_as("INSERT INTO deposits (deposit_outpoint) + VALUES ($1) + ON CONFLICT (deposit_outpoint) DO UPDATE SET deposit_outpoint = deposits.deposit_outpoint + RETURNING deposit_id;") + .bind(OutPointDB(deposit_outpoint)); + + let deposit_id: Result<(i32,), sqlx::Error> = + execute_query_with_tx!(self.connection, tx, query, fetch_one); + Ok(u32::try_from(deposit_id?.0).wrap_err("Failed to convert deposit id to u32")?) + } + + /// For a given kickoff txid, get the deposit outpoint that corresponds to it + pub async fn get_deposit_outpoint_for_kickoff_txid( + &self, + tx: Option>, + kickoff_txid: Txid, + ) -> Result { + let query = sqlx::query_as::<_, (OutPointDB,)>( + "SELECT d.deposit_outpoint FROM deposit_signatures ds + INNER JOIN deposits d ON d.deposit_id = ds.deposit_id + WHERE ds.kickoff_txid = $1;", + ) + .bind(TxidDB(kickoff_txid)); + let result: (OutPointDB,) = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + Ok(result.0 .0) + } + + /// Retrieves the deposit signatures for a single operator for a single reimburse + /// process (single kickoff utxo). + /// The signatures are tagged so that each signature can be matched with the correct + /// txin it belongs to easily. 
+ pub async fn get_deposit_signatures( + &self, + tx: Option>, + deposit_outpoint: OutPoint, + operator_xonly_pk: XOnlyPublicKey, + round_idx: RoundIndex, + kickoff_idx: usize, + ) -> Result>, BridgeError> { + let query = sqlx::query_as::<_, (SignaturesDB,)>( + "SELECT ds.signatures FROM deposit_signatures ds + INNER JOIN deposits d ON d.deposit_id = ds.deposit_id + WHERE d.deposit_outpoint = $1 + AND ds.operator_xonly_pk = $2 + AND ds.round_idx = $3 + AND ds.kickoff_idx = $4;", + ) + .bind(OutPointDB(deposit_outpoint)) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(round_idx.to_index() as i32) + .bind(kickoff_idx as i32); + + let result: Result<(SignaturesDB,), sqlx::Error> = + execute_query_with_tx!(self.connection, tx, query, fetch_one); + + match result { + Ok((SignaturesDB(signatures),)) => Ok(Some(signatures.signatures)), + Err(sqlx::Error::RowNotFound) => Ok(None), + Err(e) => Err(BridgeError::DatabaseError(e)), + } + } + + /// Retrieves the light client proof for a deposit to be used while sending an assert. + pub async fn get_lcp_for_assert( + &self, + tx: Option>, + deposit_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (ReceiptDB,)>( + "SELECT lcp_receipt FROM lcp_for_asserts WHERE deposit_id = $1;", + ) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + Ok(result.map(|(lcp,)| lcp.0)) + } + + /// Saves the light client proof for a deposit to be used while sending an assert. + /// We save first before sending kickoff to be sure we have the LCP available if we need to assert. 
+ pub async fn insert_lcp_for_assert( + &self, + tx: Option>, + deposit_id: u32, + lcp: Receipt, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO lcp_for_asserts (deposit_id, lcp_receipt) + VALUES ($1, $2) + ON CONFLICT (deposit_id) DO NOTHING;", + ) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?) + .bind(ReceiptDB(lcp)); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + pub async fn get_deposit_data_with_kickoff_txid( + &self, + tx: Option>, + kickoff_txid: Txid, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (DepositParamsDB, XOnlyPublicKeyDB, i32, i32)>( + "SELECT d.deposit_params, ds.operator_xonly_pk, ds.round_idx, ds.kickoff_idx + FROM deposit_signatures ds + INNER JOIN deposits d ON d.deposit_id = ds.deposit_id + WHERE ds.kickoff_txid = $1;", + ) + .bind(TxidDB(kickoff_txid)); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + Some((deposit_params, operator_xonly_pk, round_idx, kickoff_idx)) => Ok(Some(( + deposit_params + .0 + .try_into() + .wrap_err("Can't convert deposit params")?, + KickoffData { + operator_xonly_pk: operator_xonly_pk.0, + round_idx: RoundIndex::from_index( + usize::try_from(round_idx) + .wrap_err("Failed to convert round idx to usize")?, + ), + kickoff_idx: u32::try_from(kickoff_idx) + .wrap_err("Failed to convert kickoff idx to u32")?, + }, + ))), + None => Ok(None), + } + } + + /// Sets BitVM setup data for a specific operator and deposit combination. + /// This function additionally checks if the BitVM setup data already exists in the db. + /// As we don't want to overwrite BitVM setup data on the db, as maliciously overwriting + /// can prevent us to regenerate previously signed kickoff tx's. 
+ pub async fn insert_bitvm_setup_if_not_exists( + &self, + mut tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + deposit_outpoint: OutPoint, + assert_tx_addrs: impl AsRef<[[u8; 32]]>, + root_hash: &[u8; 32], + latest_blockhash_root_hash: &[u8; 32], + ) -> Result<(), BridgeError> { + let deposit_id = self + .get_deposit_id(tx.as_deref_mut(), deposit_outpoint) + .await?; + + let query = sqlx::query( + "INSERT INTO bitvm_setups (xonly_pk, deposit_id, assert_tx_addrs, root_hash, latest_blockhash_root_hash) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (xonly_pk, deposit_id) DO NOTHING;", + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?) + .bind( + assert_tx_addrs + .as_ref() + .iter() + .map(|addr| addr.as_ref()) + .collect::>(), + ) + .bind(root_hash.to_vec()) + .bind(latest_blockhash_root_hash.to_vec()); + + let result = execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, execute)?; + + // If no rows were affected, data already exists - check if it matches + if result.rows_affected() == 0 { + let existing = self + .get_bitvm_setup(tx, operator_xonly_pk, deposit_outpoint) + .await?; + if let Some((existing_addrs, existing_root, existing_blockhash)) = existing { + let new_addrs = assert_tx_addrs.as_ref(); + if existing_addrs != new_addrs + || existing_root != *root_hash + || existing_blockhash != *latest_blockhash_root_hash + { + return Err(BridgeError::BitvmSetupDataMismatch( + operator_xonly_pk, + deposit_outpoint, + )); + } + } + } + + Ok(()) + } + + /// Retrieves BitVM setup data for a specific operator, sequential collateral tx and kickoff index combination + pub async fn get_bitvm_setup( + &self, + mut tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + deposit_outpoint: OutPoint, + ) -> Result, BridgeError> { + let deposit_id = self + .get_deposit_id(tx.as_deref_mut(), deposit_outpoint) + .await?; + let query = sqlx::query_as::<_, (Vec>, Vec, Vec)>( + "SELECT 
assert_tx_addrs, root_hash, latest_blockhash_root_hash + FROM bitvm_setups + WHERE xonly_pk = $1 AND deposit_id = $2;", + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + Some((assert_tx_addrs, root_hash, latest_blockhash_root_hash)) => { + // Convert root_hash Vec back to [u8; 32] + let root_hash_array: [u8; 32] = root_hash + .try_into() + .map_err(|_| eyre::eyre!("root_hash must be 32 bytes"))?; + let latest_blockhash_root_hash_array: [u8; 32] = latest_blockhash_root_hash + .try_into() + .map_err(|_| eyre::eyre!("latest_blockhash_root_hash must be 32 bytes"))?; + + let assert_tx_addrs: Vec<[u8; 32]> = assert_tx_addrs + .into_iter() + .map(|addr| { + let mut addr_array = [0u8; 32]; + addr_array.copy_from_slice(&addr); + addr_array + }) + .collect(); + + Ok(Some(( + assert_tx_addrs, + root_hash_array, + latest_blockhash_root_hash_array, + ))) + } + None => Ok(None), + } + } + + pub async fn mark_kickoff_connector_as_used( + &self, + tx: Option>, + round_idx: RoundIndex, + kickoff_connector_idx: u32, + kickoff_txid: Option, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO used_kickoff_connectors (round_idx, kickoff_connector_idx, kickoff_txid) + VALUES ($1, $2, $3) + ON CONFLICT (round_idx, kickoff_connector_idx) DO NOTHING;", + ) + .bind(round_idx.to_index() as i32) + .bind( + i32::try_from(kickoff_connector_idx) + .wrap_err("Failed to convert kickoff connector idx to i32")?, + ) + .bind(kickoff_txid.map(TxidDB)); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + pub async fn get_kickoff_connector_for_kickoff_txid( + &self, + tx: Option>, + kickoff_txid: Txid, + ) -> Result<(RoundIndex, u32), BridgeError> { + let query = sqlx::query_as::<_, (i32, i32)>( + "SELECT round_idx, kickoff_connector_idx FROM 
used_kickoff_connectors WHERE kickoff_txid = $1;", + ) + .bind(TxidDB(kickoff_txid)); + + let result: (i32, i32) = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + Ok(( + RoundIndex::from_index( + result + .0 + .try_into() + .wrap_err(BridgeError::IntConversionError)?, + ), + result + .1 + .try_into() + .wrap_err(BridgeError::IntConversionError)?, + )) + } + + pub async fn get_kickoff_txid_for_used_kickoff_connector( + &self, + tx: Option>, + round_idx: RoundIndex, + kickoff_connector_idx: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (Option,)>( + "SELECT kickoff_txid FROM used_kickoff_connectors WHERE round_idx = $1 AND kickoff_connector_idx = $2;", + ) + .bind(round_idx.to_index() as i32) + .bind(i32::try_from(kickoff_connector_idx).wrap_err("Failed to convert kickoff connector idx to i32")?); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + Some((txid,)) => Ok(txid.map(|txid| txid.0)), + None => Ok(None), + } + } + + pub async fn get_unused_and_signed_kickoff_connector( + &self, + tx: Option>, + deposit_id: u32, + operator_xonly_pk: XOnlyPublicKey, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (i32, i32)>( + "WITH current_round AS ( + SELECT round_idx + FROM current_round_index + WHERE id = 1 + ) + SELECT + ds.round_idx as round_idx, + ds.kickoff_idx as kickoff_connector_idx + FROM deposit_signatures ds + CROSS JOIN current_round cr + WHERE ds.deposit_id = $1 -- Parameter for deposit_id + AND ds.operator_xonly_pk = $2 + AND ds.round_idx >= cr.round_idx + AND NOT EXISTS ( + SELECT 1 + FROM used_kickoff_connectors ukc + WHERE ukc.round_idx = ds.round_idx + AND ukc.kickoff_connector_idx = ds.kickoff_idx + ) + ORDER BY ds.round_idx ASC + LIMIT 1;", + ) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?) 
+ .bind(XOnlyPublicKeyDB(operator_xonly_pk)); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match result { + Some((round_idx, kickoff_connector_idx)) => Ok(Some(( + RoundIndex::from_index( + usize::try_from(round_idx).wrap_err("Failed to convert round idx to u32")?, + ), + u32::try_from(kickoff_connector_idx) + .wrap_err("Failed to convert kickoff connector idx to u32")?, + ))), + None => Ok(None), + } + } + + pub async fn get_current_round_index( + &self, + tx: Option>, + ) -> Result { + let query = + sqlx::query_as::<_, (i32,)>("SELECT round_idx FROM current_round_index WHERE id = 1"); + let result = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + Ok(RoundIndex::from_index( + usize::try_from(result.0).wrap_err(BridgeError::IntConversionError)?, + )) + } + + pub async fn update_current_round_index( + &self, + tx: Option>, + round_idx: RoundIndex, + ) -> Result<(), BridgeError> { + let query = sqlx::query("UPDATE current_round_index SET round_idx = $1 WHERE id = 1") + .bind(round_idx.to_index() as i32); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::bitvm_client::{SECP, UNSPENDABLE_XONLY_PUBKEY}; + use crate::operator::{Operator, RoundIndex}; + use crate::rpc::clementine::{ + DepositSignatures, NormalSignatureKind, NumberedSignatureKind, TaggedSignature, + }; + use crate::test::common::citrea::MockCitreaClient; + use crate::{database::Database, test::common::*}; + use bitcoin::hashes::Hash; + use bitcoin::key::constants::SCHNORR_SIGNATURE_SIZE; + use bitcoin::key::Keypair; + use bitcoin::{Address, OutPoint, Txid, XOnlyPublicKey}; + use std::str::FromStr; + + #[tokio::test] + async fn test_set_get_operator() { + let config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + let mut ops = Vec::new(); + let operator_xonly_pks = [generate_random_xonly_pk(), 
generate_random_xonly_pk()]; + let reimburse_addrs = [ + Address::from_str("bc1q6d6cztycxjpm7p882emln0r04fjqt0kqylvku2") + .unwrap() + .assume_checked(), + Address::from_str("bc1qj2mw4uh24qf67kn4nyqfsnta0mmxcutvhkyfp9") + .unwrap() + .assume_checked(), + ]; + for i in 0..2 { + let txid_str = format!( + "16b3a5951cb816afeb9dab8a30d0ece7acd3a7b34437436734edd1b72b6bf0{:02x}", + i + ); + let txid = Txid::from_str(&txid_str).unwrap(); + ops.push(( + operator_xonly_pks[i], + reimburse_addrs[i].clone(), + OutPoint { + txid, + vout: i as u32, + }, + )); + } + + // Test inserting multiple operators + for x in ops.iter() { + database + .insert_operator_if_not_exists(None, x.0, &x.1, x.2) + .await + .unwrap(); + } + + // Test getting all operators + let res = database.get_operators(None).await.unwrap(); + assert_eq!(res.len(), ops.len()); + for i in 0..2 { + assert_eq!(res[i].0, ops[i].0); + assert_eq!(res[i].1, ops[i].1); + assert_eq!(res[i].2, ops[i].2); + } + + // Test getting single operator + let res_single = database + .get_operator(None, operator_xonly_pks[1]) + .await + .unwrap() + .unwrap(); + assert_eq!(res_single.xonly_pk, ops[1].0); + assert_eq!(res_single.reimburse_addr, ops[1].1); + assert_eq!(res_single.collateral_funding_outpoint, ops[1].2); + + // Test that we can insert the same data without errors + database + .insert_operator_if_not_exists(None, ops[0].0, &ops[0].1, ops[0].2) + .await + .unwrap(); + + // Test updating operator data + let new_reimburse_addr = Address::from_str("bc1qj2mw4uh24qf67kn4nyqfsnta0mmxcutvhkyfp9") + .unwrap() + .assume_checked(); + let new_collateral_funding_outpoint = OutPoint { + txid: Txid::from_byte_array([2u8; 32]), + vout: 1, + }; + + // test that we can't update the reimburse address + assert!(database + .insert_operator_if_not_exists( + None, + operator_xonly_pks[0], + &reimburse_addrs[0], + new_collateral_funding_outpoint + ) + .await + .is_err()); + + // test that we can't update the collateral funding outpoint + 
assert!(database + .insert_operator_if_not_exists( + None, + operator_xonly_pks[0], + &new_reimburse_addr, + ops[0].2 + ) + .await + .is_err()); + + // test that we can't update both + assert!(database + .insert_operator_if_not_exists( + None, + operator_xonly_pks[0], + &new_reimburse_addr, + new_collateral_funding_outpoint + ) + .await + .is_err()); + + // Verify data remains unchanged after failed updates + let res_unchanged = database + .get_operator(None, operator_xonly_pks[0]) + .await + .unwrap() + .unwrap(); + assert_eq!(res_unchanged.xonly_pk, ops[0].0); + assert_eq!(res_unchanged.reimburse_addr, ops[0].1); + assert_eq!(res_unchanged.collateral_funding_outpoint, ops[0].2); + } + + #[tokio::test] + async fn test_set_get_operator_challenge_ack_hashes() { + let config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + + let public_hashes = vec![[1u8; 20], [2u8; 20]]; + let new_public_hashes = vec![[3u8; 20], [4u8; 20]]; + + let deposit_outpoint = OutPoint { + txid: Txid::from_byte_array([1u8; 32]), + vout: 0, + }; + + let operator_xonly_pk = generate_random_xonly_pk(); + let non_existent_xonly_pk = generate_random_xonly_pk(); + + // Test inserting new data + database + .insert_operator_challenge_ack_hashes_if_not_exist( + None, + operator_xonly_pk, + deposit_outpoint, + &public_hashes, + ) + .await + .unwrap(); + + // Retrieve and verify + let result = database + .get_operators_challenge_ack_hashes(None, operator_xonly_pk, deposit_outpoint) + .await + .unwrap(); + assert_eq!(result, Some(public_hashes.clone())); + + // Test that we can insert the same data without errors + database + .insert_operator_challenge_ack_hashes_if_not_exist( + None, + operator_xonly_pk, + deposit_outpoint, + &public_hashes, + ) + .await + .unwrap(); + + // Test non-existent entry + let non_existent = database + .get_operators_challenge_ack_hashes(None, non_existent_xonly_pk, deposit_outpoint) + .await + .unwrap(); + 
assert!(non_existent.is_none()); + + // Test that we can't update with different data + assert!(database + .insert_operator_challenge_ack_hashes_if_not_exist( + None, + operator_xonly_pk, + deposit_outpoint, + &new_public_hashes, + ) + .await + .is_err()); + + // Verify data remains unchanged after failed update + let result = database + .get_operators_challenge_ack_hashes(None, operator_xonly_pk, deposit_outpoint) + .await + .unwrap(); + assert_eq!(result, Some(public_hashes)); + } + + #[tokio::test] + async fn test_save_get_unspent_kickoff_sigs() { + let config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + + let round_idx = 1; + let signatures = DepositSignatures { + signatures: vec![ + TaggedSignature { + signature_id: Some((NumberedSignatureKind::UnspentKickoff1, 1).into()), + signature: vec![0x1F; SCHNORR_SIGNATURE_SIZE], + }, + TaggedSignature { + signature_id: Some((NumberedSignatureKind::UnspentKickoff2, 1).into()), + signature: (vec![0x2F; SCHNORR_SIGNATURE_SIZE]), + }, + TaggedSignature { + signature_id: Some((NumberedSignatureKind::UnspentKickoff1, 2).into()), + signature: vec![0x1F; SCHNORR_SIGNATURE_SIZE], + }, + TaggedSignature { + signature_id: Some((NumberedSignatureKind::UnspentKickoff2, 2).into()), + signature: (vec![0x2F; SCHNORR_SIGNATURE_SIZE]), + }, + ], + }; + + let operator_xonly_pk = generate_random_xonly_pk(); + let non_existent_xonly_pk = generate_random_xonly_pk(); + + database + .insert_unspent_kickoff_sigs_if_not_exist( + None, + operator_xonly_pk, + RoundIndex::Round(round_idx), + signatures.signatures.clone(), + ) + .await + .unwrap(); + + let result = database + .get_unspent_kickoff_sigs(None, operator_xonly_pk, RoundIndex::Round(round_idx)) + .await + .unwrap() + .unwrap(); + assert_eq!(result, signatures.signatures); + + let non_existent = database + .get_unspent_kickoff_sigs(None, non_existent_xonly_pk, RoundIndex::Round(round_idx)) + .await + .unwrap(); + 
assert!(non_existent.is_none()); + + let non_existent = database + .get_unspent_kickoff_sigs( + None, + non_existent_xonly_pk, + RoundIndex::Round(round_idx + 1), + ) + .await + .unwrap(); + assert!(non_existent.is_none()); + } + + #[tokio::test] + async fn test_bitvm_setup() { + let config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + + let assert_tx_hashes: Vec<[u8; 32]> = vec![[1u8; 32], [4u8; 32]]; + let root_hash = [42u8; 32]; + let latest_blockhash_root_hash = [43u8; 32]; + + let deposit_outpoint = OutPoint { + txid: Txid::from_byte_array([1u8; 32]), + vout: 0, + }; + let operator_xonly_pk = generate_random_xonly_pk(); + let non_existent_xonly_pk = generate_random_xonly_pk(); + + // Test inserting new BitVM setup + database + .insert_bitvm_setup_if_not_exists( + None, + operator_xonly_pk, + deposit_outpoint, + &assert_tx_hashes, + &root_hash, + &latest_blockhash_root_hash, + ) + .await + .unwrap(); + + // Retrieve and verify + let result = database + .get_bitvm_setup(None, operator_xonly_pk, deposit_outpoint) + .await + .unwrap() + .unwrap(); + assert_eq!(result.0, assert_tx_hashes); + assert_eq!(result.1, root_hash); + assert_eq!(result.2, latest_blockhash_root_hash); + + // Test that we can insert the same data without errors + database + .insert_bitvm_setup_if_not_exists( + None, + operator_xonly_pk, + deposit_outpoint, + &assert_tx_hashes, + &root_hash, + &latest_blockhash_root_hash, + ) + .await + .unwrap(); + + // Test non-existent entry + let non_existent = database + .get_bitvm_setup(None, non_existent_xonly_pk, deposit_outpoint) + .await + .unwrap(); + assert!(non_existent.is_none()); + + // Test updating BitVM setup data + let new_assert_tx_hashes: Vec<[u8; 32]> = vec![[2u8; 32], [5u8; 32]]; + let new_root_hash = [44u8; 32]; + let new_latest_blockhash_root_hash = [45u8; 32]; + + // test that we can't update the assert_tx_hashes + assert!(database + .insert_bitvm_setup_if_not_exists( + None, 
+ operator_xonly_pk, + deposit_outpoint, + &new_assert_tx_hashes, + &root_hash, + &latest_blockhash_root_hash, + ) + .await + .is_err()); + + // test that we can't update the root_hash + assert!(database + .insert_bitvm_setup_if_not_exists( + None, + operator_xonly_pk, + deposit_outpoint, + &assert_tx_hashes, + &new_root_hash, + &latest_blockhash_root_hash, + ) + .await + .is_err()); + + // test that we can't update the latest_blockhash_root_hash + assert!(database + .insert_bitvm_setup_if_not_exists( + None, + operator_xonly_pk, + deposit_outpoint, + &assert_tx_hashes, + &root_hash, + &new_latest_blockhash_root_hash, + ) + .await + .is_err()); + + // test that we can't update all of them + assert!(database + .insert_bitvm_setup_if_not_exists( + None, + operator_xonly_pk, + deposit_outpoint, + &new_assert_tx_hashes, + &new_root_hash, + &new_latest_blockhash_root_hash, + ) + .await + .is_err()); + + // Verify data remains unchanged after failed updates + let result = database + .get_bitvm_setup(None, operator_xonly_pk, deposit_outpoint) + .await + .unwrap() + .unwrap(); + assert_eq!(result.0, assert_tx_hashes); + assert_eq!(result.1, root_hash); + assert_eq!(result.2, latest_blockhash_root_hash); + } + + #[tokio::test] + async fn upsert_get_operator_winternitz_public_keys() { + let mut config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + let _regtest = create_regtest_rpc(&mut config).await; + + let operator = Operator::::new(config.clone()) + .await + .unwrap(); + let op_xonly_pk = + XOnlyPublicKey::from_keypair(&Keypair::from_secret_key(&SECP, &config.secret_key)).0; + let deposit_outpoint = OutPoint { + txid: Txid::from_slice(&[0x45; 32]).unwrap(), + vout: 0x1F, + }; + let wpks = operator + .generate_assert_winternitz_pubkeys(deposit_outpoint) + .unwrap(); + + // Test inserting new data + database + .insert_operator_kickoff_winternitz_public_keys_if_not_exist( + None, + op_xonly_pk, + wpks.clone(), + ) + 
.await + .unwrap(); + + let result = database + .get_operator_kickoff_winternitz_public_keys(None, op_xonly_pk) + .await + .unwrap(); + assert_eq!(result, wpks); + + // Test that we can insert the same data without errors + database + .insert_operator_kickoff_winternitz_public_keys_if_not_exist( + None, + op_xonly_pk, + wpks.clone(), + ) + .await + .unwrap(); + + // Test that we can't update with different data + let different_wpks = operator + .generate_assert_winternitz_pubkeys(OutPoint { + txid: Txid::from_slice(&[0x46; 32]).unwrap(), + vout: 0x1F, + }) + .unwrap(); + assert!(database + .insert_operator_kickoff_winternitz_public_keys_if_not_exist( + None, + op_xonly_pk, + different_wpks + ) + .await + .is_err()); + + let non_existent = database + .get_operator_kickoff_winternitz_public_keys(None, *UNSPENDABLE_XONLY_PUBKEY) + .await; + assert!(non_existent.is_err()); + } + + #[tokio::test] + async fn upsert_get_operator_bitvm_wpks() { + let mut config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + let _regtest = create_regtest_rpc(&mut config).await; + + let operator = Operator::::new(config.clone()) + .await + .unwrap(); + let op_xonly_pk = + XOnlyPublicKey::from_keypair(&Keypair::from_secret_key(&SECP, &config.secret_key)).0; + let deposit_outpoint = OutPoint { + txid: Txid::from_slice(&[0x45; 32]).unwrap(), + vout: 0x1F, + }; + let wpks = operator + .generate_assert_winternitz_pubkeys(deposit_outpoint) + .unwrap(); + + database + .insert_operator_bitvm_keys_if_not_exist( + None, + op_xonly_pk, + deposit_outpoint, + wpks.clone(), + ) + .await + .unwrap(); + + let result = database + .get_operator_bitvm_keys(None, op_xonly_pk, deposit_outpoint) + .await + .unwrap(); + assert_eq!(result, wpks); + + let non_existent = database + .get_operator_kickoff_winternitz_public_keys(None, *UNSPENDABLE_XONLY_PUBKEY) + .await; + assert!(non_existent.is_err()); + } + + #[tokio::test] + async fn 
upsert_get_deposit_signatures() { + let config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + + let operator_xonly_pk = generate_random_xonly_pk(); + let unset_operator_xonly_pk = generate_random_xonly_pk(); + let deposit_outpoint = OutPoint { + txid: Txid::from_slice(&[0x45; 32]).unwrap(), + vout: 0x1F, + }; + let round_idx = 1; + let kickoff_idx = 1; + let signatures = DepositSignatures { + signatures: vec![ + TaggedSignature { + signature_id: Some(NormalSignatureKind::Reimburse1.into()), + signature: vec![0x1F; SCHNORR_SIGNATURE_SIZE], + }, + TaggedSignature { + signature_id: Some((NumberedSignatureKind::OperatorChallengeNack1, 1).into()), + signature: (vec![0x2F; SCHNORR_SIGNATURE_SIZE]), + }, + ], + }; + + database + .insert_deposit_signatures_if_not_exist( + None, + deposit_outpoint, + operator_xonly_pk, + RoundIndex::Round(round_idx), + kickoff_idx, + Txid::all_zeros(), + signatures.signatures.clone(), + ) + .await + .unwrap(); + // Setting this twice should not cause any issues + database + .insert_deposit_signatures_if_not_exist( + None, + deposit_outpoint, + operator_xonly_pk, + RoundIndex::Round(round_idx), + kickoff_idx, + Txid::all_zeros(), + signatures.signatures.clone(), + ) + .await + .unwrap(); + // But with different kickoff txid and signatures should. 
+ assert!(database + .insert_deposit_signatures_if_not_exist( + None, + deposit_outpoint, + operator_xonly_pk, + RoundIndex::Round(round_idx), + kickoff_idx, + Txid::from_slice(&[0x1F; 32]).unwrap(), + signatures.signatures.clone(), + ) + .await + .is_err()); + + let result = database + .get_deposit_signatures( + None, + deposit_outpoint, + operator_xonly_pk, + RoundIndex::Round(round_idx), + kickoff_idx, + ) + .await + .unwrap() + .unwrap(); + assert_eq!(result, signatures.signatures); + + let non_existent = database + .get_deposit_signatures( + None, + deposit_outpoint, + operator_xonly_pk, + RoundIndex::Round(round_idx + 1), + kickoff_idx + 1, + ) + .await + .unwrap(); + assert!(non_existent.is_none()); + + let non_existent = database + .get_deposit_signatures( + None, + OutPoint::null(), + unset_operator_xonly_pk, + RoundIndex::Round(round_idx), + kickoff_idx, + ) + .await + .unwrap(); + assert!(non_existent.is_none()); + } +} diff --git a/core/src/database/pgmq.sql b/core/src/database/pgmq.sql new file mode 100644 index 000000000..6bbf3bb87 --- /dev/null +++ b/core/src/database/pgmq.sql @@ -0,0 +1,1157 @@ +------------------------------------------------------------ +-- Schema, tables, records, privileges, indexes, etc +------------------------------------------------------------ +-- When installed as an extension, we don't need to create the `pgmq` schema +-- because it is automatically created by postgres due to being declared in +-- the extension control file +DO $$ BEGIN IF ( + SELECT NOT EXISTS( + SELECT 1 + FROM pg_extension + WHERE extname = 'pgmq' + ) +) THEN CREATE SCHEMA IF NOT EXISTS pgmq; +END IF; +END $$; +-- Table where queues and metadata about them is stored +CREATE TABLE pgmq.meta ( + queue_name VARCHAR UNIQUE NOT NULL, + is_partitioned BOOLEAN NOT NULL, + is_unlogged BOOLEAN NOT NULL, + created_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL +); +-- Grant permission to pg_monitor to all tables and sequences +GRANT USAGE ON SCHEMA pgmq TO 
pg_monitor;
+GRANT SELECT ON ALL TABLES IN SCHEMA pgmq TO pg_monitor;
+GRANT SELECT ON ALL SEQUENCES IN SCHEMA pgmq TO pg_monitor;
+ALTER DEFAULT PRIVILEGES IN SCHEMA pgmq
+GRANT SELECT ON TABLES TO pg_monitor;
+ALTER DEFAULT PRIVILEGES IN SCHEMA pgmq
+GRANT SELECT ON SEQUENCES TO pg_monitor;
+-- This type has the shape of a message in a queue, and is often returned by
+-- pgmq functions that return messages
+CREATE TYPE pgmq.message_record AS (
+    msg_id BIGINT,
+    read_ct INTEGER,
+    enqueued_at TIMESTAMP WITH TIME ZONE,
+    vt TIMESTAMP WITH TIME ZONE,
+    message JSONB,
+    headers JSONB
+);
+CREATE TYPE pgmq.queue_record AS (
+    queue_name VARCHAR,
+    is_partitioned BOOLEAN,
+    is_unlogged BOOLEAN,
+    created_at TIMESTAMP WITH TIME ZONE
+);
+------------------------------------------------------------
+-- Functions
+------------------------------------------------------------
+-- a helper to format table names and check for invalid characters
+CREATE FUNCTION pgmq.format_table_name(queue_name text, prefix text)
+RETURNS TEXT AS $$
+BEGIN
+    IF queue_name ~ '\$|;|--|''' THEN
+        RAISE EXCEPTION 'queue name contains invalid characters: $, ;, --, or \''';
+    END IF;
+    RETURN lower(prefix || '_' || queue_name);
+END;
+$$ LANGUAGE plpgsql;
+
+-- read
+-- reads a number of messages from a queue, setting a visibility timeout on them
+CREATE FUNCTION pgmq.read(
+    queue_name TEXT,
+    vt INTEGER,
+    qty INTEGER,
+    conditional JSONB DEFAULT '{}'
+)
+RETURNS SETOF pgmq.message_record AS $$
+DECLARE
+    sql TEXT;
+    qtable TEXT := pgmq.format_table_name(queue_name, 'q');
+BEGIN
+    sql := FORMAT(
+        $QUERY$
+        WITH cte AS
+        (
+            SELECT msg_id
+            FROM pgmq.%I
+            WHERE vt <= clock_timestamp() AND CASE
+                WHEN %L != '{}'::jsonb THEN (message @> %2$L)::integer
+                ELSE 1
+            END = 1
+            ORDER BY msg_id ASC
+            LIMIT $1
+            FOR UPDATE SKIP LOCKED
+        )
+        UPDATE pgmq.%I m
+        SET
+            vt = clock_timestamp() + %L,
+            read_ct = read_ct + 1
+        FROM cte
+        WHERE m.msg_id = cte.msg_id
+        RETURNING m.msg_id, m.read_ct,
m.enqueued_at, m.vt, m.message, m.headers; + $QUERY$, + qtable, conditional, qtable, make_interval(secs => vt) + ); + RETURN QUERY EXECUTE sql USING qty; +END; +$$ LANGUAGE plpgsql; + +---- read_with_poll +---- reads a number of messages from a queue, setting a visibility timeout on them +CREATE FUNCTION pgmq.read_with_poll( + queue_name TEXT, + vt INTEGER, + qty INTEGER, + max_poll_seconds INTEGER DEFAULT 5, + poll_interval_ms INTEGER DEFAULT 100, + conditional JSONB DEFAULT ' { } ' +) +RETURNS SETOF pgmq.message_record AS $$ +DECLARE + r pgmq.message_record; + stop_at TIMESTAMP; + sql TEXT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + stop_at := clock_timestamp() + make_interval(secs => max_poll_seconds); + LOOP + IF (SELECT clock_timestamp() >= stop_at) THEN + RETURN; + END IF; + + sql := FORMAT( + $QUERY$ + WITH cte AS + ( + SELECT msg_id + FROM pgmq.%I + WHERE vt <= clock_timestamp() AND CASE + WHEN %L != ' { } '::jsonb THEN (message @> %2$L)::integer + ELSE 1 + END = 1 + ORDER BY msg_id ASC + LIMIT $1 + FOR UPDATE SKIP LOCKED + ) + UPDATE pgmq.%I m + SET + vt = clock_timestamp() + %L, + read_ct = read_ct + 1 + FROM cte + WHERE m.msg_id = cte.msg_id + RETURNING m.msg_id, m.read_ct, m.enqueued_at, m.vt, m.message, m.headers; + $QUERY$, + qtable, conditional, qtable, make_interval(secs => vt) + ); + + FOR r IN + EXECUTE sql USING qty + LOOP + RETURN NEXT r; + END LOOP; + IF FOUND THEN + RETURN; + ELSE + PERFORM pg_sleep(poll_interval_ms::numeric / 1000); + END IF; + END LOOP; +END; +$$ LANGUAGE plpgsql; + +---- archive +---- removes a message from the queue, and sends it to the archive, where its +---- saved permanently. 
+CREATE FUNCTION pgmq.archive( + queue_name TEXT, + msg_id BIGINT +) +RETURNS BOOLEAN AS $$ +DECLARE + sql TEXT; + result BIGINT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); + atable TEXT := pgmq.format_table_name(queue_name, ' a '); +BEGIN + sql := FORMAT( + $QUERY$ + WITH archived AS ( + DELETE FROM pgmq.%I + WHERE msg_id = $1 + RETURNING msg_id, vt, read_ct, enqueued_at, message, headers + ) + INSERT INTO pgmq.%I (msg_id, vt, read_ct, enqueued_at, message, headers) + SELECT msg_id, vt, read_ct, enqueued_at, message, headers + FROM archived + RETURNING msg_id; + $QUERY$, + qtable, atable + ); + EXECUTE sql USING msg_id INTO result; + RETURN NOT (result IS NULL); +END; +$$ LANGUAGE plpgsql; + +---- archive +---- removes an array of message ids from the queue, and sends it to the archive, +---- where these messages will be saved permanently. +CREATE FUNCTION pgmq.archive( + queue_name TEXT, + msg_ids BIGINT[] +) +RETURNS SETOF BIGINT AS $$ +DECLARE + sql TEXT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); + atable TEXT := pgmq.format_table_name(queue_name, ' a '); +BEGIN + sql := FORMAT( + $QUERY$ + WITH archived AS ( + DELETE FROM pgmq.%I + WHERE msg_id = ANY($1) + RETURNING msg_id, vt, read_ct, enqueued_at, message, headers + ) + INSERT INTO pgmq.%I (msg_id, vt, read_ct, enqueued_at, message, headers) + SELECT msg_id, vt, read_ct, enqueued_at, message, headers + FROM archived + RETURNING msg_id; + $QUERY$, + qtable, atable + ); + RETURN QUERY EXECUTE sql USING msg_ids; +END; +$$ LANGUAGE plpgsql; + +---- delete +---- deletes a message id from the queue permanently +CREATE FUNCTION pgmq.delete( + queue_name TEXT, + msg_id BIGINT +) +RETURNS BOOLEAN AS $$ +DECLARE + sql TEXT; + result BIGINT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + sql := FORMAT( + $QUERY$ + DELETE FROM pgmq.%I + WHERE msg_id = $1 + RETURNING msg_id + $QUERY$, + qtable + ); + EXECUTE sql USING msg_id INTO result; + RETURN NOT (result IS 
NULL); +END; +$$ LANGUAGE plpgsql; + +---- delete +---- deletes an array of message ids from the queue permanently +CREATE FUNCTION pgmq.delete( + queue_name TEXT, + msg_ids BIGINT[] +) +RETURNS SETOF BIGINT AS $$ +DECLARE + sql TEXT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + sql := FORMAT( + $QUERY$ + DELETE FROM pgmq.%I + WHERE msg_id = ANY($1) + RETURNING msg_id + $QUERY$, + qtable + ); + RETURN QUERY EXECUTE sql USING msg_ids; +END; +$$ LANGUAGE plpgsql; + +-- send: actual implementation +CREATE FUNCTION pgmq.send( + queue_name TEXT, + msg JSONB, + headers JSONB, + delay TIMESTAMP WITH TIME ZONE +) RETURNS SETOF BIGINT AS $$ +DECLARE + sql TEXT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + sql := FORMAT( + $QUERY$ + INSERT INTO pgmq.%I (vt, message, headers) + VALUES ($2, $1, $3) + RETURNING msg_id; + $QUERY$, + qtable + ); + RETURN QUERY EXECUTE sql USING msg, delay, headers; +END; +$$ LANGUAGE plpgsql; + +-- send: 2 args, no delay or headers +CREATE FUNCTION pgmq.send( + queue_name TEXT, + msg JSONB +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send(queue_name, msg, NULL, clock_timestamp()); +$$ LANGUAGE sql; + +-- send: 3 args with headers +CREATE FUNCTION pgmq.send( + queue_name TEXT, + msg JSONB, + headers JSONB +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send(queue_name, msg, headers, clock_timestamp()); +$$ LANGUAGE sql; + +-- send: 3 args with integer delay +CREATE FUNCTION pgmq.send( + queue_name TEXT, + msg JSONB, + delay INTEGER +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send(queue_name, msg, NULL, clock_timestamp() + make_interval(secs => delay)); +$$ LANGUAGE sql; + +-- send: 3 args with timestamp +CREATE FUNCTION pgmq.send( + queue_name TEXT, + msg JSONB, + delay TIMESTAMP WITH TIME ZONE +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send(queue_name, msg, NULL, delay); +$$ LANGUAGE sql; + +-- send: 4 args with integer delay +CREATE FUNCTION pgmq.send( + queue_name TEXT, + 
msg JSONB, + headers JSONB, + delay INTEGER +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send(queue_name, msg, headers, clock_timestamp() + make_interval(secs => delay)); +$$ LANGUAGE sql; + +-- send_batch: actual implementation +CREATE FUNCTION pgmq.send_batch( + queue_name TEXT, + msgs JSONB[], + headers JSONB[], + delay TIMESTAMP WITH TIME ZONE +) RETURNS SETOF BIGINT AS $$ +DECLARE + sql TEXT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + sql := FORMAT( + $QUERY$ + INSERT INTO pgmq.%I (vt, message, headers) + SELECT $2, unnest($1), unnest(coalesce($3, ARRAY[]::jsonb[])) + RETURNING msg_id; + $QUERY$, + qtable + ); + RETURN QUERY EXECUTE sql USING msgs, delay, headers; +END; +$$ LANGUAGE plpgsql; + +-- send batch: 2 args +CREATE FUNCTION pgmq.send_batch( + queue_name TEXT, + msgs JSONB[] +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send_batch(queue_name, msgs, NULL, clock_timestamp()); +$$ LANGUAGE sql; + +-- send batch: 3 args with headers +CREATE FUNCTION pgmq.send_batch( + queue_name TEXT, + msgs JSONB[], + headers JSONB[] +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send_batch(queue_name, msgs, headers, clock_timestamp()); +$$ LANGUAGE sql; + +-- send batch: 3 args with integer delay +CREATE FUNCTION pgmq.send_batch( + queue_name TEXT, + msgs JSONB[], + delay INTEGER +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send_batch(queue_name, msgs, NULL, clock_timestamp() + make_interval(secs => delay)); +$$ LANGUAGE sql; + +-- send batch: 3 args with timestamp +CREATE FUNCTION pgmq.send_batch( + queue_name TEXT, + msgs JSONB[], + delay TIMESTAMP WITH TIME ZONE +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send_batch(queue_name, msgs, NULL, delay); +$$ LANGUAGE sql; + +-- send_batch: 4 args with integer delay +CREATE FUNCTION pgmq.send_batch( + queue_name TEXT, + msgs JSONB[], + headers JSONB[], + delay INTEGER +) RETURNS SETOF BIGINT AS $$ + SELECT * FROM pgmq.send_batch(queue_name, msgs, headers, clock_timestamp() 
+ make_interval(secs => delay)); +$$ LANGUAGE sql; + +-- returned by pgmq.metrics() and pgmq.metrics_all +CREATE TYPE pgmq.metrics_result AS ( + queue_name text, + queue_length bigint, + newest_msg_age_sec int, + oldest_msg_age_sec int, + total_messages bigint, + scrape_time timestamp with time zone, + queue_visible_length bigint +); + +-- get metrics for a single queue +CREATE FUNCTION pgmq.metrics(queue_name TEXT) +RETURNS pgmq.metrics_result AS $$ +DECLARE + result_row pgmq.metrics_result; + query TEXT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + query := FORMAT( + $QUERY$ + WITH q_summary AS ( + SELECT + count(*) as queue_length, + count(CASE WHEN vt <= NOW() THEN 1 END) as queue_visible_length, + EXTRACT(epoch FROM (NOW() - max(enqueued_at)))::int as newest_msg_age_sec, + EXTRACT(epoch FROM (NOW() - min(enqueued_at)))::int as oldest_msg_age_sec, + NOW() as scrape_time + FROM pgmq.%I + ), + all_metrics AS ( + SELECT CASE + WHEN is_called THEN last_value ELSE 0 + END as total_messages + FROM pgmq.%I + ) + SELECT + %L as queue_name, + q_summary.queue_length, + q_summary.newest_msg_age_sec, + q_summary.oldest_msg_age_sec, + all_metrics.total_messages, + q_summary.scrape_time, + q_summary.queue_visible_length + FROM q_summary, all_metrics + $QUERY$, + qtable, qtable || ' _msg_id_seq ', queue_name + ); + EXECUTE query INTO result_row; + RETURN result_row; +END; +$$ LANGUAGE plpgsql; + +-- get metrics for all queues +CREATE FUNCTION pgmq."metrics_all"() +RETURNS SETOF pgmq.metrics_result AS $$ +DECLARE + row_name RECORD; + result_row pgmq.metrics_result; +BEGIN + FOR row_name IN SELECT queue_name FROM pgmq.meta LOOP + result_row := pgmq.metrics(row_name.queue_name); + RETURN NEXT result_row; + END LOOP; +END; +$$ LANGUAGE plpgsql; + +-- list queues +CREATE FUNCTION pgmq."list_queues"() +RETURNS SETOF pgmq.queue_record AS $$ +BEGIN + RETURN QUERY SELECT * FROM pgmq.meta; +END +$$ LANGUAGE plpgsql; + +-- purge queue, deleting all entries in it. 
+CREATE OR REPLACE FUNCTION pgmq."purge_queue"(queue_name TEXT) +RETURNS BIGINT AS $$ +DECLARE + deleted_count INTEGER; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + -- Get the row count before truncating + EXECUTE format(' +SELECT count(*) +FROM pgmq.%I ', qtable) INTO deleted_count; + + -- Use TRUNCATE for better performance on large tables + EXECUTE format(' TRUNCATE TABLE pgmq.%I ', qtable); + + -- Return the number of purged rows + RETURN deleted_count; +END +$$ LANGUAGE plpgsql; + + +-- unassign archive, so it can be kept when a queue is deleted +CREATE FUNCTION pgmq."detach_archive"(queue_name TEXT) +RETURNS VOID AS $$ +DECLARE + atable TEXT := pgmq.format_table_name(queue_name, ' a '); +BEGIN + IF pgmq._extension_exists(' pgmq ') THEN + EXECUTE format(' ALTER EXTENSION pgmq DROP TABLE pgmq.%I ', atable); + END IF; +END +$$ LANGUAGE plpgsql; + +-- pop a single message +CREATE FUNCTION pgmq.pop(queue_name TEXT) +RETURNS SETOF pgmq.message_record AS $$ +DECLARE + sql TEXT; + result pgmq.message_record; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + sql := FORMAT( + $QUERY$ + WITH cte AS + ( + SELECT msg_id + FROM pgmq.%I + WHERE vt <= clock_timestamp() + ORDER BY msg_id ASC + LIMIT 1 + FOR UPDATE SKIP LOCKED + ) + DELETE from pgmq.%I + WHERE msg_id = (select msg_id from cte) + RETURNING *; + $QUERY$, + qtable, qtable + ); + RETURN QUERY EXECUTE sql; +END; +$$ LANGUAGE plpgsql; + +-- Sets vt of a message, returns it +CREATE FUNCTION pgmq.set_vt(queue_name TEXT, msg_id BIGINT, vt INTEGER) +RETURNS SETOF pgmq.message_record AS $$ +DECLARE + sql TEXT; + result pgmq.message_record; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); +BEGIN + sql := FORMAT( + $QUERY$ + UPDATE pgmq.%I + SET vt = (clock_timestamp() + %L) + WHERE msg_id = %L + RETURNING *; + $QUERY$, + qtable, make_interval(secs => vt), msg_id + ); + RETURN QUERY EXECUTE sql; +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION 
pgmq._get_pg_partman_schema()
+RETURNS TEXT AS $$
+    SELECT
+        extnamespace::regnamespace::text
+    FROM
+        pg_extension
+    WHERE
+        extname = 'pg_partman';
+$$ LANGUAGE SQL;
+
+CREATE FUNCTION pgmq.drop_queue(queue_name TEXT, partitioned BOOLEAN)
+RETURNS BOOLEAN AS $$
+DECLARE
+    qtable TEXT := pgmq.format_table_name(queue_name, 'q');
+    fq_qtable TEXT := 'pgmq.' || qtable;
+    atable TEXT := pgmq.format_table_name(queue_name, 'a');
+    fq_atable TEXT := 'pgmq.' || atable;
+BEGIN
+    RAISE WARNING 'drop_queue(queue_name, partitioned) is deprecated
+    and will be removed in PGMQ v2.0. Use drop_queue(queue_name) instead';
+
+    PERFORM pgmq.drop_queue(queue_name);
+
+    RETURN TRUE;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE FUNCTION pgmq.drop_queue(queue_name TEXT)
+RETURNS BOOLEAN AS $$
+DECLARE
+    qtable TEXT := pgmq.format_table_name(queue_name, 'q');
+    qtable_seq TEXT := qtable || '_msg_id_seq';
+    fq_qtable TEXT := 'pgmq.' || qtable;
+    atable TEXT := pgmq.format_table_name(queue_name, 'a');
+    fq_atable TEXT := 'pgmq.'
|| atable; + partitioned BOOLEAN; +BEGIN + EXECUTE FORMAT( + $QUERY$ + SELECT is_partitioned FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ) INTO partitioned; + + IF pgmq._extension_exists(' pgmq ') THEN + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + qtable + ); + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP SEQUENCE pgmq.%I + $QUERY$, + qtable_seq + ); + + EXECUTE FORMAT( + $QUERY$ + ALTER EXTENSION pgmq DROP TABLE pgmq.%I + $QUERY$, + atable + ); + END IF; + + EXECUTE FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + qtable + ); + + EXECUTE FORMAT( + $QUERY$ + DROP TABLE IF EXISTS pgmq.%I + $QUERY$, + atable + ); + + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_name = ' meta ' and table_schema = ' pgmq ' + ) THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM pgmq.meta WHERE queue_name = %L + $QUERY$, + queue_name + ); + END IF; + + IF partitioned THEN + EXECUTE FORMAT( + $QUERY$ + DELETE FROM %I.part_config where parent_table in (%L, %L) + $QUERY$, + pgmq._get_pg_partman_schema(), fq_qtable, fq_atable + ); + END IF; + + RETURN TRUE; +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION pgmq.validate_queue_name(queue_name TEXT) +RETURNS void AS $$ +BEGIN + IF length(queue_name) >= 48 THEN + RAISE EXCEPTION ' queue name is too long, + maximum length is 48 characters '; + END IF; +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION pgmq._belongs_to_pgmq(table_name TEXT) +RETURNS BOOLEAN AS $$ +DECLARE + sql TEXT; + result BOOLEAN; +BEGIN + SELECT EXISTS ( + SELECT 1 + FROM pg_depend + WHERE refobjid = (SELECT oid FROM pg_extension WHERE extname = ' pgmq ') + AND objid = ( + SELECT oid + FROM pg_class + WHERE relname = table_name + ) + ) INTO result; + RETURN result; +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION pgmq.create_non_partitioned(queue_name TEXT) +RETURNS void AS $$ +DECLARE + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); + qtable_seq TEXT := qtable || ' 
_msg_id_seq '; + atable TEXT := pgmq.format_table_name(queue_name, ' a '); +BEGIN + PERFORM pgmq.validate_queue_name(queue_name); + + EXECUTE FORMAT( + $QUERY$ + CREATE TABLE IF NOT EXISTS pgmq.%I ( + msg_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + read_ct INT DEFAULT 0 NOT NULL, + enqueued_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + vt TIMESTAMP WITH TIME ZONE NOT NULL, + message JSONB, + headers JSONB + ) + $QUERY$, + qtable + ); + + EXECUTE FORMAT( + $QUERY$ + CREATE TABLE IF NOT EXISTS pgmq.%I ( + msg_id BIGINT PRIMARY KEY, + read_ct INT DEFAULT 0 NOT NULL, + enqueued_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + archived_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + vt TIMESTAMP WITH TIME ZONE NOT NULL, + message JSONB, + headers JSONB + ); + $QUERY$, + atable + ); + + IF pgmq._extension_exists(' pgmq ') THEN + IF NOT pgmq._belongs_to_pgmq(qtable) THEN + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD TABLE pgmq.%I ', qtable); + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD SEQUENCE pgmq.%I ', qtable_seq); + END IF; + + IF NOT pgmq._belongs_to_pgmq(atable) THEN + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD TABLE pgmq.%I ', atable); + END IF; + END IF; + + EXECUTE FORMAT( + $QUERY$ + CREATE INDEX IF NOT EXISTS %I ON pgmq.%I (vt ASC); + $QUERY$, + qtable || ' _vt_idx ', qtable + ); + + EXECUTE FORMAT( + $QUERY$ + CREATE INDEX IF NOT EXISTS %I ON pgmq.%I (archived_at); + $QUERY$, + ' archived_at_idx_ ' || queue_name, atable + ); + + EXECUTE FORMAT( + $QUERY$ + INSERT INTO pgmq.meta (queue_name, is_partitioned, is_unlogged) + VALUES (%L, false, false) + ON CONFLICT + DO NOTHING; + $QUERY$, + queue_name + ); +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION pgmq.create_unlogged(queue_name TEXT) +RETURNS void AS $$ +DECLARE + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); + qtable_seq TEXT := qtable || ' _msg_id_seq '; + atable TEXT := pgmq.format_table_name(queue_name, ' a '); +BEGIN + PERFORM pgmq.validate_queue_name(queue_name); 
+ EXECUTE FORMAT( + $QUERY$ + CREATE UNLOGGED TABLE IF NOT EXISTS pgmq.%I ( + msg_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + read_ct INT DEFAULT 0 NOT NULL, + enqueued_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + vt TIMESTAMP WITH TIME ZONE NOT NULL, + message JSONB, + headers JSONB + ) + $QUERY$, + qtable + ); + + EXECUTE FORMAT( + $QUERY$ + CREATE TABLE IF NOT EXISTS pgmq.%I ( + msg_id BIGINT PRIMARY KEY, + read_ct INT DEFAULT 0 NOT NULL, + enqueued_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + archived_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + vt TIMESTAMP WITH TIME ZONE NOT NULL, + message JSONB, + headers JSONB + ); + $QUERY$, + atable + ); + + IF pgmq._extension_exists(' pgmq ') THEN + IF NOT pgmq._belongs_to_pgmq(qtable) THEN + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD TABLE pgmq.%I ', qtable); + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD SEQUENCE pgmq.%I ', qtable_seq); + END IF; + + IF NOT pgmq._belongs_to_pgmq(atable) THEN + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD TABLE pgmq.%I ', atable); + END IF; + END IF; + + EXECUTE FORMAT( + $QUERY$ + CREATE INDEX IF NOT EXISTS %I ON pgmq.%I (vt ASC); + $QUERY$, + qtable || ' _vt_idx ', qtable + ); + + EXECUTE FORMAT( + $QUERY$ + CREATE INDEX IF NOT EXISTS %I ON pgmq.%I (archived_at); + $QUERY$, + ' archived_at_idx_ ' || queue_name, atable + ); + + EXECUTE FORMAT( + $QUERY$ + INSERT INTO pgmq.meta (queue_name, is_partitioned, is_unlogged) + VALUES (%L, false, true) + ON CONFLICT + DO NOTHING; + $QUERY$, + queue_name + ); +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION pgmq._get_partition_col(partition_interval TEXT) +RETURNS TEXT AS $$ +DECLARE + num INTEGER; +BEGIN + BEGIN + num := partition_interval::INTEGER; + RETURN ' msg_id '; + EXCEPTION + WHEN others THEN + RETURN ' enqueued_at '; + END; +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION pgmq._extension_exists(extension_name TEXT) + RETURNS BOOLEAN + LANGUAGE SQL +AS $$ +SELECT EXISTS ( + SELECT 1 + FROM pg_extension + 
WHERE extname = extension_name +) +$$; + +CREATE FUNCTION pgmq._ensure_pg_partman_installed() +RETURNS void AS $$ +BEGIN + IF NOT pgmq._extension_exists(' pg_partman ') THEN + RAISE EXCEPTION ' pg_partman is required for partitioned queues '; + END IF; +END; +$$ LANGUAGE plpgsql; + +CREATE FUNCTION pgmq._get_pg_partman_major_version() +RETURNS INT +LANGUAGE SQL +AS $$ + SELECT split_part(extversion, '.', 1)::INT + FROM pg_extension + WHERE extname = ' pg_partman ' +$$; + +CREATE FUNCTION pgmq.create_partitioned( + queue_name TEXT, + partition_interval TEXT DEFAULT ' 10000 ', + retention_interval TEXT DEFAULT ' 100000 ' +) +RETURNS void AS $$ +DECLARE + partition_col TEXT; + a_partition_col TEXT; + qtable TEXT := pgmq.format_table_name(queue_name, ' q '); + qtable_seq TEXT := qtable || ' _msg_id_seq '; + atable TEXT := pgmq.format_table_name(queue_name, ' a '); + fq_qtable TEXT := ' pgmq.' || qtable; + fq_atable TEXT := ' pgmq.' || atable; +BEGIN + PERFORM pgmq.validate_queue_name(queue_name); + PERFORM pgmq._ensure_pg_partman_installed(); + SELECT pgmq._get_partition_col(partition_interval) INTO partition_col; + + EXECUTE FORMAT( + $QUERY$ + CREATE TABLE IF NOT EXISTS pgmq.%I ( + msg_id BIGINT GENERATED ALWAYS AS IDENTITY, + read_ct INT DEFAULT 0 NOT NULL, + enqueued_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + vt TIMESTAMP WITH TIME ZONE NOT NULL, + message JSONB, + headers JSONB + ) PARTITION BY RANGE (%I) + $QUERY$, + qtable, partition_col + ); + + IF pgmq._extension_exists(' pgmq ') THEN + IF NOT pgmq._belongs_to_pgmq(qtable) THEN + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD TABLE pgmq.%I ', qtable); + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD SEQUENCE pgmq.%I ', qtable_seq); + END IF; + END IF; + + -- https://github.com/pgpartman/pg_partman/blob/master/doc/pg_partman.md + -- p_parent_table - the existing parent table. MUST be schema qualified, even if in public schema. 
+ EXECUTE FORMAT( + $QUERY$ + SELECT %I.create_parent( + p_parent_table := %L, + p_control := %L, + p_interval := %L, + p_type := case + when pgmq._get_pg_partman_major_version() = 5 then ' range ' + else ' native ' + end + ) + $QUERY$, + pgmq._get_pg_partman_schema(), + fq_qtable, + partition_col, + partition_interval + ); + + EXECUTE FORMAT( + $QUERY$ + CREATE INDEX IF NOT EXISTS %I ON pgmq.%I (%I); + $QUERY$, + qtable || ' _part_idx ', qtable, partition_col + ); + + EXECUTE FORMAT( + $QUERY$ + UPDATE %I.part_config + SET + retention = %L, + retention_keep_table = false, + retention_keep_index = true, + automatic_maintenance = ' on ' + WHERE parent_table = %L; + $QUERY$, + pgmq._get_pg_partman_schema(), + retention_interval, + ' pgmq.' || qtable + ); + + EXECUTE FORMAT( + $QUERY$ + INSERT INTO pgmq.meta (queue_name, is_partitioned, is_unlogged) + VALUES (%L, true, false) + ON CONFLICT + DO NOTHING; + $QUERY$, + queue_name + ); + + IF partition_col = ' enqueued_at ' THEN + a_partition_col := ' archived_at '; + ELSE + a_partition_col := partition_col; + END IF; + + EXECUTE FORMAT( + $QUERY$ + CREATE TABLE IF NOT EXISTS pgmq.%I ( + msg_id BIGINT NOT NULL, + read_ct INT DEFAULT 0 NOT NULL, + enqueued_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + archived_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL, + vt TIMESTAMP WITH TIME ZONE NOT NULL, + message JSONB, + headers JSONB + ) PARTITION BY RANGE (%I); + $QUERY$, + atable, a_partition_col + ); + + IF pgmq._extension_exists(' pgmq ') THEN + IF NOT pgmq._belongs_to_pgmq(atable) THEN + EXECUTE FORMAT(' ALTER EXTENSION pgmq +ADD TABLE pgmq.%I ', atable); + END IF; + END IF; + + -- https://github.com/pgpartman/pg_partman/blob/master/doc/pg_partman.md + -- p_parent_table - the existing parent table. MUST be schema qualified, even if in public schema. 
+ EXECUTE FORMAT( + $QUERY$ + SELECT %I.create_parent( + p_parent_table := %L, + p_control := %L, + p_interval := %L, + p_type := case + when pgmq._get_pg_partman_major_version() = 5 then ' range ' + else ' native ' + end + ) + $QUERY$, + pgmq._get_pg_partman_schema(), + fq_atable, + a_partition_col, + partition_interval + ); + + EXECUTE FORMAT( + $QUERY$ + UPDATE %I.part_config + SET + retention = %L, + retention_keep_table = false, + retention_keep_index = true, + automatic_maintenance = ' on ' + WHERE parent_table = %L; + $QUERY$, + pgmq._get_pg_partman_schema(), + retention_interval, + ' pgmq.' || atable + ); + + EXECUTE FORMAT( + $QUERY$ + CREATE INDEX IF NOT EXISTS %I ON pgmq.%I (archived_at); + $QUERY$, + ' archived_at_idx_ ' || queue_name, atable + ); + +END; +$$ LANGUAGE plpgsql; + + +CREATE FUNCTION pgmq.create(queue_name TEXT) +RETURNS void AS $$ +BEGIN + PERFORM pgmq.create_non_partitioned(queue_name); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION pgmq.convert_archive_partitioned( + table_name TEXT, + partition_interval TEXT DEFAULT ' 10000 ', + retention_interval TEXT DEFAULT ' 100000 ', + leading_partition INT DEFAULT 10 +) +RETURNS void AS $$ +DECLARE + a_table_name TEXT := pgmq.format_table_name(table_name, ' a '); + a_table_name_old TEXT := pgmq.format_table_name(table_name, ' a ') || ' _old '; + qualified_a_table_name TEXT := format(' pgmq.%I ', a_table_name); +BEGIN + + PERFORM c.relkind + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + WHERE c.relname = a_table_name + AND c.relkind = ' p '; + + IF FOUND THEN + RAISE NOTICE ' Table %s is already partitioned ', a_table_name; + RETURN; + END IF; + + PERFORM c.relkind + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + WHERE c.relname = a_table_name + AND c.relkind = ' r '; + + IF NOT FOUND THEN + RAISE NOTICE ' Table %s does not exists ', a_table_name; + RETURN; + END IF; + + EXECUTE ' +ALTER TABLE ' || qualified_a_table_name || ' + RENAME TO ' || 
a_table_name_old; + + EXECUTE format( ' CREATE TABLE pgmq.%I (LIKE pgmq.%I including all) PARTITION BY RANGE (msg_id) ', a_table_name, a_table_name_old ); + + EXECUTE ' ALTER INDEX pgmq.archived_at_idx_ ' || table_name || ' + RENAME TO archived_at_idx_ ' || table_name || ' _old '; + EXECUTE ' CREATE INDEX archived_at_idx_ '|| table_name || ' ON ' || qualified_a_table_name ||'(archived_at) '; + + -- https://github.com/pgpartman/pg_partman/blob/master/doc/pg_partman.md + -- p_parent_table - the existing parent table. MUST be schema qualified, even if in public schema. + EXECUTE FORMAT( + $QUERY$ + SELECT %I.create_parent( + p_parent_table := %L, + p_control := ' msg_id ', + p_interval := %L, + p_type := case + when pgmq._get_pg_partman_major_version() = 5 then ' range ' + else ' native ' + end + ) + $QUERY$, + pgmq._get_pg_partman_schema(), + qualified_a_table_name, + partition_interval + ); + + EXECUTE FORMAT( + $QUERY$ + UPDATE %I.part_config + SET + retention = %L, + retention_keep_table = false, + retention_keep_index = false, + infinite_time_partitions = true + WHERE + parent_table = %L; + $QUERY$, + pgmq._get_pg_partman_schema(), + retention_interval, + qualified_a_table_name + ); +END; +$$ LANGUAGE plpgsql; \ No newline at end of file diff --git a/core/src/database/schema.sql b/core/src/database/schema.sql new file mode 100644 index 000000000..02caa3e1b --- /dev/null +++ b/core/src/database/schema.sql @@ -0,0 +1,395 @@ +BEGIN; +create table if not exists operators ( + xonly_pk text primary key not null, + wallet_reimburse_address text not null, + collateral_funding_outpoint text not null check ( + collateral_funding_outpoint ~ '^[a-fA-F0-9]{64}:(0|[1-9][0-9]{0,9})$' + ) +); +create table if not exists bitcoin_blocks ( + height int primary key not null, + block_hash text not null, + block_data bytea not null, + created_at timestamp not null default now() +); +-- Watchtower header chain proofs +create table if not exists header_chain_proofs ( + block_hash text 
primary key not null, + block_header text, + prev_block_hash text, + height bigint not null, + proof bytea +); +create table if not exists watchtower_xonly_public_keys ( + watchtower_id int not null, + xonly_pk bytea not null, + primary key (watchtower_id) +); +-- Verifier table of operators Winternitz public keys for every kickoff utxo for committing blockhash +create table if not exists operator_winternitz_public_keys ( + xonly_pk text primary key not null, + winternitz_public_keys bytea not null +); +-- Verifier table of operators Winternitz public keys for every kickoff utxo for committing bitvm inputs +create table if not exists operator_bitvm_winternitz_public_keys ( + xonly_pk text not null, + deposit_id int not null, + bitvm_winternitz_public_keys bytea not null, + primary key (xonly_pk, deposit_id) +); +create table if not exists deposits ( + deposit_id serial primary key, + deposit_outpoint text unique not null check ( + deposit_outpoint ~ '^[a-fA-F0-9]{64}:(0|[1-9][0-9]{0,9})$' + ), + deposit_params bytea, + move_to_vault_txid bytea +); +-- Deposit signatures +create table if not exists deposit_signatures ( + deposit_id int not null references deposits (deposit_id), + operator_xonly_pk text not null, + round_idx int not null, + kickoff_idx int not null, + kickoff_txid bytea, + signatures bytea not null, + primary key ( + deposit_id, + operator_xonly_pk, + round_idx, + kickoff_idx + ) +); +-- Signatures of the operator for unspent kickoffs +create table if not exists unspent_kickoff_signatures ( + xonly_pk text not null, + round_idx int not null, + signatures bytea not null, + primary key (xonly_pk, round_idx) +); +-- LCP and storage proofs saved for sending assert +create table if not exists lcp_for_asserts ( + deposit_id int not null primary key, + lcp_receipt bytea not null +); +-- Verifier table for BitVM setup data +/* This table holds the BitVM setup data for each operator and deposit_id pair. 
*/ +create table if not exists bitvm_setups ( + xonly_pk text not null, + deposit_id int not null, + assert_tx_addrs bytea [] not null, + root_hash bytea not null check (length(root_hash) = 32), + latest_blockhash_root_hash bytea not null check (length(latest_blockhash_root_hash) = 32), + --public_input_wots bytea[] not null, + created_at timestamp not null default now(), + primary key (xonly_pk, deposit_id) +); +-- Verifier table for the operators public digests to acknowledge watchtower challenges. +/* This table holds the public digests of the operators to use for the watchtower + challenges for each (xonly_pk, deposit_id) tuple. */ +create table if not exists operators_challenge_ack_hashes ( + xonly_pk text not null, + deposit_id int not null, + public_hashes bytea [] not null, + created_at timestamp not null default now(), + primary key (xonly_pk, deposit_id) +); + +/******************************************************************************* + * BITCOIN SYNCER + ******************************************************************************/ +create table if not exists bitcoin_syncer ( + id serial primary key, + blockhash text not null unique, + prev_blockhash text not null, + height int not null, + is_canonical boolean not null default true +); +create table if not exists bitcoin_syncer_txs ( + block_id int not null references bitcoin_syncer (id), + txid bytea not null, + primary key (block_id, txid) +); +create table if not exists bitcoin_syncer_spent_utxos ( + block_id bigint not null references bitcoin_syncer (id), + spending_txid bytea not null, + txid bytea not null, + vout bigint not null, + primary key (block_id, spending_txid, txid, vout), + foreign key (block_id, spending_txid) references bitcoin_syncer_txs (block_id, txid) +); +create table if not exists bitcoin_blocks ( + height int primary key not null, + block_hash text not null, + block_data bytea not null, + created_at timestamp not null default now() +); + +-- enum for bitcoin_syncer_events 
+DO $$ BEGIN IF NOT EXISTS ( + SELECT 1 + FROM pg_type + WHERE typname = 'bitcoin_syncer_event_type' +) THEN CREATE TYPE bitcoin_syncer_event_type AS ENUM ('new_block', 'reorged_block'); +END IF; +END $$; +create table if not exists bitcoin_syncer_events ( + id serial primary key, + block_id int not null references bitcoin_syncer (id), + event_type bitcoin_syncer_event_type not null, + created_at timestamp not null default now() +); +create table if not exists bitcoin_syncer_event_handlers ( + consumer_handle text not null, + last_processed_event_id int not null, + created_at timestamp not null default now(), + primary key (consumer_handle) +); + +/******************************************************************************* + * TX SENDER + ******************************************************************************/ +DO $$ BEGIN IF NOT EXISTS ( + SELECT 1 + FROM pg_type + WHERE typname = 'fee_paying_type' +) THEN CREATE TYPE fee_paying_type AS ENUM ('cpfp', 'rbf', 'nofunding'); +END IF; +END $$; + +-- Transactions that are needed to be fee bumped +create table if not exists tx_sender_try_to_send_txs ( + id serial primary key, + raw_tx bytea not null, + tx_metadata text, + fee_paying_type fee_paying_type not null, + effective_fee_rate bigint, + txid bytea, -- txid of the tx if it is CPFP + seen_block_id int references bitcoin_syncer(id), + latest_active_at timestamp, + created_at timestamp not null default now(), + rbf_signing_info text +); +create table if not exists tx_sender_rbf_txids ( + insertion_order serial not null, + id int not null references tx_sender_try_to_send_txs(id), + txid bytea not null, + created_at timestamp not null default now(), + primary key (id, txid) +); +create table if not exists tx_sender_fee_payer_utxos ( + id serial primary key, + replacement_of_id int references tx_sender_fee_payer_utxos(id), + bumped_id int not null references tx_sender_try_to_send_txs(id), + fee_payer_txid bytea not null, + vout int not null, + amount bigint 
not null, + seen_block_id int references bitcoin_syncer(id), + created_at timestamp not null default now() +); + +create table if not exists tx_sender_cancel_try_to_send_outpoints ( + cancelled_id int not null references tx_sender_try_to_send_txs(id), + txid bytea not null, + vout int not null, + seen_block_id int references bitcoin_syncer(id), + created_at timestamp not null default now(), + primary key (cancelled_id, txid, vout) +); +create table if not exists tx_sender_cancel_try_to_send_txids ( + cancelled_id int not null references tx_sender_try_to_send_txs(id), + txid bytea not null, + seen_block_id int references bitcoin_syncer(id), + created_at timestamp not null default now(), + primary key (cancelled_id, txid) +); + +create table if not exists tx_sender_activate_try_to_send_txids ( + activated_id int not null references tx_sender_try_to_send_txs(id), + txid bytea not null, + timelock bigint not null, + seen_block_id int references bitcoin_syncer(id), + created_at timestamp not null default now(), + primary key (activated_id, txid) +); +create table if not exists tx_sender_activate_try_to_send_outpoints ( + activated_id int not null references tx_sender_try_to_send_txs(id), + txid bytea not null, + vout int not null, + timelock bigint not null, + seen_block_id int references bitcoin_syncer(id), + created_at timestamp not null default now(), + primary key (activated_id, txid, vout) +); + +-- Triggers that sets the seen_block_id to the block id of the canonical block +-- when a row inserted to the tx_sender_*_try_to_send_* tables. 
+CREATE OR REPLACE FUNCTION update_cancel_txids_seen_block_id() RETURNS TRIGGER AS $$ BEGIN +UPDATE tx_sender_cancel_try_to_send_txids +SET seen_block_id = bs.id +FROM bitcoin_syncer_txs bst + JOIN bitcoin_syncer bs ON bst.block_id = bs.id +WHERE tx_sender_cancel_try_to_send_txids.cancelled_id = NEW.cancelled_id + AND tx_sender_cancel_try_to_send_txids.txid = NEW.txid + AND tx_sender_cancel_try_to_send_txids.seen_block_id IS NULL + AND bst.txid = NEW.txid + AND bs.is_canonical = TRUE; +RETURN NEW; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS trigger_update_cancel_txids_seen_block_id ON tx_sender_cancel_try_to_send_txids; +CREATE TRIGGER trigger_update_cancel_txids_seen_block_id +AFTER +INSERT ON tx_sender_cancel_try_to_send_txids FOR EACH ROW EXECUTE FUNCTION update_cancel_txids_seen_block_id(); + +CREATE OR REPLACE FUNCTION update_cancel_outpoints_seen_block_id() RETURNS TRIGGER AS $$ BEGIN +UPDATE tx_sender_cancel_try_to_send_outpoints +SET seen_block_id = bs.id +FROM bitcoin_syncer_spent_utxos bsu + JOIN bitcoin_syncer bs ON bsu.block_id = bs.id +WHERE tx_sender_cancel_try_to_send_outpoints.cancelled_id = NEW.cancelled_id + AND tx_sender_cancel_try_to_send_outpoints.txid = NEW.txid + AND tx_sender_cancel_try_to_send_outpoints.vout = NEW.vout + AND tx_sender_cancel_try_to_send_outpoints.seen_block_id IS NULL + AND bsu.txid = NEW.txid + AND bsu.vout = NEW.vout + AND bs.is_canonical = TRUE; +RETURN NEW; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS trigger_update_cancel_outpoints_seen_block_id ON tx_sender_cancel_try_to_send_outpoints; +CREATE TRIGGER trigger_update_cancel_outpoints_seen_block_id +AFTER +INSERT ON tx_sender_cancel_try_to_send_outpoints FOR EACH ROW EXECUTE FUNCTION update_cancel_outpoints_seen_block_id(); + +CREATE OR REPLACE FUNCTION update_activate_txids_seen_block_id() RETURNS TRIGGER AS $$ BEGIN +UPDATE tx_sender_activate_try_to_send_txids +SET seen_block_id = bs.id +FROM bitcoin_syncer_txs bst + JOIN bitcoin_syncer bs ON 
bst.block_id = bs.id +WHERE tx_sender_activate_try_to_send_txids.activated_id = NEW.activated_id + AND tx_sender_activate_try_to_send_txids.txid = NEW.txid + AND tx_sender_activate_try_to_send_txids.seen_block_id IS NULL + AND bst.txid = NEW.txid + AND bs.is_canonical = TRUE; +RETURN NEW; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS trigger_update_activate_txids_seen_block_id ON tx_sender_activate_try_to_send_txids; +CREATE TRIGGER trigger_update_activate_txids_seen_block_id +AFTER +INSERT ON tx_sender_activate_try_to_send_txids FOR EACH ROW EXECUTE FUNCTION update_activate_txids_seen_block_id(); + +CREATE OR REPLACE FUNCTION update_activate_outpoints_seen_block_id() RETURNS TRIGGER AS $$ BEGIN +UPDATE tx_sender_activate_try_to_send_outpoints +SET seen_block_id = bs.id +FROM bitcoin_syncer_spent_utxos bsu + JOIN bitcoin_syncer bs ON bsu.block_id = bs.id +WHERE tx_sender_activate_try_to_send_outpoints.activated_id = NEW.activated_id + AND tx_sender_activate_try_to_send_outpoints.txid = NEW.txid + AND tx_sender_activate_try_to_send_outpoints.vout = NEW.vout + AND tx_sender_activate_try_to_send_outpoints.seen_block_id IS NULL + AND bsu.txid = NEW.txid + AND bsu.vout = NEW.vout + AND bs.is_canonical = TRUE; +RETURN NEW; +END; +$$ LANGUAGE plpgsql; +DROP TRIGGER IF EXISTS trigger_update_activate_outpoints_seen_block_id ON tx_sender_activate_try_to_send_outpoints; +CREATE TRIGGER trigger_update_activate_outpoints_seen_block_id +AFTER +INSERT ON tx_sender_activate_try_to_send_outpoints FOR EACH ROW EXECUTE FUNCTION update_activate_outpoints_seen_block_id(); + +/******************************************************************************* + * FINALIZED BLOCK SYNCER, CITREA DEPOSITS AND WITHDRAWALS + ******************************************************************************/ +create table if not exists withdrawals ( + idx int primary key, + move_to_vault_txid bytea not null, + withdrawal_utxo_txid bytea, + withdrawal_utxo_vout int, + 
withdrawal_batch_proof_bitcoin_block_height int, + payout_txid bytea, + payout_payer_operator_xonly_pk text, + payout_tx_blockhash text check (payout_tx_blockhash ~ '^[a-fA-F0-9]{64}'), + is_payout_handled boolean not null default false, + kickoff_txid bytea, + created_at timestamp not null default now() +); +-- Add state machine tables at the end of the file: +-- State machines table to store serialized machines +CREATE TABLE IF NOT EXISTS state_machines ( + id SERIAL PRIMARY KEY, + machine_type VARCHAR(50) NOT NULL, + -- 'kickoff' or 'round' + state_json TEXT NOT NULL, + kickoff_id TEXT NULL, + -- only for kickoff machines + operator_xonly_pk TEXT NULL, + -- only for round machines + owner_type VARCHAR(100) NOT NULL DEFAULT 'default', + -- Type of the owner managing this state machine + block_height INT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW(), + UNIQUE(machine_type, kickoff_id, owner_type), + -- For kickoff machines + UNIQUE(machine_type, operator_xonly_pk, owner_type) -- For round machines +); +-- Status table to track the last processed block +CREATE TABLE IF NOT EXISTS state_manager_status ( + owner_type VARCHAR(100) PRIMARY KEY, + next_height_to_process INT NOT NULL, + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); +-- Create indexes for better query performance +CREATE INDEX IF NOT EXISTS state_machines_block_height_idx ON state_machines(block_height); +CREATE INDEX IF NOT EXISTS state_machines_machine_type_idx ON state_machines(machine_type); +CREATE INDEX IF NOT EXISTS state_machines_kickoff_id_idx ON state_machines(kickoff_id) +WHERE kickoff_id IS NOT NULL; +CREATE INDEX IF NOT EXISTS state_machines_operator_xonly_pk_idx ON state_machines(operator_xonly_pk) +WHERE operator_xonly_pk IS NOT NULL; +CREATE INDEX IF NOT EXISTS state_machines_owner_type_idx ON state_machines(owner_type); +COMMIT; + +/******************************************************************************* + * ROUND 
MANAGEMENT FOR OPERATOR + ******************************************************************************/ +create table if not exists used_kickoff_connectors ( + round_idx int not null, + kickoff_connector_idx int not null, + kickoff_txid bytea, + created_at timestamp not null default now(), + primary key (round_idx, kickoff_connector_idx) +); +create table if not exists current_round_index ( + id int primary key, + round_idx int not null +); +INSERT INTO current_round_index (id, round_idx) +VALUES (1, 0) ON CONFLICT DO NOTHING; +COMMIT; +-- Table to store submission errors +CREATE TABLE IF NOT EXISTS tx_sender_debug_submission_errors ( + id SERIAL PRIMARY KEY, + tx_id INT NOT NULL REFERENCES tx_sender_try_to_send_txs(id), + error_message TEXT NOT NULL, + timestamp TIMESTAMP NOT NULL DEFAULT NOW() +); +-- Table to store TX sending state +CREATE TABLE IF NOT EXISTS tx_sender_debug_sending_state ( + tx_id INT PRIMARY KEY REFERENCES tx_sender_try_to_send_txs(id), + state TEXT NOT NULL, + -- 'waiting_for_fee_payer_utxos', 'ready_to_send', 'sent', etc. + last_update TIMESTAMP NOT NULL DEFAULT NOW(), + activated_timestamp TIMESTAMP -- the time when the conditions for this tx were satisfied - null if the conditions are not satisfied. +); +-- Index for faster queries +CREATE INDEX IF NOT EXISTS tx_sender_debug_submission_errors_tx_id_idx ON tx_sender_debug_submission_errors(tx_id); +-- Table to store emergency stop signatures +CREATE TABLE IF NOT EXISTS emergency_stop_sigs ( + move_txid bytea primary key not null, + emergency_stop_tx bytea not null, + created_at timestamp not null default now() +); +COMMIT; diff --git a/core/src/database/state_machine.rs b/core/src/database/state_machine.rs new file mode 100644 index 000000000..811577cb6 --- /dev/null +++ b/core/src/database/state_machine.rs @@ -0,0 +1,255 @@ +//! # State Machine Related Database Operations +//! +//! This module includes database functions for persisting and loading state machines. 
+ +use bitcoin::XOnlyPublicKey; + +use super::{wrapper::XOnlyPublicKeyDB, Database, DatabaseTransaction}; +use crate::errors::BridgeError; +use crate::execute_query_with_tx; + +impl Database { + /// Saves state machines to the database with the current block height + /// + /// # Arguments + /// + /// * `tx` - Optional database transaction + /// * `kickoff_machines` - Vector of (state_json, kickoff_id, owner_type) tuples for kickoff machines + /// * `round_machines` - Vector of (state_json, operator_xonly_pk, owner_type) tuples for round machines + /// * `block_height` - Current block height + /// + /// # Errors + /// + /// Returns a `BridgeError` if the database operation fails + pub async fn save_state_machines( + &self, + mut tx: Option>, + kickoff_machines: Vec<(String, String)>, + round_machines: Vec<(String, XOnlyPublicKey)>, + block_height: i32, + owner_type: &str, + ) -> Result<(), BridgeError> { + // Save kickoff machines that are dirty + for (state_json, kickoff_id) in kickoff_machines { + let query = sqlx::query( + "INSERT INTO state_machines ( + machine_type, + state_json, + kickoff_id, + owner_type, + block_height, + created_at, + updated_at + ) VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) + ON CONFLICT (machine_type, kickoff_id, owner_type) + DO UPDATE SET + state_json = EXCLUDED.state_json, + block_height = EXCLUDED.block_height, + updated_at = NOW()", + ) + .bind("kickoff") + .bind(&state_json) + .bind(kickoff_id) + .bind(owner_type) + .bind(block_height); + + execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, execute)?; + } + + // Save round machines that are dirty + for (state_json, operator_xonly_pk) in round_machines { + let query = sqlx::query( + "INSERT INTO state_machines ( + machine_type, + state_json, + operator_xonly_pk, + owner_type, + block_height, + created_at, + updated_at + ) VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) + ON CONFLICT (machine_type, operator_xonly_pk, owner_type) + DO UPDATE SET + state_json = 
EXCLUDED.state_json, + block_height = EXCLUDED.block_height, + updated_at = NOW()", + ) + .bind("round") + .bind(&state_json) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)) + .bind(owner_type) + .bind(block_height); + + execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, execute)?; + } + + // Update state manager status + let query = sqlx::query( + "INSERT INTO state_manager_status ( + owner_type, + next_height_to_process, + updated_at + ) VALUES ($1, $2, NOW()) + ON CONFLICT (owner_type) + DO UPDATE SET + next_height_to_process = EXCLUDED.next_height_to_process, + updated_at = NOW()", + ) + .bind(owner_type) + .bind(block_height); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + /// Gets the last processed block height + /// + /// # Arguments + /// + /// * `tx` - Optional database transaction + /// + /// # Errors + /// + /// Returns a `BridgeError` if the database operation fails + pub async fn get_next_height_to_process( + &self, + tx: Option>, + owner_type: &str, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT next_height_to_process FROM state_manager_status WHERE owner_type = $1", + ) + .bind(owner_type); + + let result: Option<(i32,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + Ok(result.map(|(height,)| height)) + } + + /// Loads kickoff machines from the database + /// + /// # Arguments + /// + /// * `tx` - Optional database transaction + /// * `owner_type` - The owner type to filter by + /// + /// # Errors + /// + /// Returns a `BridgeError` if the database operation fails + pub async fn load_kickoff_machines( + &self, + tx: Option>, + owner_type: &str, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT + state_json, + kickoff_id, + block_height + FROM state_machines + WHERE machine_type = 'kickoff' AND owner_type = $1", + ) + .bind(owner_type); + + let results = execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + 
Ok(results) + } + + /// Loads round machines from the database + /// + /// # Arguments + /// + /// * `tx` - Optional database transaction + /// * `owner_type` - The owner type to filter by + /// + /// # Errors + /// + /// Returns a `BridgeError` if the database operation fails + pub async fn load_round_machines( + &self, + tx: Option>, + owner_type: &str, + ) -> Result, BridgeError> { + let query = sqlx::query_as( + "SELECT + state_json, + operator_xonly_pk, + block_height + FROM state_machines + WHERE machine_type = 'round' AND owner_type = $1", + ) + .bind(owner_type); + + let results: Vec<(String, XOnlyPublicKeyDB, i32)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + Ok(results + .into_iter() + .map(|(state_json, operator_xonly_pk, block_height)| { + (state_json, operator_xonly_pk.0, block_height) + }) + .collect()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test::common::*; + + #[tokio::test] + async fn test_save_and_load_state_machines() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + let xonly_pk1 = generate_random_xonly_pk(); + let xonly_pk2 = generate_random_xonly_pk(); + + // Create test data with owner_type + let owner_type = "test_owner"; + let kickoff_machines = vec![ + ("kickoff_state_1".to_string(), "kickoff_id_1".to_string()), + ("kickoff_state_2".to_string(), "kickoff_id_2".to_string()), + ]; + + let round_machines = vec![ + ("round_state_1".to_string(), xonly_pk1), + ("round_state_2".to_string(), xonly_pk2), + ]; + + // Save state machines + db.save_state_machines( + None, + kickoff_machines.clone(), + round_machines.clone(), + 123, + owner_type, + ) + .await + .unwrap(); + + // Check last processed block height + let block_height = db + .get_next_height_to_process(None, owner_type) + .await + .unwrap(); + assert_eq!(block_height, Some(123)); + + // Load kickoff machines + let loaded_kickoff = db.load_kickoff_machines(None, 
owner_type).await.unwrap(); + assert_eq!(loaded_kickoff.len(), 2); + assert_eq!(loaded_kickoff[0].0, "kickoff_state_1"); + assert_eq!(loaded_kickoff[0].1, "kickoff_id_1"); + assert_eq!(loaded_kickoff[0].2, 123); + + // Load round machines + let loaded_round = db.load_round_machines(None, owner_type).await.unwrap(); + assert_eq!(loaded_round.len(), 2); + assert_eq!(loaded_round[0].0, "round_state_1"); + assert_eq!(loaded_round[0].1, xonly_pk1); + assert_eq!(loaded_round[0].2, 123); + } +} diff --git a/core/src/database/test.rs b/core/src/database/test.rs new file mode 100644 index 000000000..6d6851e5e --- /dev/null +++ b/core/src/database/test.rs @@ -0,0 +1,299 @@ +use crate::database::DatabaseTransaction; +use crate::utils::TxMetadata; +use crate::{ + database::{Database, TxidDB}, + errors::BridgeError, + execute_query_with_tx, +}; +use bitcoin::{Amount, FeeRate, Txid}; +use eyre::Context; + +impl Database { + pub async fn get_fee_payer_utxos_for_tx( + &self, + tx: Option>, + tx_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (TxidDB, i32, i64)>( + r#" + SELECT fee_payer_txid, vout, amount + FROM tx_sender_fee_payer_utxos + WHERE bumped_id = $1 + "#, + ) + .bind(i32::try_from(tx_id).wrap_err("Failed to convert tx_id to i32")?); + + let results: Vec<(TxidDB, i32, i64)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + results + .iter() + .map(|(fee_payer_txid, vout, amount)| { + Ok(( + fee_payer_txid.0, + u32::try_from(*vout).wrap_err("Failed to convert vout to u32")?, + Amount::from_sat( + u64::try_from(*amount).wrap_err("Failed to convert amount to u64")?, + ), + )) + }) + .collect::, BridgeError>>() + } + + pub async fn get_id_from_txid( + &self, + tx: Option>, + txid: Txid, + ) -> Result, BridgeError> { + let query = sqlx::query_scalar::<_, i32>( + "SELECT id FROM tx_sender_try_to_send_txs WHERE txid = $1", + ) + .bind(TxidDB(txid)); + + let result = execute_query_with_tx!(self.connection, tx, query, 
fetch_optional)?; + match result { + Some(id) => Ok(Some( + u32::try_from(id).wrap_err("Failed to convert id to u32")?, + )), + None => Ok(None), + } + } + + pub async fn debug_inactive_txs(&self, fee_rate: FeeRate, current_tip_height: u32) { + tracing::info!("TXSENDER_DBG_INACTIVE_TXS: Checking inactive transactions"); + + // Query all transactions that aren't confirmed yet + let unconfirmed_txs = match sqlx::query_as::<_, (i32, TxidDB, Option)>( + "SELECT id, txid, tx_metadata FROM tx_sender_try_to_send_txs WHERE seen_block_id IS NULL", + ) + .fetch_all(&self.connection) + .await + { + Ok(txs) => txs, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to query unconfirmed txs: {}", + e + ); + return; + } + }; + + let sendable_txs = match self + .get_sendable_txs(None, fee_rate, current_tip_height) + .await + { + Ok(txs) => txs, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to get sendable txs: {}", + e + ); + return; + } + }; + + for (tx_id, txid, tx_metadata) in unconfirmed_txs { + let tx_metadata: Option = + serde_json::from_str(tx_metadata.as_deref().unwrap_or("null")).ok(); + + let id = match u32::try_from(tx_id) { + Ok(id) => id, + Err(e) => { + tracing::error!("TXSENDER_DBG_INACTIVE_TXS: Failed to convert id: {}", e); + continue; + } + }; + + if sendable_txs.contains(&id) { + tracing::info!( + "TXSENDER_DBG_INACTIVE_TXS: TX {} (txid: {}) is ACTIVE", + id, + txid.0 + ); + continue; + } + + tracing::info!( + "TXSENDER_DBG_INACTIVE_TXS: TX {} (txid: {}, type: {:?}) is inactive, reasons:", + id, + txid.0, + tx_metadata.map(|metadata| metadata.tx_type) + ); + + // Check for txid activations that aren't active yet + let txid_activations = match sqlx::query_as::<_, (Option, i64, TxidDB)>( + "SELECT seen_block_id, timelock, txid + FROM tx_sender_activate_try_to_send_txids + WHERE activated_id = $1", + ) + .bind(tx_id) + .fetch_all(&self.connection) + .await + { + Ok(activations) => activations, + Err(e) => { + 
tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to query txid activations: {}", + e + ); + continue; + } + }; + + for (seen_block_id, timelock, txid) in txid_activations { + if seen_block_id.is_none() { + tracing::info!("TXSENDER_DBG_INACTIVE_TXS: TX {} is inactive because its txid activation {} has not been seen", id, txid.0); + continue; + } + + let block_height = match sqlx::query_scalar::<_, i32>( + "SELECT height FROM bitcoin_syncer WHERE id = $1", + ) + .bind(seen_block_id.expect("it is unwrapped")) + .fetch_one(&self.connection) + .await + { + Ok(height) => height, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to get block height: {}", + e + ); + continue; + } + }; + + if block_height + timelock as i32 > current_tip_height as i32 { + tracing::info!( + "TXSENDER_DBG_INACTIVE_TXS: TX {} is inactive because its txid activation timelock hasn't expired (block_height: {}, timelock: {}, current_tip_height: {})", + id, block_height, timelock, current_tip_height + ); + } + } + + // Check for outpoint activations that aren't active yet + let outpoint_activations = match sqlx::query_as::<_, (Option, i64, TxidDB, i32)>( + "SELECT seen_block_id, timelock, txid, vout + FROM tx_sender_activate_try_to_send_outpoints + WHERE activated_id = $1", + ) + .bind(tx_id) + .fetch_all(&self.connection) + .await + { + Ok(activations) => activations, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to query outpoint activations: {}", + e + ); + continue; + } + }; + + for (seen_block_id, timelock, txid, vout) in outpoint_activations { + if seen_block_id.is_none() { + tracing::info!("TXSENDER_DBG_INACTIVE_TXS: TX {} is inactive because its outpoint activation has not been seen ({}:{})", id, txid.0, vout); + continue; + } + + let block_height = match sqlx::query_scalar::<_, i32>( + "SELECT height FROM bitcoin_syncer WHERE id = $1", + ) + .bind(seen_block_id.expect("it is unwrapped")) + .fetch_one(&self.connection) + .await + { + 
Ok(height) => height, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to get block height: {}", + e + ); + continue; + } + }; + + if block_height + timelock as i32 > current_tip_height as i32 { + tracing::info!( + "TXSENDER_DBG_INACTIVE_TXS: TX {} is inactive because its outpoint activation timelock hasn't expired (block_height: {}, timelock: {}, current_tip_height: {})", + id, block_height, timelock, current_tip_height + ); + } + } + + // Check for cancelled conditions + let cancelled_outpoints = match sqlx::query_scalar::<_, i64>( + "SELECT COUNT(*) FROM tx_sender_cancel_try_to_send_outpoints + WHERE cancelled_id = $1 AND seen_block_id IS NOT NULL", + ) + .bind(tx_id) + .fetch_one(&self.connection) + .await + { + Ok(count) => count, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to query cancelled outpoints: {}", + e + ); + continue; + } + }; + + if cancelled_outpoints > 0 { + tracing::info!("TXSENDER_DBG_INACTIVE_TXS: TX {} is inactive because it has {} cancelled outpoints", id, cancelled_outpoints); + } + + let cancelled_txids = match sqlx::query_scalar::<_, i64>( + "SELECT COUNT(*) FROM tx_sender_cancel_try_to_send_txids + WHERE cancelled_id = $1 AND seen_block_id IS NOT NULL", + ) + .bind(tx_id) + .fetch_one(&self.connection) + .await + { + Ok(count) => count, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to query cancelled txids: {}", + e + ); + continue; + } + }; + + if cancelled_txids > 0 { + tracing::info!("TXSENDER_DBG_INACTIVE_TXS: TX {} is inactive because it has {} cancelled txids", id, cancelled_txids); + } + + // Check fee rate + let effective_fee_rate = match sqlx::query_scalar::<_, Option>( + "SELECT effective_fee_rate FROM tx_sender_try_to_send_txs WHERE id = $1", + ) + .bind(tx_id) + .fetch_one(&self.connection) + .await + { + Ok(rate) => rate, + Err(e) => { + tracing::error!( + "TXSENDER_DBG_INACTIVE_TXS: Failed to query effective fee rate: {}", + e + ); + continue; + } + }; + 
+ if let Some(rate) = effective_fee_rate { + if rate >= fee_rate.to_sat_per_vb_ceil() as i64 { + tracing::info!( + "TXSENDER_DBG_INACTIVE_TXS: TX {} is inactive because its effective fee rate ({} sat/vB) is >= the current fee rate ({} sat/vB)", + id, rate, fee_rate.to_sat_per_vb_ceil() + ); + } + } + } + } +} diff --git a/core/src/database/tx_sender.rs b/core/src/database/tx_sender.rs new file mode 100644 index 000000000..798eace4d --- /dev/null +++ b/core/src/database/tx_sender.rs @@ -0,0 +1,1258 @@ +//! # Transaction Sender Related Database Operations +//! +//! This module includes database functions which are mainly used by the transaction sender. + +use super::{wrapper::TxidDB, Database, DatabaseTransaction}; +use crate::{ + errors::BridgeError, + execute_query_with_tx, + tx_sender::{ActivatedWithOutpoint, ActivatedWithTxid}, + utils::{FeePayingType, RbfSigningInfo, TxMetadata}, +}; +use bitcoin::{ + consensus::{deserialize, serialize}, + Amount, FeeRate, Transaction, Txid, +}; +use eyre::{Context, OptionExt}; +use sqlx::Executor; +use std::ops::DerefMut; + +impl Database { + /// Set all transactions' `seen_block_id` to the given block id. This will + /// be called once a block is confirmed on the Bitcoin side. + pub async fn confirm_transactions( + &self, + tx: DatabaseTransaction<'_, '_>, + block_id: u32, + ) -> Result<(), BridgeError> { + let block_id = i32::try_from(block_id).wrap_err("Failed to convert block id to i32")?; + + // CTEs for collecting a block's transactions, spent UTXOs and confirmed + // RBF transactions. 
+ let common_ctes = r#" + WITH relevant_txs AS ( + SELECT txid + FROM bitcoin_syncer_txs + WHERE block_id = $1 + ), + relevant_spent_utxos AS ( + SELECT txid, vout + FROM bitcoin_syncer_spent_utxos + WHERE block_id = $1 + ), + confirmed_rbf_ids AS ( + SELECT rbf.id + FROM tx_sender_rbf_txids AS rbf + JOIN bitcoin_syncer_txs AS syncer ON rbf.txid = syncer.txid + WHERE syncer.block_id = $1 + ) + "#; + + // Update tx_sender_activate_try_to_send_txids + sqlx::query(&format!( + "{} + UPDATE tx_sender_activate_try_to_send_txids AS tap + SET seen_block_id = $1 + WHERE tap.txid IN (SELECT txid FROM relevant_txs) + AND tap.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_activate_try_to_send_outpoints + sqlx::query(&format!( + "{} + UPDATE tx_sender_activate_try_to_send_outpoints AS tap + SET seen_block_id = $1 + WHERE (tap.txid, tap.vout) IN (SELECT txid, vout FROM relevant_spent_utxos) + AND tap.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_cancel_try_to_send_txids + sqlx::query(&format!( + "{} + UPDATE tx_sender_cancel_try_to_send_txids AS ctt + SET seen_block_id = $1 + WHERE ctt.txid IN (SELECT txid FROM relevant_txs) + AND ctt.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_cancel_try_to_send_outpoints + sqlx::query(&format!( + "{} + UPDATE tx_sender_cancel_try_to_send_outpoints AS cto + SET seen_block_id = $1 + WHERE (cto.txid, cto.vout) IN (SELECT txid, vout FROM relevant_spent_utxos) + AND cto.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_fee_payer_utxos + sqlx::query(&format!( + "{} + UPDATE tx_sender_fee_payer_utxos AS fpu + SET seen_block_id = $1 + WHERE fpu.fee_payer_txid IN (SELECT txid FROM relevant_txs) + AND fpu.seen_block_id IS NULL", + common_ctes + )) + 
.bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_try_to_send_txs for CPFP txid confirmation + sqlx::query(&format!( + "{} + UPDATE tx_sender_try_to_send_txs AS txs + SET seen_block_id = $1 + WHERE txs.txid IN (SELECT txid FROM relevant_txs) + AND txs.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_try_to_send_txs for RBF txid confirmation + sqlx::query(&format!( + "{} + UPDATE tx_sender_try_to_send_txs AS txs + SET seen_block_id = $1 + WHERE txs.id IN (SELECT id FROM confirmed_rbf_ids) + AND txs.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + let bg_db = self.clone(); + // Update debug information in the background to not block core behavior + tokio::spawn(async move { + // Get confirmed direct transactions for debugging + let Ok(confirmed_direct_txs): Result, _> = sqlx::query_as(&format!( + "{} + SELECT txs.id, txs.txid + FROM tx_sender_try_to_send_txs AS txs + WHERE txs.txid IN (SELECT txid FROM relevant_txs) + AND txs.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .fetch_all(&bg_db.connection) + .await + else { + tracing::error!("Failed to update debug info for confirmed txs"); + return; + }; + + // Get confirmed RBF transactions for debugging + let Ok(confirmed_rbf_txs): Result, _> = sqlx::query_as(&format!( + "{} + SELECT txs.id + FROM tx_sender_try_to_send_txs AS txs + WHERE txs.id IN (SELECT id FROM confirmed_rbf_ids) + AND txs.seen_block_id IS NULL", + common_ctes + )) + .bind(block_id) + .fetch_all(&bg_db.connection) + .await + else { + tracing::error!("Failed to update debug info for confirmed txs"); + return; + }; + + // Record debug info for confirmed transactions + for (tx_id, txid) in confirmed_direct_txs { + // Add debug state change + tracing::debug!(try_to_send_id=?tx_id, "Transaction confirmed in block {}: direct confirmation of txid {}", + block_id, txid.0); + + // Update 
sending state + let _ = bg_db + .update_tx_debug_sending_state(tx_id as u32, "confirmed", true) + .await; + } + + // Record debug info for confirmed RBF transactions + for (tx_id,) in confirmed_rbf_txs { + // Add debug state change + tracing::debug!(try_to_send_id=?tx_id, "Transaction confirmed in block {}: RBF confirmation", + block_id); + + // Update sending state + let _ = bg_db + .update_tx_debug_sending_state(tx_id as u32, "confirmed", true) + .await; + } + }); + + Ok(()) + } + + /// Unassigns `seen_block_id` from all transactions in the given block id. + /// By default, all transactions' `seen_block_id` is set to NULL. And they + /// get assigned a block id when they are confirmed on Bitcoin side. If a + /// reorg happens, block ids must be unassigned from all transactions. + pub async fn unconfirm_transactions( + &self, + tx: DatabaseTransaction<'_, '_>, + block_id: u32, + ) -> Result<(), BridgeError> { + let block_id = i32::try_from(block_id).wrap_err("Failed to convert block id to i32")?; + + // Need to get these before they're unconfirmed below, so that we can update the debug info + // Ignore the error here to not affect production behavior. 
+ let previously_confirmed_txs = sqlx::query_as::<_, (i32,)>( + "SELECT id FROM tx_sender_try_to_send_txs WHERE seen_block_id = $1", + ) + .bind(block_id) + .fetch_all(tx.deref_mut()) + .await; + + let bg_db = self.clone(); + tokio::spawn(async move { + let previously_confirmed_txs = match previously_confirmed_txs { + Ok(txs) => txs, + Err(e) => { + tracing::error!(error=?e, "Failed to get previously confirmed txs from database"); + return; + } + }; + + for (tx_id,) in previously_confirmed_txs { + tracing::debug!(try_to_send_id=?tx_id, "Transaction unconfirmed in block {}: unconfirming", block_id); + let _ = bg_db + .update_tx_debug_sending_state(tx_id as u32, "unconfirmed", false) + .await; + } + }); + + // Unconfirm tx_sender_fee_payer_utxos + // Update tx_sender_activate_try_to_send_txids + sqlx::query( + "UPDATE tx_sender_activate_try_to_send_txids AS tap + SET seen_block_id = NULL + WHERE tap.seen_block_id = $1", + ) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_activate_try_to_send_outpoints + sqlx::query( + "UPDATE tx_sender_activate_try_to_send_outpoints AS tap + SET seen_block_id = NULL + WHERE tap.seen_block_id = $1", + ) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_cancel_try_to_send_txids + sqlx::query( + "UPDATE tx_sender_cancel_try_to_send_txids AS ctt + SET seen_block_id = NULL + WHERE ctt.seen_block_id = $1", + ) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_cancel_try_to_send_outpoints + sqlx::query( + "UPDATE tx_sender_cancel_try_to_send_outpoints AS cto + SET seen_block_id = NULL + WHERE cto.seen_block_id = $1", + ) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_fee_payer_utxos + sqlx::query( + "UPDATE tx_sender_fee_payer_utxos AS fpu + SET seen_block_id = NULL + WHERE fpu.seen_block_id = $1", + ) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + // Update tx_sender_try_to_send_txs + sqlx::query( + 
"UPDATE tx_sender_try_to_send_txs AS txs + SET seen_block_id = NULL + WHERE txs.seen_block_id = $1", + ) + .bind(block_id) + .execute(tx.deref_mut()) + .await?; + + Ok(()) + } + + /// Saves a fee payer transaction to the database. + /// + /// # Arguments + /// * `bumped_id` - The id of the bumped transaction + /// * `fee_payer_txid` - The txid of the fee payer transaction + /// * `vout` - The output index of the UTXO + /// * `script_pubkey` - The script pubkey of the UTXO + /// * `amount` - The amount in satoshis + pub async fn save_fee_payer_tx( + &self, + tx: Option>, + bumped_id: u32, + fee_payer_txid: Txid, + vout: u32, + amount: Amount, + replacement_of_id: Option, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO tx_sender_fee_payer_utxos (bumped_id, fee_payer_txid, vout, amount, replacement_of_id) + VALUES ($1, $2, $3, $4, $5)", + ) + .bind(i32::try_from(bumped_id).wrap_err("Failed to convert bumped id to i32")?) + .bind(TxidDB(fee_payer_txid)) + .bind(i32::try_from(vout).wrap_err("Failed to convert vout to i32")?) + .bind(i64::try_from(amount.to_sat()).wrap_err("Failed to convert amount to i64")?) + .bind(replacement_of_id.map( i32::try_from).transpose().wrap_err("Failed to convert replacement of id to i32")?); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + /// Returns all unconfirmed fee payer transactions for a try-to-send tx. + /// Replaced (bumped) fee payers are not included. + /// + /// # Parameters + /// + /// - `bumped_id`: The id of the bumped transaction + /// + /// # Returns + /// + /// A vector of unconfirmed fee payer transaction details, including: + /// + /// - [`u32`]: Id of the fee payer transaction. + /// - [`Txid`]: Txid of the fee payer transaction. + /// - [`u32`]: Output index of the UTXO. + /// - [`Amount`]: Amount in satoshis. 
+ pub async fn get_unconfirmed_fee_payer_txs( + &self, + tx: Option>, + bumped_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (i32, TxidDB, i32, i64)>( + " + SELECT fpu.id, fpu.fee_payer_txid, fpu.vout, fpu.amount + FROM tx_sender_fee_payer_utxos fpu + WHERE fpu.bumped_id = $1 + AND fpu.seen_block_id IS NULL + AND NOT EXISTS ( + SELECT 1 + FROM tx_sender_fee_payer_utxos replacement + WHERE replacement.replacement_of_id = fpu.id + ) + ", + ) + .bind(i32::try_from(bumped_id).wrap_err("Failed to convert bumped id to i32")?); + + let results: Vec<(i32, TxidDB, i32, i64)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + results + .iter() + .map(|(id, fee_payer_txid, vout, amount)| { + Ok(( + u32::try_from(*id).wrap_err("Failed to convert id to u32")?, + fee_payer_txid.0, + u32::try_from(*vout).wrap_err("Failed to convert vout to u32")?, + Amount::from_sat( + u64::try_from(*amount).wrap_err("Failed to convert amount to u64")?, + ), + )) + }) + .collect::, BridgeError>>() + } + + pub async fn get_confirmed_fee_payer_utxos( + &self, + tx: Option>, + id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (TxidDB, i32, i64)>( + "SELECT fee_payer_txid, vout, amount + FROM tx_sender_fee_payer_utxos fpu + WHERE fpu.bumped_id = $1 AND fpu.seen_block_id IS NOT NULL", + ) + .bind(i32::try_from(id).wrap_err("Failed to convert id to i32")?); + + let results: Vec<(TxidDB, i32, i64)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + results + .iter() + .map(|(fee_payer_txid, vout, amount)| { + Ok(( + fee_payer_txid.0, + u32::try_from(*vout).wrap_err("Failed to convert vout to u32")?, + Amount::from_sat( + u64::try_from(*amount).wrap_err("Failed to convert amount to u64")?, + ), + )) + }) + .collect::, BridgeError>>() + } + + /// Returns the id of the tx in `tx_sender_try_to_send_txs` if it exists. + /// Used to avoid adding duplicate transactions to the txsender. 
+ pub async fn check_if_tx_exists_on_txsender( + &self, + tx: Option>, + txid: Txid, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (i32,)>( + "SELECT id FROM tx_sender_try_to_send_txs WHERE txid = $1 LIMIT 1", + ) + .bind(TxidDB(txid)); + + let result: Option<(i32,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + Ok(match result { + Some((id,)) => Some(u32::try_from(id).wrap_err("Failed to convert id to u32")?), + None => None, + }) + } + + pub async fn save_tx( + &self, + tx: Option>, + tx_metadata: Option, + raw_tx: &Transaction, + fee_paying_type: FeePayingType, + txid: Txid, + rbf_signing_info: Option, + ) -> Result { + let query = sqlx::query_scalar( + "INSERT INTO tx_sender_try_to_send_txs (raw_tx, fee_paying_type, tx_metadata, txid, rbf_signing_info) VALUES ($1, $2::fee_paying_type, $3, $4, $5) RETURNING id" + ) + .bind(serialize(raw_tx)) + .bind(fee_paying_type) + .bind(serde_json::to_string(&tx_metadata).wrap_err("Failed to encode tx_metadata to JSON")?) + .bind(TxidDB(txid)) + .bind(serde_json::to_string(&rbf_signing_info).wrap_err("Failed to encode tx_metadata to JSON")?); + + let id: i32 = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + u32::try_from(id) + .wrap_err("Failed to convert id to u32") + .map_err(Into::into) + } + + pub async fn save_rbf_txid( + &self, + tx: Option>, + id: u32, + txid: Txid, + ) -> Result<(), BridgeError> { + let query = sqlx::query("INSERT INTO tx_sender_rbf_txids (id, txid) VALUES ($1, $2)") + .bind(i32::try_from(id).wrap_err("Failed to convert id to i32")?) 
+ .bind(TxidDB(txid)); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + pub async fn get_last_rbf_txid( + &self, + tx: Option>, + id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (TxidDB,)>("SELECT txid FROM tx_sender_rbf_txids WHERE id = $1 ORDER BY insertion_order DESC LIMIT 1") + .bind(i32::try_from(id).wrap_err("Failed to convert id to i32")?); + + let result: Option<(TxidDB,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + Ok(result.map(|(txid,)| txid.0)) + } + + pub async fn save_cancelled_outpoint( + &self, + tx: Option>, + cancelled_id: u32, + outpoint: bitcoin::OutPoint, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO tx_sender_cancel_try_to_send_outpoints (cancelled_id, txid, vout) VALUES ($1, $2, $3)" + ) + .bind(i32::try_from(cancelled_id).wrap_err("Failed to convert cancelled id to i32")?) + .bind(TxidDB(outpoint.txid)) + .bind(i32::try_from(outpoint.vout).wrap_err("Failed to convert vout to i32")?); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + pub async fn save_cancelled_txid( + &self, + tx: Option>, + cancelled_id: u32, + txid: bitcoin::Txid, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO tx_sender_cancel_try_to_send_txids (cancelled_id, txid) VALUES ($1, $2)", + ) + .bind(i32::try_from(cancelled_id).wrap_err("Failed to convert cancelled id to i32")?) + .bind(TxidDB(txid)); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + pub async fn save_activated_txid( + &self, + tx: Option>, + activated_id: u32, + prerequisite_tx: &ActivatedWithTxid, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO tx_sender_activate_try_to_send_txids (activated_id, txid, timelock) VALUES ($1, $2, $3)" + ) + .bind(i32::try_from(activated_id).wrap_err("Failed to convert activated id to i32")?) 
+ .bind(TxidDB(prerequisite_tx.txid)) + .bind(i32::try_from(prerequisite_tx.relative_block_height).wrap_err("Failed to convert relative block height to i32")?); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + pub async fn save_activated_outpoint( + &self, + tx: Option>, + activated_id: u32, + activated_outpoint: &ActivatedWithOutpoint, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO tx_sender_activate_try_to_send_outpoints (activated_id, txid, vout, timelock) VALUES ($1, $2, $3, $4)" + ) + .bind(i32::try_from(activated_id).wrap_err("Failed to convert activated id to i32")?) + .bind(TxidDB(activated_outpoint.outpoint.txid)) + .bind(i32::try_from(activated_outpoint.outpoint.vout).wrap_err("Failed to convert vout to i32")?) + .bind(i32::try_from(activated_outpoint.relative_block_height).wrap_err("Failed to convert relative block height to i32")?); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + /// Returns unconfirmed try-to-send transactions that satisfy all activation + /// conditions for sending: + /// + /// - Not in the non-active list + /// - Not in the cancelled list + /// - Transaction itself is not already confirmed + /// - Transaction and UTXO timelocks must be passed + /// - Fee rate is lower than the provided fee rate or null (deprecated) + /// + /// # Parameters + /// + /// - `tx`: Optional database transaction + /// - `fee_rate`: Maximum fee rate for the transactions to be sendable + /// - `current_tip_height`: The current tip height of the Bitcoin blockchain + /// for checking timelocks + /// + /// # Returns + /// + /// - [`Vec`]: A vector of transaction ids (db id) that are sendable. 
+ pub async fn get_sendable_txs( + &self, + tx: Option>, + fee_rate: FeeRate, + current_tip_height: u32, + ) -> Result, BridgeError> { + let select_query = sqlx::query_as::<_, (i32,)>( + "WITH + -- Find non-active transactions (not seen or timelock not passed) + non_active_txs AS ( + -- Transactions with txid activations that aren't active yet + SELECT DISTINCT + activate_txid.activated_id AS tx_id + FROM + tx_sender_activate_try_to_send_txids AS activate_txid + LEFT JOIN + bitcoin_syncer AS syncer ON activate_txid.seen_block_id = syncer.id + WHERE + activate_txid.seen_block_id IS NULL + OR (syncer.height + activate_txid.timelock > $2) + + UNION + + -- Transactions with outpoint activations that aren't active yet (not seen or timelock not passed) + SELECT DISTINCT + activate_outpoint.activated_id AS tx_id + FROM + tx_sender_activate_try_to_send_outpoints AS activate_outpoint + LEFT JOIN + bitcoin_syncer AS syncer ON activate_outpoint.seen_block_id = syncer.id + WHERE + activate_outpoint.seen_block_id IS NULL + OR (syncer.height + activate_outpoint.timelock > $2) + ), + + -- Transactions with cancelled conditions + cancelled_txs AS ( + -- Transactions with cancelled outpoints (not seen) + SELECT DISTINCT + cancelled_id AS tx_id + FROM + tx_sender_cancel_try_to_send_outpoints + WHERE + seen_block_id IS NOT NULL + + UNION + + -- Transactions with cancelled txids (not seen) + SELECT DISTINCT + cancelled_id AS tx_id + FROM + tx_sender_cancel_try_to_send_txids + WHERE + seen_block_id IS NOT NULL + ) + + -- Final query to get sendable transactions + SELECT + txs.id + FROM + tx_sender_try_to_send_txs AS txs + WHERE + -- Transaction must not be in the non-active list + txs.id NOT IN (SELECT tx_id FROM non_active_txs) + -- Transaction must not be in the cancelled list + AND txs.id NOT IN (SELECT tx_id FROM cancelled_txs) + -- Transaction must not be already confirmed + AND txs.seen_block_id IS NULL + -- Check if fee_rate is lower than the provided fee rate or null + AND 
(txs.effective_fee_rate IS NULL OR txs.effective_fee_rate < $1);", + ) + .bind( + i64::try_from(fee_rate.to_sat_per_vb_ceil()) + .wrap_err("Failed to convert fee rate to i64")?, + ) + .bind( + i32::try_from(current_tip_height) + .wrap_err("Failed to convert current tip height to i32")?, + ); + + let results = execute_query_with_tx!(self.connection, tx, select_query, fetch_all)?; + + let txs = results + .into_iter() + .map(|(id,)| u32::try_from(id)) + .collect::, _>>() + .wrap_err("Failed to convert id to u32")?; + + Ok(txs) + } + + pub async fn update_effective_fee_rate( + &self, + tx: Option>, + id: u32, + effective_fee_rate: FeeRate, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "UPDATE tx_sender_try_to_send_txs SET effective_fee_rate = $1 WHERE id = $2", + ) + .bind( + i64::try_from(effective_fee_rate.to_sat_per_vb_ceil()) + .wrap_err("Failed to convert effective fee rate to i64")?, + ) + .bind(i32::try_from(id).wrap_err("Failed to convert id to i32")?); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + pub async fn get_try_to_send_tx( + &self, + tx: Option>, + id: u32, + ) -> Result< + ( + Option, + Transaction, + FeePayingType, + Option, + Option, + ), + BridgeError, + > { + let query = sqlx::query_as::< + _, + ( + Option, + Option>, + FeePayingType, + Option, + Option, + ), + >( + "SELECT tx_metadata, raw_tx, fee_paying_type, seen_block_id, rbf_signing_info + FROM tx_sender_try_to_send_txs + WHERE id = $1 LIMIT 1", + ) + .bind(i32::try_from(id).wrap_err("Failed to convert id to i32")?); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + Ok(( + serde_json::from_str(result.0.as_deref().unwrap_or("null")) + .wrap_err_with(|| format!("Failed to decode tx_metadata from {:?}", result.0))?, + result + .1 + .as_deref() + .map(deserialize) + .ok_or_eyre("Expected raw_tx to be present")? 
+ .wrap_err("Failed to deserialize raw_tx")?, + result.2, + result + .3 + .map(u32::try_from) + .transpose() + .wrap_err("Failed to convert seen_block_id to u32")?, + serde_json::from_str(result.4.as_deref().unwrap_or("null")).wrap_err_with(|| { + format!("Failed to decode rbf_signing_info from {:?}", result.4) + })?, + )) + } + + // Debug Functions + + /// Saves a TX submission error to the debug table + pub async fn save_tx_debug_submission_error( + &self, + tx_id: u32, + error_message: &str, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO tx_sender_debug_submission_errors (tx_id, error_message) VALUES ($1, $2)", + ) + .bind(i32::try_from(tx_id).wrap_err("Failed to convert tx_id to i32")?) + .bind(error_message); + + self.connection.execute(query).await?; + Ok(()) + } + + /// Updates or inserts the TX's sending state in the debug table + /// + /// Does not support a Transaction because it's for debugging purposes. Make + /// sure that tx_id exists (i.e. creation is committed) before use + pub async fn update_tx_debug_sending_state( + &self, + tx_id: u32, + state: &str, + activated: bool, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + r#" + INSERT INTO tx_sender_debug_sending_state + (tx_id, state, last_update, activated_timestamp) + VALUES ($1, $2, NOW(), + CASE + WHEN $3 = TRUE THEN NOW() + ELSE NULL + END + ) + ON CONFLICT (tx_id) DO UPDATE SET + state = $2, + last_update = NOW(), + activated_timestamp = COALESCE(tx_sender_debug_sending_state.activated_timestamp, + CASE + WHEN $3 = TRUE THEN NOW() + ELSE NULL + END + ) + "#, + ) + .bind(i32::try_from(tx_id).wrap_err("Failed to convert tx_id to i32")?) 
+ .bind(state) + .bind(activated); + + self.connection.execute(query).await?; + Ok(()) + } + + /// Gets the current debug state of a TX + pub async fn get_tx_debug_info( + &self, + tx: Option>, + tx_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (Option,)>( + r#" + SELECT state + FROM tx_sender_debug_sending_state + WHERE tx_id = $1 + "#, + ) + .bind(i32::try_from(tx_id).wrap_err("Failed to convert tx_id to i32")?); + + let result = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + match result { + Some((state,)) => Ok(state), + None => Ok(None), + } + } + + /// Gets all TX submission errors + pub async fn get_tx_debug_submission_errors( + &self, + tx: Option>, + tx_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (String, String)>( + r#" + SELECT error_message, timestamp::TEXT + FROM tx_sender_debug_submission_errors + WHERE tx_id = $1 + ORDER BY timestamp ASC + "#, + ) + .bind(i32::try_from(tx_id).wrap_err("Failed to convert tx_id to i32")?); + + execute_query_with_tx!(self.connection, tx, query, fetch_all).map_err(Into::into) + } + + /// Gets all fee payer UTXOs for a TX with their confirmation status + pub async fn get_tx_debug_fee_payer_utxos( + &self, + tx: Option>, + tx_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (TxidDB, i32, i64, Option)>( + r#" + SELECT fee_payer_txid, vout, amount, seen_block_id IS NOT NULL as confirmed + FROM tx_sender_fee_payer_utxos + WHERE bumped_id = $1 + "#, + ) + .bind(i32::try_from(tx_id).wrap_err("Failed to convert tx_id to i32")?); + + let results: Vec<(TxidDB, i32, i64, Option)> = + execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + results + .iter() + .map(|(fee_payer_txid, vout, amount, confirmed)| { + Ok(( + fee_payer_txid.0, + u32::try_from(*vout).wrap_err("Failed to convert vout to u32")?, + Amount::from_sat( + u64::try_from(*amount).wrap_err("Failed to convert amount to u64")?, + ), + confirmed.is_some(), 
+ )) + }) + .collect::, BridgeError>>() + } + + /// Purges debug information for a successfully sent TX + pub async fn purge_tx_debug_info( + &self, + mut tx: Option>, + tx_id: u32, + ) -> Result<(), BridgeError> { + let queries = [ + "DELETE FROM tx_sender_debug_state_changes WHERE tx_id = $1", + "DELETE FROM tx_sender_debug_submission_errors WHERE tx_id = $1", + "DELETE FROM tx_sender_debug_sending_state WHERE tx_id = $1", + ]; + + for query_str in queries { + let query = sqlx::query(query_str) + .bind(i32::try_from(tx_id).wrap_err("Failed to convert tx_id to i32")?); + + execute_query_with_tx!(self.connection, tx.as_deref_mut(), query, execute)?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::database::Database; + use crate::test::common::*; + use bitcoin::absolute::Height; + use bitcoin::hashes::Hash; + use bitcoin::transaction::Version; + use bitcoin::{Block, OutPoint, TapNodeHash, Txid}; + + async fn setup_test_db() -> Database { + let config = create_test_config_with_thread_name().await; + Database::new(&config).await.unwrap() + } + + #[tokio::test] + async fn test_save_and_get_tx() { + let db = setup_test_db().await; + let tx = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: vec![], + output: vec![], + }; + + // Test saving tx + let txid = tx.compute_txid(); + let rbfinfo = Some(RbfSigningInfo { + vout: 123, + tweak_merkle_root: Some(TapNodeHash::all_zeros()), + annex: None, + additional_taproot_output_count: None, + }); + let id = db + .save_tx(None, None, &tx, FeePayingType::CPFP, txid, rbfinfo.clone()) + .await + .unwrap(); + + // Test retrieving tx + let (_, retrieved_tx, fee_paying_type, seen_block_id, rbf_signing_info) = + db.get_try_to_send_tx(None, id).await.unwrap(); + assert_eq!(tx.version, retrieved_tx.version); + assert_eq!(fee_paying_type, FeePayingType::CPFP); + assert_eq!(seen_block_id, None); + assert_eq!(rbf_signing_info, rbfinfo); + } + + 
#[tokio::test] + async fn test_fee_payer_utxo_operations() { + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // First create a transaction that will be bumped + let tx = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: vec![], + output: vec![], + }; + + // Save the transaction first + let tx_id = db + .save_tx( + Some(&mut dbtx), + None, + &tx, + FeePayingType::CPFP, + Txid::all_zeros(), + None, + ) + .await + .unwrap(); + + // Now we can use this tx_id as bumped_id + let fee_payer_txid = Txid::hash(&[1u8; 32]); + db.save_fee_payer_tx( + Some(&mut dbtx), + tx_id, + fee_payer_txid, + 0, + Amount::from_sat(50000), + None, + ) + .await + .unwrap(); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn test_confirm_and_unconfirm_transactions() { + const BLOCK_HEX: &str = "0200000035ab154183570282ce9afc0b494c9fc6a3cfea05aa8c1add2ecc56490000000038ba3d78e4500a5a7570dbe61960398add4410d278b21cd9708e6d9743f374d544fc055227f1001c29c1ea3b0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff3703a08601000427f1001c046a510100522cfabe6d6d0000000000000000000068692066726f6d20706f6f6c7365727665726aac1eeeed88ffffffff0100f2052a010000001976a914912e2b234f941f30b18afbb4fa46171214bf66c888ac00000000"; + let block: Block = deserialize(&hex::decode(BLOCK_HEX).unwrap()).unwrap(); + + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // Create a block to use for confirmation + let block_id = crate::bitcoin_syncer::save_block(&db, &mut dbtx, &block, 100) + .await + .unwrap(); + + // Create a transaction + let tx = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: vec![], + output: vec![], + }; + let tx_id = db + .save_tx( + Some(&mut dbtx), + None, + &tx, + FeePayingType::CPFP, + Txid::all_zeros(), + None, + ) + .await + .unwrap(); + + // Save 
fee payer UTXO + let fee_payer_txid = Txid::hash(&[1u8; 32]); + db.save_fee_payer_tx( + Some(&mut dbtx), + tx_id, + fee_payer_txid, + 0, + Amount::from_sat(50000), + None, + ) + .await + .unwrap(); + + // Save the transaction in the block + db.insert_txid_to_block(&mut dbtx, block_id, &fee_payer_txid) + .await + .unwrap(); + + // Confirm transactions + db.confirm_transactions(&mut dbtx, block_id).await.unwrap(); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn test_cancelled_outpoints_and_txids() { + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // First create a transaction to cancel + let tx = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: vec![], + output: vec![], + }; + + // Save the transaction first + let tx_id = db + .save_tx( + Some(&mut dbtx), + None, + &tx, + FeePayingType::CPFP, + Txid::all_zeros(), + None, + ) + .await + .unwrap(); + + // Now we can use this tx_id as cancelled_id + let txid = Txid::hash(&[0u8; 32]); + let vout = 0; + + // Test cancelling by outpoint + db.save_cancelled_outpoint(Some(&mut dbtx), tx_id, OutPoint { txid, vout }) + .await + .unwrap(); + + // Test cancelling by txid + db.save_cancelled_txid(Some(&mut dbtx), tx_id, txid) + .await + .unwrap(); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn test_get_sendable_txs() { + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // Create and save test transactions + let tx1 = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: vec![], + output: vec![], + }; + let tx2 = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: vec![], + output: vec![], + }; + + let id1 = db + .save_tx( + Some(&mut dbtx), + None, + &tx1, + FeePayingType::CPFP, + Txid::all_zeros(), + None, + ) + .await + 
.unwrap(); + let id2 = db + .save_tx( + Some(&mut dbtx), + None, + &tx2, + FeePayingType::RBF, + Txid::all_zeros(), + None, + ) + .await + .unwrap(); + + // Test getting sendable txs + let fee_rate = FeeRate::from_sat_per_vb(3).unwrap(); + let current_tip_height = 100; + let sendable_txs = db + .get_sendable_txs(Some(&mut dbtx), fee_rate, current_tip_height) + .await + .unwrap(); + + // Both transactions should be sendable as they have no prerequisites or cancellations + assert_eq!(sendable_txs.len(), 2); + assert!(sendable_txs.contains(&id1)); + assert!(sendable_txs.contains(&id2)); + + // Test updating effective fee rate for tx1 with a fee rate equal to the query fee rate + // This should make tx1 not sendable since the condition is "effective_fee_rate < fee_rate" + db.update_effective_fee_rate(Some(&mut dbtx), id1, fee_rate) + .await + .unwrap(); + + let sendable_txs = db + .get_sendable_txs(Some(&mut dbtx), fee_rate, current_tip_height) + .await + .unwrap(); + assert_eq!(sendable_txs.len(), 1); + assert!(sendable_txs.contains(&id2)); + + // Update tx1's effective fee rate to be higher than the query fee rate + let higher_fee_rate = FeeRate::from_sat_per_vb(3).unwrap(); + db.update_effective_fee_rate(Some(&mut dbtx), id1, higher_fee_rate) + .await + .unwrap(); + + // Now only tx2 should be sendable since tx1's effective fee rate is higher than the query fee rate + let sendable_txs = db + .get_sendable_txs(Some(&mut dbtx), fee_rate, current_tip_height) + .await + .unwrap(); + assert_eq!(sendable_txs.len(), 1); + assert!(sendable_txs.contains(&id2)); + + dbtx.commit().await.unwrap(); + } + + #[tokio::test] + async fn test_debug_sending_state() { + let db = setup_test_db().await; + let mut dbtx = db.begin_transaction().await.unwrap(); + + // Create a test transaction + let tx = Transaction { + version: Version::TWO, + lock_time: bitcoin::absolute::LockTime::Blocks(Height::ZERO), + input: vec![], + output: vec![], + }; + + // Insert the transaction into the database 
+ let tx_id = db + .save_tx( + None, // needed so that tx_id is available + None, + &tx, + FeePayingType::RBF, + tx.compute_txid(), + None, + ) + .await + .unwrap(); + + // Test updating the sending state + let initial_state = "waiting_for_fee_payer_utxos"; + db.update_tx_debug_sending_state(tx_id, initial_state, false) + .await + .unwrap(); + + // Verify the state was saved correctly + let state = db.get_tx_debug_info(Some(&mut dbtx), tx_id).await.unwrap(); + assert_eq!(state, Some(initial_state.to_string())); + + // Update the state with activation + let active_state = "ready_to_send"; + db.update_tx_debug_sending_state(tx_id, active_state, true) + .await + .unwrap(); + + // Verify the state was updated + let state = db.get_tx_debug_info(Some(&mut dbtx), tx_id).await.unwrap(); + assert_eq!(state, Some(active_state.to_string())); + + // Test saving an error message + let error_message = "Failed to send transaction: insufficient fee"; + db.save_tx_debug_submission_error(tx_id, error_message) + .await + .unwrap(); + + // Verify the error was saved + let errors = db + .get_tx_debug_submission_errors(Some(&mut dbtx), tx_id) + .await + .unwrap(); + assert_eq!(errors.len(), 1); + assert_eq!(errors[0].0, error_message); + + // Add another error + let second_error = "Network connection timeout"; + db.save_tx_debug_submission_error(tx_id, second_error) + .await + .unwrap(); + + // Verify both errors are retrieved in order + let errors = db + .get_tx_debug_submission_errors(Some(&mut dbtx), tx_id) + .await + .unwrap(); + assert_eq!(errors.len(), 2); + assert_eq!(errors[0].0, error_message); + assert_eq!(errors[1].0, second_error); + + // Update state again + let final_state = "sent"; + db.update_tx_debug_sending_state(tx_id, final_state, true) + .await + .unwrap(); + + // Verify final state + let state = db.get_tx_debug_info(Some(&mut dbtx), tx_id).await.unwrap(); + assert_eq!(state, Some(final_state.to_string())); + + dbtx.commit().await.unwrap(); + } +} diff --git 
a/core/src/database/verifier.rs b/core/src/database/verifier.rs new file mode 100644 index 000000000..953826ace --- /dev/null +++ b/core/src/database/verifier.rs @@ -0,0 +1,519 @@ +//! # Verifier Related Database Operations +//! +//! This module includes database functions which are mainly used by a verifier. + +use std::ops::DerefMut; + +use super::{ + wrapper::{BlockHashDB, TxidDB, XOnlyPublicKeyDB}, + Database, DatabaseTransaction, +}; +use crate::{errors::BridgeError, execute_query_with_tx}; +use bitcoin::{BlockHash, OutPoint, Txid, XOnlyPublicKey}; +use eyre::Context; +use sqlx::QueryBuilder; + +impl Database { + /// Returns the last deposit index. + /// If no deposits exist, returns None + pub async fn get_last_deposit_idx( + &self, + tx: Option>, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (i32,)>("SELECT COALESCE(MAX(idx), -1) FROM withdrawals"); + let result = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + if result.0 == -1 { + Ok(None) + } else { + Ok(Some(result.0 as u32)) + } + } + + /// Returns the last withdrawal index where withdrawal_utxo_txid exists. + /// If no withdrawals with UTXOs exist, returns None. + pub async fn get_last_withdrawal_idx( + &self, + tx: Option>, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (i32,)>( + "SELECT COALESCE(MAX(idx), -1) FROM withdrawals WHERE withdrawal_utxo_txid IS NOT NULL", + ); + let result = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + if result.0 == -1 { + Ok(None) + } else { + Ok(Some(result.0 as u32)) + } + } + + pub async fn upsert_move_to_vault_txid_from_citrea_deposit( + &self, + tx: Option>, + citrea_idx: u32, + move_to_vault_txid: &Txid, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "INSERT INTO withdrawals (idx, move_to_vault_txid) + VALUES ($1, $2) + ON CONFLICT (idx) DO UPDATE + SET move_to_vault_txid = $2", + ) + .bind(i32::try_from(citrea_idx).wrap_err("Failed to convert citrea index to i32")?) 
+ .bind(TxidDB(*move_to_vault_txid)); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + pub async fn get_move_to_vault_txid_from_citrea_deposit( + &self, + tx: Option>, + citrea_idx: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (TxidDB,)>( + "SELECT move_to_vault_txid FROM withdrawals WHERE idx = $1", + ) + .bind(i32::try_from(citrea_idx).wrap_err("Failed to convert citrea index to i32")?); + + let result: Option<(TxidDB,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + Ok(result.map(|(move_to_vault_txid,)| move_to_vault_txid.0)) + } + + pub async fn update_replacement_deposit_move_txid( + &self, + tx: DatabaseTransaction<'_, '_>, + idx: u32, + new_move_txid: Txid, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "UPDATE withdrawals + SET move_to_vault_txid = $2 + WHERE idx = $1 + RETURNING idx", + ) + .bind(i32::try_from(idx).wrap_err("Failed to convert idx to i32")?) + .bind(TxidDB(new_move_txid)) + .fetch_optional(tx.deref_mut()) + .await?; + + if query.is_none() { + return Err(eyre::eyre!("Replacement move txid not found: {}", idx).into()); + } + Ok(()) + } + + pub async fn update_withdrawal_utxo_from_citrea_withdrawal( + &self, + tx: Option>, + citrea_idx: u32, + withdrawal_utxo: OutPoint, + withdrawal_batch_proof_bitcoin_block_height: u32, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "UPDATE withdrawals + SET withdrawal_utxo_txid = $2, + withdrawal_utxo_vout = $3, + withdrawal_batch_proof_bitcoin_block_height = $4 + WHERE idx = $1", + ) + .bind(i32::try_from(citrea_idx).wrap_err("Failed to convert citrea index to i32")?) 
+ .bind(TxidDB(withdrawal_utxo.txid)) + .bind( + i32::try_from(withdrawal_utxo.vout) + .wrap_err("Failed to convert withdrawal utxo vout to i32")?, + ) + .bind( + i32::try_from(withdrawal_batch_proof_bitcoin_block_height) + .wrap_err("Failed to convert withdrawal batch proof bitcoin block height to i32")?, + ); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + /// For the given deposit index, returns the withdrawal utxo associated with it + /// If there is no withdrawal utxo set for the deposit, an error is returned + pub async fn get_withdrawal_utxo_from_citrea_withdrawal( + &self, + tx: Option>, + citrea_idx: u32, + ) -> Result { + let query = sqlx::query_as::<_, (Option, Option)>( + "SELECT w.withdrawal_utxo_txid, w.withdrawal_utxo_vout + FROM withdrawals w + WHERE w.idx = $1", + ) + .bind(i32::try_from(citrea_idx).wrap_err("Failed to convert citrea index to i32")?); + + let results = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + match results { + None => Err(eyre::eyre!("Deposit with id {} is not set", citrea_idx).into()), + Some((txid, vout)) => match (txid, vout) { + (Some(txid), Some(vout)) => Ok(OutPoint { + txid: txid.0, + vout: u32::try_from(vout) + .wrap_err("Failed to convert withdrawal utxo vout to u32")?, + }), + _ => { + Err(eyre::eyre!("Withdrawal utxo is not set for deposit {}", citrea_idx).into()) + } + }, + } + } + + /// Returns the withdrawal indexes and their spending txid for the given + /// block id. 
+ pub async fn get_payout_txs_for_withdrawal_utxos( + &self, + tx: Option>, + block_id: u32, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (i32, TxidDB)>( + "SELECT w.idx, bsu.spending_txid + FROM withdrawals w + JOIN bitcoin_syncer_spent_utxos bsu + ON bsu.txid = w.withdrawal_utxo_txid + AND bsu.vout = w.withdrawal_utxo_vout + WHERE bsu.block_id = $1", + ) + .bind(i32::try_from(block_id).wrap_err("Failed to convert block id to i32")?); + + let results = execute_query_with_tx!(self.connection, tx, query, fetch_all)?; + + results + .into_iter() + .map(|(idx, txid)| { + Ok(( + u32::try_from(idx).wrap_err("Failed to convert withdrawal index to u32")?, + txid.0, + )) + }) + .collect() + } + + /// Sets the given payout txs' txid and operator index for the given index. + pub async fn update_payout_txs_and_payer_operator_xonly_pk( + &self, + tx: Option>, + payout_txs_and_payer_operator_xonly_pk: Vec<( + u32, + Txid, + Option, + bitcoin::BlockHash, + )>, + ) -> Result<(), BridgeError> { + if payout_txs_and_payer_operator_xonly_pk.is_empty() { + return Ok(()); + } + // Convert all values first, propagating any errors + let converted_values: Result, BridgeError> = payout_txs_and_payer_operator_xonly_pk + .iter() + .map(|(idx, txid, operator_xonly_pk, block_hash)| { + Ok(( + i32::try_from(*idx).wrap_err("Failed to convert payout index to i32")?, + TxidDB(*txid), + operator_xonly_pk.map(XOnlyPublicKeyDB), + BlockHashDB(*block_hash), + )) + }) + .collect(); + let converted_values = converted_values?; + + let mut query_builder = QueryBuilder::new( + "UPDATE withdrawals AS w SET + payout_txid = c.payout_txid, + payout_payer_operator_xonly_pk = c.payout_payer_operator_xonly_pk, + payout_tx_blockhash = c.payout_tx_blockhash + FROM (", + ); + + query_builder.push_values( + converted_values.into_iter(), + |mut b, (idx, txid, operator_xonly_pk, block_hash)| { + b.push_bind(idx) + .push_bind(txid) + .push_bind(operator_xonly_pk) + .push_bind(block_hash); + }, + ); + + 
query_builder + .push(") AS c(idx, payout_txid, payout_payer_operator_xonly_pk, payout_tx_blockhash) WHERE w.idx = c.idx"); + + let query = query_builder.build(); + execute_query_with_tx!(self.connection, tx, query, execute)?; + + Ok(()) + } + + pub async fn get_payout_info_from_move_txid( + &self, + tx: Option>, + move_to_vault_txid: Txid, + ) -> Result, BlockHash, Txid, i32)>, BridgeError> { + let query = sqlx::query_as::<_, (Option, BlockHashDB, TxidDB, i32)>( + "SELECT w.payout_payer_operator_xonly_pk, w.payout_tx_blockhash, w.payout_txid, w.idx + FROM withdrawals w + WHERE w.move_to_vault_txid = $1", + ) + .bind(TxidDB(move_to_vault_txid)); + + let result: Option<(Option, BlockHashDB, TxidDB, i32)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + result + .map(|(operator_xonly_pk, block_hash, txid, deposit_idx)| { + Ok(( + operator_xonly_pk.map(|pk| pk.0), + block_hash.0, + txid.0, + deposit_idx, + )) + }) + .transpose() + } + + pub async fn get_first_unhandled_payout_by_operator_xonly_pk( + &self, + tx: Option>, + operator_xonly_pk: XOnlyPublicKey, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (i32, Option, Option)>( + "SELECT w.idx, w.move_to_vault_txid, w.payout_tx_blockhash + FROM withdrawals w + WHERE w.payout_txid IS NOT NULL + AND w.is_payout_handled = FALSE + AND w.payout_payer_operator_xonly_pk = $1 + ORDER BY w.idx ASC + LIMIT 1", + ) + .bind(XOnlyPublicKeyDB(operator_xonly_pk)); + + let results = execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + results + .map(|(citrea_idx, move_to_vault_txid, payout_tx_blockhash)| { + Ok(( + u32::try_from(citrea_idx).wrap_err("Failed to convert citrea index to u32")?, + move_to_vault_txid + .expect("move_to_vault_txid Must be Some") + .0, + payout_tx_blockhash + .expect("payout_tx_blockhash Must be Some") + .0, + )) + }) + .transpose() + } + + pub async fn get_payer_xonly_pk_blockhash_and_kickoff_txid_from_deposit_id( + &self, + tx: Option>, + 
deposit_id: u32, + ) -> Result<(Option, Option, Option), BridgeError> { + let query = sqlx::query_as::< + _, + ( + Option, + Option, + Option, + ), + >( + "SELECT w.payout_payer_operator_xonly_pk, w.payout_tx_blockhash, w.kickoff_txid + FROM withdrawals w + INNER JOIN deposits d ON d.move_to_vault_txid = w.move_to_vault_txid + WHERE d.deposit_id = $1", + ) + .bind(i32::try_from(deposit_id).wrap_err("Failed to convert deposit id to i32")?); + + let result: ( + Option, + Option, + Option, + ) = execute_query_with_tx!(self.connection, tx, query, fetch_one)?; + + Ok(( + result.0.map(|pk| pk.0), + result.1.map(|block_hash| block_hash.0), + result.2.map(|txid| txid.0), + )) + } + + pub async fn mark_payout_handled( + &self, + tx: Option>, + citrea_idx: u32, + kickoff_txid: Txid, + ) -> Result<(), BridgeError> { + let query = sqlx::query( + "UPDATE withdrawals SET is_payout_handled = TRUE, kickoff_txid = $2 WHERE idx = $1", + ) + .bind(i32::try_from(citrea_idx).wrap_err("Failed to convert citrea index to i32")?) 
+ .bind(TxidDB(kickoff_txid)); + + execute_query_with_tx!(self.connection, tx, query, execute)?; + Ok(()) + } + + pub async fn get_handled_payout_kickoff_txid( + &self, + tx: Option>, + payout_txid: Txid, + ) -> Result, BridgeError> { + let query = sqlx::query_as::<_, (Option,)>( + "SELECT kickoff_txid FROM withdrawals WHERE payout_txid = $1 AND is_payout_handled = TRUE", + ) + .bind(TxidDB(payout_txid)); + + let result: Option<(Option,)> = + execute_query_with_tx!(self.connection, tx, query, fetch_optional)?; + + Ok(result + .map(|(kickoff_txid,)| kickoff_txid.expect("If handled, kickoff_txid must exist").0)) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + database::Database, + test::common::{create_test_config_with_thread_name, generate_random_xonly_pk}, + }; + use bitcoin::{hashes::Hash, BlockHash, Txid}; + + #[tokio::test] + async fn update_get_payout_txs_from_citrea_withdrawal() { + let config = create_test_config_with_thread_name().await; + let db = Database::new(&config).await.unwrap(); + + let txid = Txid::from_byte_array([0x45; 32]); + let index = 0x1F; + let operator_xonly_pk = generate_random_xonly_pk(); + let utxo = bitcoin::OutPoint { + txid: bitcoin::Txid::from_byte_array([0x45; 32]), + vout: 0, + }; + + let mut dbtx = db.begin_transaction().await.unwrap(); + + let block_id = db + .insert_block_info( + Some(&mut dbtx), + &BlockHash::all_zeros(), + &BlockHash::all_zeros(), + utxo.vout, + ) + .await + .unwrap(); + db.insert_txid_to_block(&mut dbtx, block_id, &txid) + .await + .unwrap(); + db.insert_spent_utxo(&mut dbtx, block_id, &txid, &utxo.txid, utxo.vout.into()) + .await + .unwrap(); + + assert!(db + .get_withdrawal_utxo_from_citrea_withdrawal(Some(&mut dbtx), index) + .await + .is_err()); + db.upsert_move_to_vault_txid_from_citrea_deposit(Some(&mut dbtx), index, &txid) + .await + .unwrap(); + db.update_withdrawal_utxo_from_citrea_withdrawal(Some(&mut dbtx), index, utxo, block_id) + .await + .unwrap(); + + let block_hash = 
BlockHash::all_zeros(); + + db.update_payout_txs_and_payer_operator_xonly_pk( + Some(&mut dbtx), + vec![(index, txid, Some(operator_xonly_pk), block_hash)], + ) + .await + .unwrap(); + + let txs = db + .get_payout_txs_for_withdrawal_utxos(Some(&mut dbtx), block_id) + .await + .unwrap(); + + assert_eq!(txs.len(), 1); + assert_eq!(txs[0].0, index); + assert_eq!(txs[0].1, txid); + + let withdrawal_utxo = db + .get_withdrawal_utxo_from_citrea_withdrawal(Some(&mut dbtx), index) + .await + .unwrap(); + assert_eq!(withdrawal_utxo, utxo); + + let move_txid = db + .get_move_to_vault_txid_from_citrea_deposit(Some(&mut dbtx), index) + .await + .unwrap() + .unwrap(); + assert_eq!(move_txid, txid); + + // Test payout info retrieval with Some operator xonly pk + let payout_info = db + .get_payout_info_from_move_txid(Some(&mut dbtx), move_txid) + .await + .unwrap() + .unwrap(); + assert_eq!(payout_info.0, Some(operator_xonly_pk)); + assert_eq!(payout_info.1, block_hash); + assert_eq!(payout_info.2, txid); + assert_eq!(payout_info.3, index as i32); + + // Test with None operator xonly pk (optimistic payout or incorrect payout) + let index2 = 0x2F; + let txid2 = Txid::from_byte_array([0x55; 32]); + let utxo2 = bitcoin::OutPoint { + txid: bitcoin::Txid::from_byte_array([0x55; 32]), + vout: 1, + }; + + db.insert_txid_to_block(&mut dbtx, block_id, &txid2) + .await + .unwrap(); + db.insert_spent_utxo(&mut dbtx, block_id, &txid2, &utxo2.txid, utxo2.vout.into()) + .await + .unwrap(); + + db.upsert_move_to_vault_txid_from_citrea_deposit(Some(&mut dbtx), index2, &txid2) + .await + .unwrap(); + db.update_withdrawal_utxo_from_citrea_withdrawal(Some(&mut dbtx), index2, utxo2, block_id) + .await + .unwrap(); + + // Set payout with None operator xonly pk + db.update_payout_txs_and_payer_operator_xonly_pk( + Some(&mut dbtx), + vec![(index2, txid2, None, block_hash)], + ) + .await + .unwrap(); + + // Test payout info retrieval with None operator xonly pk + let payout_info2 = db + 
.get_payout_info_from_move_txid(Some(&mut dbtx), txid2) + .await + .unwrap() + .unwrap(); + assert_eq!(payout_info2.0, None); // No operator xonly pk + assert_eq!(payout_info2.1, block_hash); + assert_eq!(payout_info2.2, txid2); + assert_eq!(payout_info2.3, index2 as i32); + + // Verify we now have 2 payout transactions + let all_txs = db + .get_payout_txs_for_withdrawal_utxos(Some(&mut dbtx), block_id) + .await + .unwrap(); + assert_eq!(all_txs.len(), 2); + } +} diff --git a/core/src/database/wrapper.rs b/core/src/database/wrapper.rs new file mode 100644 index 000000000..230e9d0b1 --- /dev/null +++ b/core/src/database/wrapper.rs @@ -0,0 +1,640 @@ +//! # Type Wrappers for Parsing +//! +//! This module includes wrappers for easy parsing of the foreign types. + +use crate::EVMAddress; +use bitcoin::{ + address::NetworkUnchecked, + block, + consensus::{deserialize, serialize, Decodable, Encodable}, + hashes::Hash, + hex::DisplayHex, + secp256k1::{schnorr, Message, PublicKey}, + Address, OutPoint, ScriptBuf, TxOut, Txid, XOnlyPublicKey, +}; +use eyre::eyre; +use prost::Message as _; +use risc0_zkvm::Receipt; +use secp256k1::musig; +use serde::{Deserialize, Serialize}; +use sqlx::{ + error::BoxDynError, + postgres::{PgArgumentBuffer, PgValueRef}, + Decode, Encode, Postgres, +}; +use std::str::FromStr; + +/// Macro to reduce boilerplate for [`impl_text_wrapper_custom`]. +/// +/// Implements the Type, Encode and Decode traits for a wrapper type. +/// Assumes the type is declared. +macro_rules! 
impl_text_wrapper_base { + ($wrapper:ident, $inner:ty, $encode:expr, $decode:expr) => { + impl sqlx::Type for $wrapper { + fn type_info() -> sqlx::postgres::PgTypeInfo { + sqlx::postgres::PgTypeInfo::with_name("TEXT") + } + } + + impl Encode<'_, Postgres> for $wrapper { + fn encode_by_ref( + &self, + buf: &mut PgArgumentBuffer, + ) -> Result { + let s = $encode(&self.0); + <&str as Encode>::encode_by_ref(&s.as_str(), buf) + } + } + + impl<'r> Decode<'r, Postgres> for $wrapper { + fn decode(value: PgValueRef<'r>) -> Result { + let s = <&str as Decode>::decode(value)?; + Ok(Self($decode(s)?)) + } + } + }; +} + +/// Macro for implementing text-based SQL wrapper types with custom encoding/decoding +/// +/// # Parameters +/// - `$wrapper`: The name of the wrapper type to create +/// - `$inner`: The inner type being wrapped +/// - `$encode`: Expression for converting inner type to string +/// - `$decode`: Expression for converting string back to inner type +/// +/// The macro creates a new type that wraps the inner type and implements: +/// - SQLx Type trait to indicate TEXT column type +/// - SQLx Encode trait for converting to database format +/// - SQLx Decode trait for converting from database format +macro_rules! 
impl_text_wrapper_custom { + // Default case (include serde) + ($wrapper:ident, $inner:ty, $encode:expr, $decode:expr) => { + impl_text_wrapper_custom!($wrapper, $inner, $encode, $decode, true); + }; + + // true case - with serde + ($wrapper:ident, $inner:ty, $encode:expr, $decode:expr, true) => { + #[derive(sqlx::FromRow, Debug, Clone, Serialize, Deserialize, PartialEq)] + pub struct $wrapper(pub $inner); + + impl_text_wrapper_base!($wrapper, $inner, $encode, $decode); + }; + + // false case - without serde + ($wrapper:ident, $inner:ty, $encode:expr, $decode:expr, false) => { + #[derive(sqlx::FromRow, Debug, Clone, PartialEq)] + pub struct $wrapper(pub $inner); + + impl_text_wrapper_base!($wrapper, $inner, $encode, $decode); + }; +} + +/// Macro for implementing BYTEA-based SQL wrapper types with custom encoding/decoding +/// +/// # Parameters +/// - `$wrapper`: The name of the wrapper type to create +/// - `$inner`: The inner type being wrapped +/// - `$encode`: Expression for converting inner type to bytes +/// - `$decode`: Expression for converting bytes back to inner type +/// +/// The macro creates a new type that wraps the inner type and implements: +/// - SQLx Type trait to indicate BYTEA column type +/// - SQLx Encode trait for converting to database format +/// - SQLx Decode trait for converting from database format +macro_rules! 
impl_bytea_wrapper_custom { + ($wrapper:ident, $inner:ty, $encode:expr, $decode:expr) => { + #[derive(sqlx::FromRow, Debug, Clone, PartialEq)] + pub struct $wrapper(pub $inner); + + impl sqlx::Type for $wrapper { + fn type_info() -> sqlx::postgres::PgTypeInfo { + sqlx::postgres::PgTypeInfo::with_name("BYTEA") + } + } + + impl Encode<'_, Postgres> for $wrapper { + fn encode_by_ref( + &self, + buf: &mut PgArgumentBuffer, + ) -> Result { + let bytes = $encode(&self.0); + <&[u8] as Encode>::encode(bytes.as_ref(), buf) + } + } + + impl<'r> Decode<'r, Postgres> for $wrapper { + fn decode(value: PgValueRef<'r>) -> Result { + let bytes = as Decode>::decode(value)?; + Ok(Self($decode(&bytes)?)) + } + } + }; +} + +/// Same as `impl_bytea_wrapper_custom` but with an encode function that returns a Result +macro_rules! impl_bytea_wrapper_custom_with_error { + ($wrapper:ident, $inner:ty, $encode:expr, $decode:expr) => { + #[derive(sqlx::FromRow, Debug, Clone)] + pub struct $wrapper(pub $inner); + + impl sqlx::Type for $wrapper { + fn type_info() -> sqlx::postgres::PgTypeInfo { + sqlx::postgres::PgTypeInfo::with_name("BYTEA") + } + } + + impl Encode<'_, Postgres> for $wrapper { + fn encode_by_ref( + &self, + buf: &mut PgArgumentBuffer, + ) -> Result { + let bytes = $encode(&self.0)?; + <&[u8] as Encode>::encode(bytes.as_ref(), buf) + } + } + + impl<'r> Decode<'r, Postgres> for $wrapper { + fn decode(value: PgValueRef<'r>) -> Result { + let bytes = as Decode>::decode(value)?; + Ok(Self($decode(&bytes)?)) + } + } + }; +} + +/// Macro for implementing BYTEA-based SQL wrapper types using standard serialization +/// +/// This macro creates a wrapper type that uses the inner type's default serialization +/// methods (`serialize()` and `from_slice()`) for encoding/decoding to/from BYTEA columns. 
+/// +/// # Parameters +/// - `$wrapper`: The name of the wrapper type to create +/// - `$inner`: The inner type being wrapped +/// +/// The macro creates a new type that wraps the inner type and implements: +/// - SQLx Type trait to indicate BYTEA column type +/// - SQLx Encode trait for converting to database format +/// - SQLx Decode trait for converting from database format +macro_rules! impl_bytea_wrapper_default { + ($wrapper:ident, $inner:ty) => { + impl_bytea_wrapper_custom!( + $wrapper, + $inner, + |x: &$inner| x.serialize(), + |x: &[u8]| -> Result<$inner, BoxDynError> { + <$inner>::from_slice(x).map_err(|e| Box::new(e) as sqlx::error::BoxDynError) + } + ); + }; +} + +/// Macro for implementing text-based SQL wrapper types using standard string conversion +/// +/// This macro creates a wrapper type that uses the inner type's default string conversion +/// methods (`to_string()` and `from_str()`) for encoding/decoding to/from TEXT columns. +/// +/// # Parameters +/// - `$wrapper`: The name of the wrapper type to create +/// - `$inner`: The inner type being wrapped +/// +/// The macro creates a new type that wraps the inner type and implements: +/// - SQLx Type trait to indicate TEXT column type +/// - SQLx Encode trait for converting to database format +/// - SQLx Decode trait for converting from database format +macro_rules! 
impl_text_wrapper_default { + ($wrapper:ident, $inner:ty) => { + impl_text_wrapper_custom!( + $wrapper, + $inner, + <$inner as ToString>::to_string, + <$inner as FromStr>::from_str + ); + }; +} + +impl_text_wrapper_default!(OutPointDB, OutPoint); +impl_text_wrapper_default!(BlockHashDB, block::BlockHash); +impl_text_wrapper_default!(PublicKeyDB, PublicKey); +impl_text_wrapper_default!(XOnlyPublicKeyDB, XOnlyPublicKey); + +impl_bytea_wrapper_default!(SignatureDB, schnorr::Signature); + +impl_bytea_wrapper_custom!( + MusigPubNonceDB, + musig::PublicNonce, + |pub_nonce: &musig::PublicNonce| pub_nonce.serialize(), + |x: &[u8]| -> Result { + let arr: &[u8; 66] = x + .try_into() + .map_err(|_| eyre!("Expected 66 bytes for PublicNonce"))?; + Ok(musig::PublicNonce::from_byte_array(arr)?) + } +); + +impl_bytea_wrapper_custom!( + MusigAggNonceDB, + musig::AggregatedNonce, + |pub_nonce: &musig::AggregatedNonce| pub_nonce.serialize(), + |x: &[u8]| -> Result { + let arr: &[u8; 66] = x + .try_into() + .map_err(|_| eyre!("Expected 66 bytes for AggregatedNonce"))?; + Ok(musig::AggregatedNonce::from_byte_array(arr)?) + } +); + +impl_bytea_wrapper_custom_with_error!( + ReceiptDB, + Receipt, + |lcp: &Receipt| -> Result, BoxDynError> { borsh::to_vec(lcp).map_err(Into::into) }, + |x: &[u8]| -> Result { borsh::from_slice(x).map_err(Into::into) } +); + +impl_text_wrapper_custom!( + AddressDB, + Address, + |addr: &Address| addr.clone().assume_checked().to_string(), + |s: &str| Address::from_str(s) +); + +impl_text_wrapper_custom!( + EVMAddressDB, + EVMAddress, + |addr: &EVMAddress| hex::encode(addr.0), + |s: &str| -> Result { + let bytes = hex::decode(s).map_err(Box::new)?; + + Ok(EVMAddress(bytes.try_into().map_err(|arr: Vec| { + eyre!("Failed to deserialize EVMAddress from {:?}", arr) + })?)) + } +); + +impl_bytea_wrapper_custom!( + TxidDB, + Txid, + |txid: &Txid| *txid, // Txid is Copy, which requires this hack + |x: &[u8]| -> Result { Ok(Txid::from_slice(x)?) 
} +); + +impl_bytea_wrapper_custom!( + MessageDB, + Message, + |msg: &Message| *msg, // Message is Copy, which requires this hack + |x: &[u8]| -> Result { Ok(Message::from_digest(x.try_into()?)) } +); + +use crate::rpc::clementine::DepositSignatures; +impl_bytea_wrapper_custom!( + SignaturesDB, + DepositSignatures, + |signatures: &DepositSignatures| { signatures.encode_to_vec() }, + |x: &[u8]| -> Result { + DepositSignatures::decode(x).map_err(Into::into) + } +); + +use crate::rpc::clementine::DepositParams; +impl_bytea_wrapper_custom!( + DepositParamsDB, + DepositParams, + |deposit_params: &DepositParams| { deposit_params.encode_to_vec() }, + |x: &[u8]| -> Result { + DepositParams::decode(x).map_err(Into::into) + } +); + +impl_bytea_wrapper_custom!( + ScriptBufDB, + ScriptBuf, + |script: &ScriptBuf| serialize(script), + |x: &[u8]| -> Result { deserialize(x).map_err(Into::into) } +); + +impl_text_wrapper_custom!( + BlockHeaderDB, + block::Header, + |header: &block::Header| { + let mut bytes = Vec::new(); + header + .consensus_encode(&mut bytes) + .expect("exceeded max Vec size or ran out of memory"); + bytes.to_hex_string(bitcoin::hex::Case::Lower) + }, + |s: &str| -> Result { + let bytes = hex::decode(s)?; + block::Header::consensus_decode(&mut bytes.as_slice()).map_err(Into::into) + } +); + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, sqlx::FromRow)] +pub struct UtxoDB { + pub outpoint_db: OutPointDB, + pub txout_db: TxOutDB, +} + +impl_text_wrapper_custom!( + TxOutDB, + TxOut, + |txout: &TxOut| bitcoin::consensus::encode::serialize_hex(&txout), + |s: &str| -> Result { + bitcoin::consensus::encode::deserialize_hex(s) + .map_err(|e| Box::new(e) as sqlx::error::BoxDynError) + } +); + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + bitvm_client::{self, SECP}, + database::Database, + musig2, + rpc::clementine::TaggedSignature, + test::common::*, + EVMAddress, + }; + use bitcoin::{ + block::{self, Version}, + hashes::Hash, + key::Keypair, + 
secp256k1::{schnorr::Signature, SecretKey}, + Amount, BlockHash, CompactTarget, OutPoint, ScriptBuf, TxMerkleNode, TxOut, Txid, + }; + use secp256k1::{musig::AggregatedNonce, SECP256K1}; + use sqlx::{Executor, Type}; + + macro_rules! test_encode_decode_invariant { + ($db_type:ty, $inner:ty, $db_wrapper:expr, $table_name:expr, $column_type:expr) => { + let db_wrapper = $db_wrapper; + + let config = create_test_config_with_thread_name().await; + let database = Database::new(&config).await.unwrap(); + + // Create table if it doesn't exist + database + .connection + .execute(sqlx::query(&format!( + "CREATE TABLE IF NOT EXISTS {} ({} {} PRIMARY KEY)", + $table_name, $table_name, $column_type + ))) + .await + .unwrap(); + + // Insert the value + database + .connection + .execute( + sqlx::query(&format!( + "INSERT INTO {} ({}) VALUES ($1)", + $table_name, $table_name + )) + .bind(db_wrapper.clone()), + ) + .await + .unwrap(); + + // Retrieve the value + let retrieved: $db_type = sqlx::query_scalar(&format!( + "SELECT {} FROM {} WHERE {} = $1", + $table_name, $table_name, $table_name + )) + .bind(db_wrapper.clone()) + .fetch_one(&database.connection) + .await + .unwrap(); + + // Verify the retrieved value matches the original + assert_eq!(retrieved, db_wrapper); + + // Clean up + database + .connection + .execute(sqlx::query(&format!("DROP TABLE {}", $table_name))) + .await + .unwrap(); + }; + } + #[tokio::test] + async fn outpoint_encode_decode_invariant() { + assert_eq!( + OutPointDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("TEXT") + ); + + test_encode_decode_invariant!( + OutPointDB, + OutPoint, + OutPointDB(OutPoint { + txid: Txid::all_zeros(), + vout: 0x45 + }), + "outpoint", + "TEXT" + ); + } + + #[tokio::test] + async fn txoutdb_encode_decode_invariant() { + assert_eq!( + TxOutDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("TEXT") + ); + + test_encode_decode_invariant!( + TxOutDB, + TxOut, + TxOutDB(TxOut { + value: Amount::from_sat(0x45), + 
script_pubkey: ScriptBuf::new(), + }), + "txout", + "TEXT" + ); + } + + #[tokio::test] + async fn addressdb_encode_decode_invariant() { + assert_eq!( + AddressDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("TEXT") + ); + + let address = bitcoin::Address::p2tr( + &SECP, + *bitvm_client::UNSPENDABLE_XONLY_PUBKEY, + None, + bitcoin::Network::Regtest, + ); + let address = AddressDB(address.as_unchecked().clone()); + + test_encode_decode_invariant!( + AddressDB, + Address, + address, + "address", + "TEXT" + ); + } + + #[tokio::test] + async fn evmaddressdb_encode_decode_invariant() { + assert_eq!( + EVMAddressDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("TEXT") + ); + + let evmaddress = EVMAddressDB(EVMAddress([0x45u8; 20])); + test_encode_decode_invariant!(EVMAddressDB, EVMAddress, evmaddress, "evmaddress", "TEXT"); + } + + #[tokio::test] + async fn txiddb_encode_decode_invariant() { + assert_eq!( + TxidDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("BYTEA") + ); + + let txid = TxidDB(Txid::all_zeros()); + test_encode_decode_invariant!(TxidDB, Txid, txid, "txid", "BYTEA"); + } + + #[tokio::test] + async fn signaturedb_encode_decode_invariant() { + assert_eq!( + SignatureDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("BYTEA") + ); + + let signature = SignatureDB(Signature::from_slice(&[0u8; 64]).unwrap()); + test_encode_decode_invariant!(SignatureDB, Signature, signature, "signature", "BYTEA"); + } + + #[tokio::test] + async fn signaturesdb_encode_decode_invariant() { + assert_eq!( + SignaturesDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("BYTEA") + ); + + use crate::rpc::clementine::{ + DepositSignatures, NormalSignatureKind, NumberedSignatureKind, + }; + let signatures = DepositSignatures { + signatures: vec![ + TaggedSignature { + signature: vec![0x1Fu8; 64], + signature_id: Some(NormalSignatureKind::NormalSignatureUnknown.into()), + }, + TaggedSignature { + signature: vec![0x45u8; 64], + signature_id: 
Some((NumberedSignatureKind::NumberedSignatureUnknown, 1).into()), + }, + ], + }; + test_encode_decode_invariant!( + SignaturesDB, + DepositSignatures, + SignaturesDB(signatures), + "signatures", + "BYTEA" + ); + } + + #[tokio::test] + async fn utxodb_json_encode_decode_invariant() { + use sqlx::types::Json; + + assert_eq!( + Json::::type_info(), + sqlx::postgres::PgTypeInfo::with_name("JSONB") + ); + + let utxodb = UtxoDB { + outpoint_db: OutPointDB(OutPoint { + txid: Txid::all_zeros(), + vout: 0x45, + }), + txout_db: TxOutDB(TxOut { + value: Amount::from_sat(0x45), + script_pubkey: ScriptBuf::new(), + }), + }; + + test_encode_decode_invariant!(Json, Utxodb, Json(utxodb), "utxodb", "JSONB"); + } + + #[tokio::test] + async fn blockhashdb_encode_decode_invariant() { + assert_eq!( + OutPointDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("TEXT") + ); + + let blockhash = BlockHashDB(BlockHash::all_zeros()); + test_encode_decode_invariant!(BlockHashDB, BlockHash, blockhash, "blockhash", "TEXT"); + } + + #[tokio::test] + async fn blockheaderdb_encode_decode_invariant() { + assert_eq!( + OutPointDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("TEXT") + ); + + let blockheader = BlockHeaderDB(block::Header { + version: Version::TWO, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0, + bits: CompactTarget::default(), + nonce: 0, + }); + test_encode_decode_invariant!( + BlockHeaderDB, + block::Header, + blockheader, + "blockheader", + "TEXT" + ); + } + + #[tokio::test] + async fn musigpubnoncedb_encode_decode_invariant() { + assert_eq!( + MusigPubNonceDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("BYTEA") + ); + + let kp = Keypair::from_secret_key(&SECP, &SecretKey::from_slice(&[1u8; 32]).unwrap()); + let (_sec_nonce, pub_nonce) = musig2::nonce_pair(&kp).unwrap(); + let public_nonce = MusigPubNonceDB(pub_nonce); + test_encode_decode_invariant!( + MusigPubNonceDB, + PublicNonce, + public_nonce, + 
"public_nonce", + "BYTEA" + ); + } + + #[tokio::test] + async fn musigaggnoncedb_encode_decode_invariant() { + assert_eq!( + MusigAggNonceDB::type_info(), + sqlx::postgres::PgTypeInfo::with_name("BYTEA") + ); + + let kp = Keypair::from_secret_key(&SECP, &SecretKey::from_slice(&[1u8; 32]).unwrap()); + let (_sec_nonce, pub_nonce) = musig2::nonce_pair(&kp).unwrap(); + let aggregated_nonce = MusigAggNonceDB(AggregatedNonce::new(SECP256K1, &[&pub_nonce])); + test_encode_decode_invariant!( + MusigAggNonceDB, + AggregatedNonce, + aggregated_nonce, + "aggregated_nonce", + "BYTEA" + ); + } +} diff --git a/core/src/deposit.rs b/core/src/deposit.rs new file mode 100644 index 000000000..453276c0e --- /dev/null +++ b/core/src/deposit.rs @@ -0,0 +1,355 @@ +//! This module defines the data structures related to Citrea deposits in the Clementine bridge. +//! +//! It includes structures for representing deposit data, actors involved (verifiers, watchtowers, operators), +//! and security council configurations. The module also provides functionality for managing different types +//! of deposits (base and replacement) and deriving the necessary scripts these deposits must have. + +use std::sync::Arc; + +use crate::builder::script::{ + BaseDepositScript, Multisig, ReplacementDepositScript, SpendableScript, TimelockScript, +}; +use crate::builder::transaction::create_move_to_vault_txhandler; +use crate::config::protocol::ProtocolParamset; +use crate::errors::BridgeError; +use crate::musig2::AggregateFromPublicKeys; +use crate::operator::RoundIndex; +use crate::utils::ScriptBufExt; +use crate::EVMAddress; +use bitcoin::address::NetworkUnchecked; +use bitcoin::secp256k1::PublicKey; +use bitcoin::{Address, OutPoint, Txid, XOnlyPublicKey}; +use eyre::Context; + +/// Data structure to represent a single kickoff utxo in an operators round tx. 
+#[derive( + Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize, Ord, PartialOrd, +)] +pub struct KickoffData { + pub operator_xonly_pk: XOnlyPublicKey, + pub round_idx: RoundIndex, + pub kickoff_idx: u32, +} + +/// Data structure to represent a deposit. +/// - nofn_xonly_pk is cached to avoid recomputing it each time. +/// - deposit includes the actual information about the deposit. +/// - actors includes the public keys of the actors that will participate in the deposit. +/// - security_council includes the public keys of the security council that can unlock the deposit to create a replacement deposit +/// in case a bug is found in the bridge. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Eq)] +pub struct DepositData { + /// Cached nofn xonly public key used for deposit. + pub nofn_xonly_pk: Option, + pub deposit: DepositInfo, + pub actors: Actors, + pub security_council: SecurityCouncil, +} + +impl PartialEq for DepositData { + fn eq(&self, other: &Self) -> bool { + // nofn_xonly_pk only depends on verifiers pk's so it can be ignored as verifiers are already compared + // for security council, order of keys matter as it will change the m of n multisig script, + // thus change the scriptpubkey of move to vault tx + self.security_council == other.security_council + && self.deposit.deposit_outpoint == other.deposit.deposit_outpoint + // for watchtowers/verifiers/operators, order doesn't matter, we compare sorted lists + // get() functions already return sorted lists + && self.get_operators() == other.get_operators() + && self.get_verifiers() == other.get_verifiers() + && self.get_watchtowers() == other.get_watchtowers() + && self.deposit.deposit_type == other.deposit.deposit_type + } +} + +impl DepositData { + /// Returns the move to vault txid of the deposit. 
+ pub fn get_move_txid( + &mut self, + paramset: &'static ProtocolParamset, + ) -> Result { + Ok(*create_move_to_vault_txhandler(self, paramset)?.get_txid()) + } +} + +/// Data structure to represent the deposit outpoint and type. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] +pub struct DepositInfo { + pub deposit_outpoint: OutPoint, + pub deposit_type: DepositType, +} + +/// Type to represent the type of deposit, and related specific data for each type.. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] +pub enum DepositType { + BaseDeposit(BaseDepositData), + ReplacementDeposit(ReplacementDepositData), +} + +impl DepositData { + /// Returns the outpoint of the deposit. + pub fn get_deposit_outpoint(&self) -> OutPoint { + self.deposit.deposit_outpoint + } + /// Returns the nofn xonly public key of the deposit. It is additionally cached to avoid recomputing it each time. + pub fn get_nofn_xonly_pk(&mut self) -> Result { + if let Some(pk) = self.nofn_xonly_pk { + return Ok(pk); + } + let verifiers = self.get_verifiers(); + let nofn_xonly_pk = bitcoin::XOnlyPublicKey::from_musig2_pks(verifiers, None)?; + self.nofn_xonly_pk = Some(nofn_xonly_pk); + Ok(nofn_xonly_pk) + } + /// Returns the number of verifiers in the deposit. + pub fn get_num_verifiers(&self) -> usize { + self.actors.verifiers.len() + } + /// Returns the number of watchtowers in the deposit. + pub fn get_num_watchtowers(&self) -> usize { + self.get_num_verifiers() + self.actors.watchtowers.len() + } + /// Returns the index of a verifier in the deposit, in the sorted order of verifiers pk. + pub fn get_verifier_index(&self, public_key: &PublicKey) -> Result { + self.get_verifiers() + .iter() + .position(|pk| pk == public_key) + .ok_or_else(|| eyre::eyre!("Verifier with public key {} not found", public_key)) + } + /// Returns the index of a watchtower in the deposit, in the sorted order of watchtowers pk. 
+ pub fn get_watchtower_index(&self, xonly_pk: &XOnlyPublicKey) -> Result { + self.get_watchtowers() + .iter() + .position(|pk| pk == xonly_pk) + .ok_or_else(|| eyre::eyre!("Watchtower with xonly key {} not found", xonly_pk)) + } + /// Returns the index of an operator in the deposit, in the sorted order of operators pk. + pub fn get_operator_index(&self, xonly_pk: XOnlyPublicKey) -> Result { + self.get_operators() + .iter() + .position(|pk| pk == &xonly_pk) + .ok_or_else(|| eyre::eyre!("Operator with xonly key {} not found", xonly_pk)) + } + /// Returns sorted verifiers, they are sorted so that their order is deterministic. + pub fn get_verifiers(&self) -> Vec { + let mut verifiers = self.actors.verifiers.clone(); + verifiers.sort(); + + verifiers + } + /// Returns sorted watchtowers, they are sorted so that their order is deterministic. + /// It is very important for watchtowers to be sorted, as this is the order the watchtower challenge utxo's will be + /// in the kickoff tx. So any change in order will change the kickoff txid's. + pub fn get_watchtowers(&self) -> Vec { + let mut watchtowers = self + .actors + .verifiers + .iter() + .map(|pk| pk.x_only_public_key().0) + .collect::>(); + watchtowers.extend(self.actors.watchtowers.iter()); + watchtowers.sort(); + watchtowers + } + /// Returns sorted operators, they are sorted so that their order is deterministic. + pub fn get_operators(&self) -> Vec { + let mut operators = self.actors.operators.clone(); + operators.sort(); + operators + } + /// Returns the number of operators in the deposit. + pub fn get_num_operators(&self) -> usize { + self.actors.operators.len() + } + /// Returns the scripts a taproot address of a deposit_outpoint must have to spend the deposit. + /// Deposits not having these scripts and corresponding taproot address should be rejected. 
+ pub fn get_deposit_scripts( + &mut self, + paramset: &'static ProtocolParamset, + ) -> Result>, BridgeError> { + let nofn_xonly_pk = self.get_nofn_xonly_pk()?; + + match &mut self.deposit.deposit_type { + DepositType::BaseDeposit(original_deposit_data) => { + let deposit_script = Arc::new(BaseDepositScript::new( + nofn_xonly_pk, + original_deposit_data.evm_address, + )); + + let recovery_script_pubkey = original_deposit_data + .recovery_taproot_address + .clone() + .assume_checked() + .script_pubkey(); + + let recovery_extracted_xonly_pk = recovery_script_pubkey + .try_get_taproot_pk() + .wrap_err("Recovery taproot address is not a valid taproot address")?; + + let script_timelock = Arc::new(TimelockScript::new( + Some(recovery_extracted_xonly_pk), + paramset.user_takes_after, + )); + + Ok(vec![deposit_script, script_timelock]) + } + DepositType::ReplacementDeposit(replacement_deposit_data) => { + let deposit_script: Arc = + Arc::new(ReplacementDepositScript::new( + nofn_xonly_pk, + replacement_deposit_data.old_move_txid, + )); + let security_council_script: Arc = Arc::new( + Multisig::from_security_council(self.security_council.clone()), + ); + + Ok(vec![deposit_script, security_council_script]) + } + } + } +} + +/// Data structure to represent the actors public keys that participate in the deposit. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] +pub struct Actors { + /// Public keys of verifiers that will participate in the deposit. + pub verifiers: Vec, + /// X-only public keys of watchtowers that will participate in the deposit. + /// NOTE: verifiers are automatically considered watchtowers. This field is only for additional watchtowers. + pub watchtowers: Vec, + /// X-only public keys of operators that will participate in the deposit. + pub operators: Vec, +} + +/// Data structure to represent the security council that can unlock the deposit using an m-of-n multisig to create a replacement deposit. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SecurityCouncil { + pub pks: Vec, + pub threshold: u32, +} + +impl std::str::FromStr for SecurityCouncil { + type Err = eyre::Report; + + fn from_str(s: &str) -> Result { + let mut parts = s.split(':'); + let threshold_str = parts + .next() + .ok_or_else(|| eyre::eyre!("Missing threshold"))?; + let pks_str = parts + .next() + .ok_or_else(|| eyre::eyre!("Missing public keys"))?; + + if parts.next().is_some() { + return Err(eyre::eyre!("Too many parts in security council string")); + } + + let threshold = threshold_str + .parse::() + .map_err(|e| eyre::eyre!("Invalid threshold: {}", e))?; + + let pks: Result, _> = pks_str + .split(',') + .map(|pk_str| { + let bytes = hex::decode(pk_str) + .map_err(|e| eyre::eyre!("Invalid hex in public key: {}", e))?; + XOnlyPublicKey::from_slice(&bytes) + .map_err(|e| eyre::eyre!("Invalid public key: {}", e)) + }) + .collect(); + + let pks = pks?; + + if pks.is_empty() { + return Err(eyre::eyre!("No public keys provided")); + } + + if threshold > pks.len() as u32 { + return Err(eyre::eyre!( + "Threshold cannot be greater than number of public keys" + )); + } + + Ok(SecurityCouncil { pks, threshold }) + } +} + +impl serde::Serialize for SecurityCouncil { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl<'de> serde::Deserialize<'de> for SecurityCouncil { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + s.parse().map_err(serde::de::Error::custom) + } +} + +impl std::fmt::Display for SecurityCouncil { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}:", self.threshold)?; + let pks_str = self + .pks + .iter() + .map(|pk| hex::encode(pk.serialize())) + .collect::>() + .join(","); + write!(f, "{}", pks_str) + } +} + +/// Data structure to represent the data for a base 
deposit. These kinds of deposits are created by users. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] +pub struct BaseDepositData { + /// User's EVM address. + pub evm_address: EVMAddress, + /// User's recovery taproot address. + pub recovery_taproot_address: bitcoin::Address, +} + +/// Data structure to represent the data for a replacement deposit. These kinds of deposits are created by the bridge, using +/// security council to unlock the previous deposit and move the funds to create a new deposit. Verifiers will sign the new deposit again. +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct ReplacementDepositData { + /// old move_to_vault txid that was replaced + pub old_move_txid: Txid, +} + +/// Data structure to represent the data for an operator. These data is used in the tx creation so any deviation will change the tx's +/// created by the bridge. +#[derive(Debug, Clone, serde::Serialize, PartialEq, Eq)] +pub struct OperatorData { + pub xonly_pk: XOnlyPublicKey, + pub reimburse_addr: Address, + pub collateral_funding_outpoint: OutPoint, +} + +impl<'de> serde::Deserialize<'de> for OperatorData { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + #[derive(serde::Deserialize)] + struct OperatorDataHelper { + xonly_pk: XOnlyPublicKey, + reimburse_addr: Address, + collateral_funding_outpoint: OutPoint, + } + + let helper = OperatorDataHelper::deserialize(deserializer)?; + + Ok(OperatorData { + xonly_pk: helper.xonly_pk, + reimburse_addr: helper.reimburse_addr.assume_checked(), + collateral_funding_outpoint: helper.collateral_funding_outpoint, + }) + } +} diff --git a/core/src/encryption.rs b/core/src/encryption.rs new file mode 100644 index 000000000..bd0ef47c6 --- /dev/null +++ b/core/src/encryption.rs @@ -0,0 +1,119 @@ +use bitcoin::secp256k1::rand::{self, RngCore}; +use chacha20poly1305::{ + aead::{Aead, KeyInit}, + XChaCha20Poly1305, XNonce, +}; +use 
x25519_dalek::{ + EphemeralSecret, PublicKey as X25519PublicKey, StaticSecret as X25519StaticSecret, +}; + +const MIN_ENCRYPTED_LEN: usize = 56; +const EPHEMERAL_PUBKEY_LEN: usize = 32; + +/// Encrypts a message for a recipient using X25519 key agreement and XChaCha20Poly1305 authenticated encryption. +/// +/// # Parameters +/// - `recipient_pubkey`: The recipient's X25519 public key as a 32-byte array. +/// - `message`: The plaintext message to encrypt. +/// +/// # Returns +/// Returns a `Result` containing the encrypted message as a `Vec`, or an error. +/// The output format is: `[ephemeral_public_key (32 bytes)] || [nonce (24 bytes)] || [ciphertext (variable length)]`. +/// +/// # Encryption Scheme +/// - Uses X25519 to perform an ECDH key agreement between a randomly generated ephemeral key and the recipient's public key. +/// - The shared secret is used as the key for XChaCha20Poly1305 authenticated encryption. +/// - The output includes the ephemeral public key and nonce required for decryption. 
+pub fn encrypt_bytes(recipient_pubkey: [u8; 32], message: &[u8]) -> Result, eyre::Report> { + let recipient_pubkey = X25519PublicKey::from(recipient_pubkey); + + let ephemeral_secret = EphemeralSecret::random_from_rng(rand::thread_rng()); + let ephemeral_public = X25519PublicKey::from(&ephemeral_secret); + + let shared_secret = ephemeral_secret.diffie_hellman(&recipient_pubkey); + let cipher = XChaCha20Poly1305::new_from_slice(shared_secret.as_bytes()) + .map_err(|e| eyre::eyre!("Failed to create cipher: {e}"))?; + + let mut nonce_bytes = [0u8; 24]; + rand::thread_rng().fill_bytes(&mut nonce_bytes); + let nonce = XNonce::from_slice(&nonce_bytes); + + let ciphertext = cipher + .encrypt(nonce, message) + .map_err(|e| eyre::eyre!("Failed to encrypt message: {e}"))?; + + let mut output = vec![]; + output.extend_from_slice(ephemeral_public.as_bytes()); + output.extend_from_slice(&nonce_bytes); + output.extend_from_slice(&ciphertext); + Ok(output) +} + +/// Decrypts a message encrypted with `encrypt_bytes` using the recipient's X25519 private key. +/// +/// # Parameters +/// - `recipient_privkey`: A 32-byte slice representing the recipient's X25519 private key. +/// - `encrypted`: A byte slice containing the encrypted data. The expected format is: +/// - 32 bytes: ephemeral public key +/// - 24 bytes: XChaCha20-Poly1305 nonce +/// - remaining bytes: ciphertext (including authentication tag) +/// +/// # Returns +/// - `Ok(Vec)`: The decrypted message bytes. +/// - `Err(eyre::Report)`: If decryption fails or the input is invalid. 
+pub fn decrypt_bytes(recipient_privkey: &[u8], encrypted: &[u8]) -> Result, eyre::Report> { + if encrypted.len() < MIN_ENCRYPTED_LEN { + return Err(eyre::eyre!("Invalid encrypted length")); + } + + let ephemeral_pubkey_bytes: [u8; EPHEMERAL_PUBKEY_LEN] = encrypted[0..EPHEMERAL_PUBKEY_LEN] + .try_into() + .map_err(|_| eyre::eyre!("Invalid ephemeral public key length"))?; + let ephemeral_pubkey = X25519PublicKey::from(ephemeral_pubkey_bytes); + let nonce = XNonce::from_slice(&encrypted[EPHEMERAL_PUBKEY_LEN..MIN_ENCRYPTED_LEN]); + let ciphertext = &encrypted[MIN_ENCRYPTED_LEN..]; + + let recipient_priv_bytes: [u8; 32] = recipient_privkey + .try_into() + .map_err(|_| eyre::eyre!("Invalid recipient private key length"))?; + let recipient_secret = X25519StaticSecret::from(recipient_priv_bytes); + + let shared_secret = recipient_secret.diffie_hellman(&ephemeral_pubkey); + let cipher = XChaCha20Poly1305::new_from_slice(shared_secret.as_bytes()) + .map_err(|_| eyre::eyre!("Failed to create cipher"))?; + + cipher + .decrypt(nonce, ciphertext) + .map_err(|_| eyre::eyre!("Failed to decrypt message")) +} + +#[cfg(test)] +mod tests { + use super::*; + use hex::FromHex; + + #[test] + fn test_encrypt_decrypt() { + // Original keys in hex format + let privkey = <[u8; 32]>::from_hex( + "a80bc8cf095c2b37d4c6233114e0dd91f43d75de5602466232dbfcc1fc66c542", + ) + .unwrap(); + let pubkey = <[u8; 32]>::from_hex( + "025d32d10ec7b899df4eeb4d80918b7f0a1f2a28f6af24f71aa2a59c69c0d531", + ) + .unwrap(); + + // Test message + let message = b"Hello, Clementine!"; + + // Encrypt + let encrypted = encrypt_bytes(pubkey, message).unwrap(); + + // Decrypt + let decrypted = decrypt_bytes(&privkey, &encrypted).unwrap(); + + // Verify + assert_eq!(message, decrypted.as_slice()); + } +} diff --git a/core/src/errors.rs b/core/src/errors.rs new file mode 100644 index 000000000..8edc9ac7c --- /dev/null +++ b/core/src/errors.rs @@ -0,0 +1,279 @@ +//! # Errors +//! +//! 
This module defines globally shared error messages, the crate-level error wrapper and extension traits for error/results. +//! Our error paradigm is as follows: +//! 1. Modules define their own error types when they need shared error messages. Module-level errors can wrap eyre::Report to capture arbitrary errors. +//! 2. The crate-level error wrapper (BridgeError) is used to wrap errors from modules and attach extra context (ie. which module caused the error). +//! 3. External crate errors are always wrapped by the BridgeError and never by module-level errors. +//! 4. When using external crates inside modules, extension traits are used to convert external-crate errors into BridgeError. This is further wrapped in an eyre::Report to avoid a circular dependency. +//! 5. BridgeError can be converted to tonic::Status to be returned to the client. Module-level errors can define [`Into`] to customize the returned status. +//! 6. BridgeError can be used to share error messages across modules. +//! 7. When the error cause is not sufficiently explained by the error messages, use `eyre::Context::wrap_err` to add more context. This will not hinder modules that are trying to match the error. +//! +//! ## Error wrapper example usage with `TxError` +//! ```rust +//! use thiserror::Error; +//! use clementine_core::errors::{BridgeError, TxError, ErrorExt, ResultExt}; +//! +//! // Function with external crate signature +//! pub fn external_crate() -> Result<(), hex::FromHexError> { +//! Err(hex::FromHexError::InvalidStringLength) +//! } +//! +//! // Internal function failing with some error +//! pub fn internal_function_in_another_module() -> Result<(), BridgeError> { +//! Err(eyre::eyre!("I just failed").into()) +//! } +//! +//! +//! // This function returns module-level errors +//! // It can wrap external crate errors, and other crate-level errors +//! pub fn create_some_txs() -> Result<(), TxError> { +//! // Do external things +//! 
// This wraps the external crate error with BridgeError, then boxes inside an eyre::Report. The `?` will convert the eyre::Report into a TxError. +//! external_crate().map_to_eyre()?; +//! +//! // Do internal things +//! // This will simply wrap in eyre::Report, then rewrap in TxError. +//! internal_function_in_another_module().map_to_eyre()?; +//! +//! // Return a module-level error +//! Err(TxError::TxInputNotFound) +//! } +//! +//! pub fn test() -> Result<(), BridgeError> { +//! create_some_txs()?; +//! // This will convert the TxError into a BridgeError, wrapping the error with the message "Failed to build transactions" regardless of the actual error. +//! +//! // Chain will be: +//! // 1. External case: BridgeError -> TxError -> eyre::Report -> hex::FromHexError +//! // 2. Internal case: BridgeError -> TxError -> eyre::Report -> BridgeError -> eyre::Report (this could've been any other module-level error) +//! // 3. Module-level error: BridgeError -> TxError +//! +//! +//! // error(transparent) ensures that unnecessary error messages are not repeated. +//! Ok(()) +//! } +//! +//! pub fn main() { +//! assert!(test().is_err()); +//! } +//! ``` + +use crate::{ + actor::VerificationError, + builder::transaction::input::SpendableTxInError, + extended_bitcoin_rpc::BitcoinRPCError, + header_chain_prover::HeaderChainProverError, + rpc::{aggregator::AggregatorError, ParserError}, +}; +#[cfg(feature = "automation")] +use crate::{states::StateMachineError, tx_sender::SendTxError}; +use bitcoin::{secp256k1::PublicKey, OutPoint, Txid, XOnlyPublicKey}; +use clap::builder::StyledStr; +use core::fmt::Debug; +use hex::FromHexError; +use thiserror::Error; +use tonic::Status; + +pub use crate::builder::transaction::TxError; + +/// Errors returned by the bridge. 
+#[derive(Debug, Error)] +#[non_exhaustive] +pub enum BridgeError { + #[error("Header chain prover returned an error: {0}")] + Prover(#[from] HeaderChainProverError), + #[error("Failed to build transactions: {0}")] + Transaction(#[from] TxError), + #[cfg(feature = "automation")] + #[error("Failed to send transactions: {0}")] + SendTx(#[from] SendTxError), + #[error("Aggregator error: {0}")] + Aggregator(#[from] AggregatorError), + #[error("Failed to parse request: {0}")] + Parser(#[from] ParserError), + #[error("SpendableTxIn error: {0}")] + SpendableTxIn(#[from] SpendableTxInError), + #[error("Bitcoin RPC error: {0}")] + BitcoinRPC(#[from] BitcoinRPCError), + #[cfg(feature = "automation")] + #[error("State machine error: {0}")] + StateMachine(#[from] StateMachineError), + #[error("RPC authentication error: {0}")] + RPCAuthError(#[from] VerificationError), + + // Shared error messages + #[error("Unsupported network")] + UnsupportedNetwork, + #[error("Invalid configuration: {0}")] + ConfigError(String), + #[error("Missing environment variable {1}: {0}")] + EnvVarNotSet(std::env::VarError, &'static str), + #[error("Environment variable {0} is malformed: {1}")] + EnvVarMalformed(&'static str, String), + + #[error("Failed to convert between integer types")] + IntConversionError, + #[error("Failed to encode/decode data using borsh")] + BorshError, + #[error("Operator x-only public key {0} was not found in the DB")] + OperatorNotFound(XOnlyPublicKey), + #[error("Verifier with public key {0} was not found among the verifier clients")] + VerifierNotFound(PublicKey), + #[error("Deposit not found in DB: {0:?}")] + DepositNotFound(OutPoint), + #[error("Deposit is invalid due to {0}")] + InvalidDeposit(String), + #[error("Operator data mismatch. Data already stored in DB and received by set_operator doesn't match for xonly_pk: {0}")] + OperatorDataMismatch(XOnlyPublicKey), + #[error("Deposit data mismatch. 
Data already stored in DB doesn't match the new data for deposit {0:?}")] + DepositDataMismatch(OutPoint), + #[error("Operator winternitz public keys mismatch. Data already stored in DB doesn't match the new data for operator {0}")] + OperatorWinternitzPublicKeysMismatch(XOnlyPublicKey), + #[error("BitVM setup data mismatch. Data already stored in DB doesn't match the new data for operator {0} and deposit {1:?}")] + BitvmSetupDataMismatch(XOnlyPublicKey, OutPoint), + #[error("BitVM replacement data will exhaust memory. The maximum number of operations is {0}")] + BitvmReplacementResourceExhaustion(usize), + #[error("Operator challenge ack hashes mismatch. Data already stored in DB doesn't match the new data for operator {0} and deposit {1:?}")] + OperatorChallengeAckHashesMismatch(XOnlyPublicKey, OutPoint), + #[error("Invalid BitVM public keys")] + InvalidBitVMPublicKeys, + #[error("Invalid challenge ack hashes")] + InvalidChallengeAckHashes, + #[error("Invalid operator index")] + InvalidOperatorIndex, + #[error("Invalid protocol paramset")] + InvalidProtocolParamset, + #[error("Deposit already signed and move txid {0} is in chain")] + DepositAlreadySigned(Txid), + #[error("Invalid withdrawal ECDSA verification signature")] + InvalidECDSAVerificationSignature, + #[error("Withdrawal ECDSA verification signature missing")] + ECDSAVerificationSignatureMissing, + + // External crate error wrappers + #[error("Failed to call database: {0}")] + DatabaseError(#[from] sqlx::Error), + #[error("Failed to convert hex string: {0}")] + FromHexError(#[from] FromHexError), + #[error("Failed to convert to hash from slice: {0}")] + FromSliceError(#[from] bitcoin::hashes::FromSliceError), + #[error("Error while calling EVM contract: {0}")] + AlloyContract(#[from] alloy::contract::Error), + #[error("Error while calling EVM RPC function: {0}")] + AlloyRpc(#[from] alloy::transports::RpcError), + #[error("Error while encoding/decoding EVM type: {0}")] + AlloySolTypes(#[from] 
alloy::sol_types::Error), + #[error("{0}")] + CLIDisplayAndExit(StyledStr), + #[error(transparent)] + RPC(#[from] Status), + + #[error("Arithmetic overflow occurred: {0}")] + ArithmeticOverflow(&'static str), + #[error("Insufficient funds: {0}")] + InsufficientFunds(&'static str), + + // Base wrapper for eyre + #[error(transparent)] + Eyre(#[from] eyre::Report), +} + +/// Extension traits for errors to easily convert them to eyre::Report and +/// tonic::Status through BridgeError. +pub trait ErrorExt: Sized { + /// Converts the error into an eyre::Report, first wrapping in + /// BridgeError if necessary. It does not rewrap in eyre::Report if + /// the given error is already an eyre::Report. + fn into_eyre(self) -> eyre::Report; + /// Converts the error into a tonic::Status. Walks the chain of errors and + /// returns the first [`tonic::Status`] error. If it can't find one, it will + /// return an Status::internal with the Display representation of the error. + fn into_status(self) -> tonic::Status; +} + +/// Extension traits for results to easily convert them to eyre::Report and +/// tonic::Status through BridgeError. +pub trait ResultExt: Sized { + type Output; + + fn map_to_eyre(self) -> Result; + fn map_to_status(self) -> Result; +} + +impl> ErrorExt for T { + fn into_eyre(self) -> eyre::Report { + match self.into() { + BridgeError::Eyre(report) => report, + other => eyre::eyre!(other), + } + } + fn into_status(self) -> tonic::Status { + self.into().into() + } +} + +impl> ResultExt for Result { + type Output = U; + + fn map_to_eyre(self) -> Result { + self.map_err(ErrorExt::into_eyre) + } + + fn map_to_status(self) -> Result { + self.map_err(ErrorExt::into_status) + } +} + +impl From for tonic::Status { + fn from(val: BridgeError) -> Self { + let eyre_report = val.into_eyre(); + + // eyre::Report can cast any error in the chain to a Status, so we use its downcast method to get the first Status. 
+ eyre_report.downcast::().unwrap_or_else(|report| { + // We don't want this case to happen, all casts to Status should contain a Status that contains a user-facing error message. + tracing::error!( + "Returning internal error on RPC call, full error: {:?}", + report + ); + + let mut status = tonic::Status::internal(report.to_string()); + status.set_source(Into::into( + Into::>::into(report), + )); + status + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn test_downcast() { + assert_eq!( + BridgeError::IntConversionError + .into_eyre() + .wrap_err("Some other error") + .into_eyre() + .wrap_err("some other") + .downcast_ref::() + .unwrap() + .to_string(), + BridgeError::IntConversionError.to_string() + ); + } + + #[test] + fn test_status_in_chain_cast_properly() { + let err: BridgeError = eyre::eyre!("Some problem") + .wrap_err(tonic::Status::deadline_exceeded("Some timer expired")) + .wrap_err("Something else went wrong") + .into(); + + let status: Status = err.into_status(); + assert_eq!(status.code(), tonic::Code::DeadlineExceeded); + assert_eq!(status.message(), "Some timer expired"); + } +} diff --git a/core/src/extended_bitcoin_rpc.rs b/core/src/extended_bitcoin_rpc.rs new file mode 100644 index 000000000..e2b988771 --- /dev/null +++ b/core/src/extended_bitcoin_rpc.rs @@ -0,0 +1,1625 @@ +//! # Bitcoin Extended RPC Interface +//! +//! Extended RPC interface communicates with the Bitcoin node. It features some +//! common wrappers around typical RPC operations as well as direct +//! communication interface with the Bitcoin node. +//! +//! ## Tests +//! +//! In tests, Bitcoind node and client are usually created using +//! [`crate::test::common::create_regtest_rpc`]. Please refer to +//! [`crate::test::common`] for using [`ExtendedBitcoinRpc`] in tests. 
+ +use async_trait::async_trait; +use bitcoin::Address; +use bitcoin::Amount; +use bitcoin::BlockHash; +use bitcoin::FeeRate; +use bitcoin::OutPoint; +use bitcoin::ScriptBuf; +use bitcoin::TxOut; +use bitcoin::Txid; +use bitcoincore_rpc::Auth; +use bitcoincore_rpc::Client; +use bitcoincore_rpc::RpcApi; +use eyre::eyre; +use eyre::Context; +use eyre::OptionExt; +use secrecy::ExposeSecret; +use secrecy::SecretString; +use std::iter::Take; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; +use tokio_retry::strategy::{jitter, ExponentialBackoff}; +use tokio_retry::RetryIf; + +use crate::builder::address::create_taproot_address; +use crate::builder::transaction::create_round_txhandlers; +use crate::builder::transaction::input::UtxoVout; +use crate::builder::transaction::KickoffWinternitzKeys; +use crate::builder::transaction::TransactionType; +use crate::builder::transaction::TxHandler; +use crate::config::protocol::ProtocolParamset; +use crate::deposit::OperatorData; +use crate::errors::BridgeError; +use crate::operator::RoundIndex; + +#[cfg(test)] +use crate::{ + citrea::CitreaClientT, + test::common::{are_all_state_managers_synced, test_actors::TestActors}, +}; + +type Result = std::result::Result; + +#[derive(Clone)] +pub struct RetryConfig { + pub initial_delay: Duration, + pub max_delay: Duration, + pub max_attempts: usize, + pub backoff_multiplier: u64, + pub is_jitter: bool, + // Store the base iterator configuration + base_strategy: Arc>, +} + +impl RetryConfig { + pub fn new( + initial_delay: Duration, + max_delay: Duration, + max_attempts: usize, + backoff_multiplier: u64, + is_jitter: bool, + ) -> Self { + // Create the base strategy once + let base_strategy = Arc::new( + ExponentialBackoff::from_millis(initial_delay.as_millis() as u64) + .max_delay(max_delay) + .factor(backoff_multiplier) + .take(max_attempts), + ); + + Self { + initial_delay, + max_delay, + max_attempts, + backoff_multiplier, + is_jitter, + base_strategy, + } + } + + 
pub fn get_strategy(&self) -> Box + Send> { + // Clone the base strategy to get a fresh iterator with the same initial state + let strategy = (*self.base_strategy).clone(); + + if self.is_jitter { + Box::new(strategy.map(jitter)) + } else { + Box::new(strategy) + } + } +} + +impl Default for RetryConfig { + fn default() -> Self { + Self::new( + Duration::from_millis(100), + Duration::from_secs(30), + 5, + 2, + false, + ) + } +} + +impl std::fmt::Debug for RetryConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RetryConfig") + .field("initial_delay", &self.initial_delay) + .field("max_delay", &self.max_delay) + .field("max_attempts", &self.max_attempts) + .field("backoff_multiplier", &self.backoff_multiplier) + .field("is_jitter", &self.is_jitter) + .finish() + } +} + +/// Trait to determine if an error is retryable +pub trait RetryableError { + fn is_retryable(&self) -> bool; +} + +impl RetryableError for bitcoincore_rpc::Error { + fn is_retryable(&self) -> bool { + match self { + // JSON-RPC errors - check specific error patterns + bitcoincore_rpc::Error::JsonRpc(jsonrpc_error) => { + let error_str = jsonrpc_error.to_string().to_lowercase(); + // Retry on connection issues, timeouts, temporary failures + error_str.contains("timeout") + || error_str.contains("connection") + || error_str.contains("temporary") + || error_str.contains("busy") + || error_str.contains("unavailable") + || error_str.contains("network") + || error_str.contains("broken pipe") + || error_str.contains("connection reset") + || error_str.contains("connection refused") + || error_str.contains("host unreachable") + } + + // I/O errors are typically network-related and retryable + bitcoincore_rpc::Error::Io(io_error) => { + use std::io::ErrorKind; + match io_error.kind() { + // These are typically temporary network issues + ErrorKind::ConnectionRefused + | ErrorKind::ConnectionReset + | ErrorKind::ConnectionAborted + | ErrorKind::NotConnected + | 
ErrorKind::BrokenPipe + | ErrorKind::TimedOut + | ErrorKind::Interrupted + | ErrorKind::UnexpectedEof => true, + + // These are typically permanent issues + ErrorKind::PermissionDenied + | ErrorKind::NotFound + | ErrorKind::InvalidInput + | ErrorKind::InvalidData => false, + + // For other kinds, be conservative and retry + _ => true, + } + } + + // Authentication errors are typically permanent + bitcoincore_rpc::Error::Auth(_) => false, + + // URL parse errors are permanent + bitcoincore_rpc::Error::UrlParse(_) => false, + + // Invalid cookie file is usually a config issue (permanent) + bitcoincore_rpc::Error::InvalidCookieFile => false, + + // Daemon returned error - check the error message + bitcoincore_rpc::Error::ReturnedError(error_msg) => { + let error_str = error_msg.to_lowercase(); + // Retry on temporary RPC errors + error_str.contains("loading") || + error_str.contains("warming up") || + error_str.contains("verifying") || + error_str.contains("busy") || + error_str.contains("temporary") || + error_str.contains("try again") || + error_str.contains("timeout") || + // Don't retry on wallet/transaction specific errors + !(error_str.contains("insufficient funds") || + error_str.contains("transaction already") || + error_str.contains("invalid") || + error_str.contains("not found") || + error_str.contains("conflict")) + } + + // Unexpected structure might be due to version mismatch or temporary parsing issues + // Be conservative and retry once + bitcoincore_rpc::Error::UnexpectedStructure => true, + + // Serialization errors are typically permanent + bitcoincore_rpc::Error::BitcoinSerialization(_) => false, + bitcoincore_rpc::Error::Hex(_) => false, + bitcoincore_rpc::Error::Json(_) => false, + bitcoincore_rpc::Error::Secp256k1(_) => false, + bitcoincore_rpc::Error::InvalidAmount(_) => false, + } + } +} + +impl RetryableError for BitcoinRPCError { + fn is_retryable(&self) -> bool { + match self { + BitcoinRPCError::TransactionNotConfirmed => true, + 
BitcoinRPCError::TransactionAlreadyInBlock(_) => false, + BitcoinRPCError::BumpFeeUTXOSpent(_) => false, + + // These might be temporary - retry + BitcoinRPCError::BumpFeeError(_, _) => true, + + // Check underlying error + BitcoinRPCError::Other(err) => { + let err_str = err.to_string().to_lowercase(); + err_str.contains("timeout") + || err_str.contains("connection") + || err_str.contains("temporary") + || err_str.contains("busy") + || err_str.contains("network") + } + } + } +} + +/// Bitcoin RPC wrapper. Extended RPC provides useful wrapper functions for +/// common operations, as well as direct access to Bitcoin RPC. +#[derive(Clone)] +pub struct ExtendedBitcoinRpc { + url: String, + client: Arc, + retry_config: RetryConfig, + + #[cfg(test)] + cached_mining_address: Arc>>, +} + +impl std::fmt::Debug for ExtendedBitcoinRpc { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ExtendedBitcoinRpc") + .field("url", &self.url) + .finish() + } +} + +/// Errors that can occur during Bitcoin RPC operations. +#[derive(Debug, thiserror::Error)] +pub enum BitcoinRPCError { + #[error("Failed to bump fee for Txid of {0} and feerate of {1}")] + BumpFeeError(Txid, FeeRate), + #[error("Failed to bump fee: UTXO is already spent")] + BumpFeeUTXOSpent(OutPoint), + #[error("Transaction is already in block: {0}")] + TransactionAlreadyInBlock(BlockHash), + #[error("Transaction is not confirmed")] + TransactionNotConfirmed, + + #[error(transparent)] + Other(#[from] eyre::Report), +} + +impl ExtendedBitcoinRpc { + /// Connects to Bitcoin RPC server with built-in retry mechanism. + /// + /// This method attempts to connect to the Bitcoin RPC server and creates a new + /// [`ExtendedBitcoinRpc`] instance. It includes retry logic that will retry + /// connection attempts for retryable errors using exponential backoff. 
+ /// + /// # Parameters + /// + /// * `url` - The RPC server URL + /// * `user` - Username for RPC authentication + /// * `password` - Password for RPC authentication + /// * `retry_config` - Optional retry configuration. If None, uses default config. + /// + /// # Returns + /// + /// - [`Result`]: A new ExtendedBitcoinRpc instance on success + /// + /// # Errors + /// + /// - [`BitcoinRPCError`]: If connection fails after all retry attempts or ping fails + pub async fn connect( + url: String, + user: SecretString, + password: SecretString, + retry_config: Option, + ) -> Result { + let config = retry_config.clone().unwrap_or_default(); + + let url_clone = url.clone(); + let user_clone = user.clone(); + let password_clone = password.clone(); + + let retry_strategy = config.get_strategy(); + + RetryIf::spawn( + retry_strategy, + || async { + let auth = Auth::UserPass( + user_clone.expose_secret().to_string(), + password_clone.expose_secret().to_string(), + ); + + let retry_config = retry_config.clone().unwrap_or_default(); + + let rpc = Client::new(&url_clone, auth) + .await + .wrap_err("Failed to connect to Bitcoin RPC")?; + + // Since this is a lazy connection, we should ping it to ensure it works + rpc.ping() + .await + .map_err(|e| eyre::eyre!("Failed to ping Bitcoin RPC: {}", e))?; + + let result: Result = Ok(Self { + url: url_clone.clone(), + client: Arc::new(rpc), + retry_config, + #[cfg(test)] + cached_mining_address: Arc::new(tokio::sync::RwLock::new(None)), + }); + + match &result { + Ok(_) => tracing::debug!("Connected to Bitcoin RPC successfully"), + Err(error) => { + if !error.is_retryable() { + tracing::debug!("Non-retryable connection error: {}", error); + } else { + tracing::debug!("Bitcoin RPC connection failed, will retry: {}", error); + } + } + } + + result + }, + |error: &BitcoinRPCError| error.is_retryable(), + ) + .await + } + + /// Generates a new Bitcoin address for the wallet. + pub async fn get_new_wallet_address(&self) -> Result
{ + self.get_new_address(None, None) + .await + .wrap_err("Failed to get new address") + .map(|addr| addr.assume_checked()) + .map_err(Into::into) + } + + /// Returns the number of confirmations for a transaction. + /// + /// # Parameters + /// + /// * `txid`: TXID of the transaction to check. + /// + /// # Returns + /// + /// - [`u32`]: The number of confirmations for the transaction. + /// + /// # Errors + /// + /// - [`BitcoinRPCError`]: If the transaction is not confirmed (0) or if + /// there was an error retrieving the transaction info. + pub async fn confirmation_blocks(&self, txid: &bitcoin::Txid) -> Result { + let raw_tx_res = self + .get_raw_transaction_info(txid, None) + .await + .wrap_err("Failed to get transaction info")?; + raw_tx_res + .confirmations + .ok_or_else(|| eyre::eyre!("No confirmation data for transaction {}", txid)) + .map_err(Into::into) + } + + /// Retrieves the current blockchain height (number of blocks). + /// + /// # Returns + /// + /// - [`u32`]: Current block height + pub async fn get_current_chain_height(&self) -> Result { + let height = self + .get_block_count() + .await + .wrap_err("Failed to get current chain height")?; + Ok(u32::try_from(height).wrap_err("Failed to convert block count to u32")?) + } + + /// Checks if an operator's collateral is valid and available for use. + /// + /// This function validates the operator's collateral by: + /// 1. Verifying the collateral UTXO exists and has the correct amount + /// 2. Creating the round transaction chain to track current collateral position + /// 3. 
Determining if the current collateral UTXO in the chain is spent in a non-protocol tx, signaling the exit of operator from the protocol + /// + /// # Parameters + /// + /// * `operator_data`: Data about the operator including collateral funding outpoint + /// * `kickoff_wpks`: Kickoff Winternitz public keys for round transaction creation + /// * `paramset`: Protocol parameters + /// + /// # Returns + /// + /// - [`bool`]: `true` if the collateral is still usable, thus operator is still in protocol, `false` if the collateral is spent, thus operator is not in protocol anymore + /// + /// # Errors + /// + /// - [`BridgeError`]: If there was an error retrieving transaction data, creating round transactions, + /// or checking UTXO status + pub async fn collateral_check( + &self, + operator_data: &OperatorData, + kickoff_wpks: &KickoffWinternitzKeys, + paramset: &'static ProtocolParamset, + ) -> std::result::Result { + // first check if the collateral utxo is on chain or mempool + let tx = self + .get_tx_of_txid(&operator_data.collateral_funding_outpoint.txid) + .await + .wrap_err(format!( + "Failed to find collateral utxo in chain for outpoint {:?}", + operator_data.collateral_funding_outpoint + ))?; + let collateral_outpoint = match tx + .output + .get(operator_data.collateral_funding_outpoint.vout as usize) + { + Some(output) => output, + None => { + tracing::warn!( + "No output at index {} for txid {} while checking for collateral existence", + operator_data.collateral_funding_outpoint.vout, + operator_data.collateral_funding_outpoint.txid + ); + return Ok(false); + } + }; + + if collateral_outpoint.value != paramset.collateral_funding_amount { + tracing::error!( + "Collateral amount for collateral {:?} is not correct: expected {}, got {}", + operator_data.collateral_funding_outpoint, + paramset.collateral_funding_amount, + collateral_outpoint.value + ); + return Ok(false); + } + + let operator_tpr_address = + create_taproot_address(&[], Some(operator_data.xonly_pk), 
paramset.network).0; + + if collateral_outpoint.script_pubkey != operator_tpr_address.script_pubkey() { + tracing::error!( + "Collateral script pubkey for collateral {:?} is not correct: expected {}, got {}", + operator_data.collateral_funding_outpoint, + operator_tpr_address.script_pubkey(), + collateral_outpoint.script_pubkey + ); + return Ok(false); + } + + // we additionally check if collateral utxo is on chain (so not in mempool) + // on mainnet we fail if collateral utxo is not on chain because if it is in mempool, + // the txid of the utxo can change if the fee is bumped + // on other networks, we allow collateral to be in mempool to not wait for collateral to be on chain to do deposits for faster testing + let is_on_chain = self + .is_tx_on_chain(&operator_data.collateral_funding_outpoint.txid) + .await?; + if !is_on_chain { + return match paramset.network { + bitcoin::Network::Bitcoin => Ok(false), + _ => Ok(true), + }; + } + + let mut current_collateral_outpoint: OutPoint = operator_data.collateral_funding_outpoint; + let mut prev_ready_to_reimburse: Option = None; + // iterate over all rounds + for round_idx in RoundIndex::iter_rounds(paramset.num_round_txs) { + // create round and ready to reimburse txs for the round + let txhandlers = create_round_txhandlers( + paramset, + round_idx, + operator_data, + kickoff_wpks, + prev_ready_to_reimburse.as_ref(), + )?; + + let mut round_txhandler_opt = None; + let mut ready_to_reimburse_txhandler_opt = None; + for txhandler in &txhandlers { + match txhandler.get_transaction_type() { + TransactionType::Round => round_txhandler_opt = Some(txhandler), + TransactionType::ReadyToReimburse => { + ready_to_reimburse_txhandler_opt = Some(txhandler) + } + _ => {} + } + } + if round_txhandler_opt.is_none() || ready_to_reimburse_txhandler_opt.is_none() { + return Err(eyre!( + "Failed to create round and ready to reimburse txs for round {:?} for operator {}", + round_idx, + operator_data.xonly_pk + ).into()); + } + + let 
round_txid = round_txhandler_opt + .expect("Round txhandler should exist, checked above") + .get_cached_tx() + .compute_txid(); + let is_round_tx_on_chain = self.is_tx_on_chain(&round_txid).await?; + if !is_round_tx_on_chain { + break; + } + let block_hash = self.get_blockhash_of_tx(&round_txid).await?; + let block_height = self + .get_block_info(&block_hash) + .await + .wrap_err(format!( + "Failed to get block info for block hash {}", + block_hash + ))? + .height; + if block_height < paramset.start_height as usize { + tracing::warn!( + "Collateral utxo of operator {:?} is spent in a block before paramset start height: {} < {}", + operator_data, + block_height, + paramset.start_height + ); + return Ok(false); + } + current_collateral_outpoint = OutPoint { + txid: round_txid, + vout: UtxoVout::CollateralInRound.get_vout(), + }; + if round_idx == RoundIndex::Round(paramset.num_round_txs - 1) { + // for the last round, only check round tx, as if the operator sent the ready to reimburse tx of last round, + // it cannot create more kickoffs anymore + break; + } + let ready_to_reimburse_txhandler = ready_to_reimburse_txhandler_opt + .expect("Ready to reimburse txhandler should exist"); + let ready_to_reimburse_txid = + ready_to_reimburse_txhandler.get_cached_tx().compute_txid(); + let is_ready_to_reimburse_tx_on_chain = + self.is_tx_on_chain(&ready_to_reimburse_txid).await?; + if !is_ready_to_reimburse_tx_on_chain { + break; + } + + current_collateral_outpoint = OutPoint { + txid: ready_to_reimburse_txid, + vout: UtxoVout::CollateralInReadyToReimburse.get_vout(), + }; + + prev_ready_to_reimburse = Some(ready_to_reimburse_txhandler.clone()); + } + + // if the collateral utxo we found latest in the round tx chain is spent, operators collateral is spent from Clementine + // bridge protocol, thus it is unusable and operator cannot fulfill withdrawals anymore + // if not spent, it should exist in chain, which is checked below + 
Ok(!self.is_utxo_spent(&current_collateral_outpoint).await?) + } + + /// Returns block hash of a transaction, if confirmed. + /// + /// # Parameters + /// + /// * `txid`: TXID of the transaction to check. + /// + /// # Returns + /// + /// - [`bitcoin::BlockHash`]: Block hash of the block that the transaction + /// is in. + /// + /// # Errors + /// + /// - [`BitcoinRPCError`]: If the transaction is not confirmed (0) or if + /// there was an error retrieving the transaction info. + pub async fn get_blockhash_of_tx(&self, txid: &bitcoin::Txid) -> Result { + let raw_transaction_results = self + .get_raw_transaction_info(txid, None) + .await + .wrap_err("Failed to get transaction info")?; + let Some(blockhash) = raw_transaction_results.blockhash else { + return Err(eyre::eyre!("Transaction not confirmed: {0}", txid).into()); + }; + Ok(blockhash) + } + + /// Retrieves the block header and hash for a given block height. + /// + /// # Arguments + /// + /// * `height`: Target block height. + /// + /// # Returns + /// + /// - ([`bitcoin::BlockHash`], [`bitcoin::block::Header`]): A tuple + /// containing the block hash and header. + pub async fn get_block_info_by_height( + &self, + height: u64, + ) -> Result<(bitcoin::BlockHash, bitcoin::block::Header)> { + let block_hash = self.get_block_hash(height).await.wrap_err(format!( + "Couldn't retrieve block hash from height {} from rpc", + height + ))?; + let block_header = self.get_block_header(&block_hash).await.wrap_err(format!( + "Couldn't retrieve block header with block hash {} from rpc", + block_hash + ))?; + + Ok((block_hash, block_header)) + } + + /// Gets the transactions that created the inputs of a given transaction. + /// + /// # Arguments + /// + /// * `tx` - The transaction to get the previous transactions for + /// + /// # Returns + /// + /// A vector of transactions that created the inputs of the given transaction. 
+ #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + pub async fn get_prevout_txs( + &self, + tx: &bitcoin::Transaction, + ) -> Result> { + let mut prevout_txs = Vec::new(); + for input in &tx.input { + let txid = input.previous_output.txid; + prevout_txs.push(self.get_tx_of_txid(&txid).await?); + } + Ok(prevout_txs) + } + + /// Gets the transaction data for a given transaction ID. + /// + /// # Parameters + /// + /// * `txid`: TXID of the transaction to check. + /// + /// # Returns + /// + /// - [`bitcoin::Transaction`]: Transaction itself. + pub async fn get_tx_of_txid(&self, txid: &bitcoin::Txid) -> Result { + let raw_transaction = self + .get_raw_transaction(txid, None) + .await + .wrap_err("Failed to get raw transaction")?; + Ok(raw_transaction) + } + + /// Checks if a transaction is on-chain. + /// + /// # Parameters + /// + /// * `txid`: TXID of the transaction to check. + /// + /// # Returns + /// + /// - [`bool`]: `true` if the transaction is on-chain, `false` otherwise. + pub async fn is_tx_on_chain(&self, txid: &bitcoin::Txid) -> Result { + Ok(self + .get_raw_transaction_info(txid, None) + .await + .ok() + .and_then(|s| s.blockhash) + .is_some()) + } + + /// Checks if a transaction UTXO has expected address and amount. + /// + /// # Parameters + /// + /// * `outpoint` - The outpoint to check + /// * `address` - Expected script pubkey + /// * `amount_sats` - Expected amount in satoshis + /// + /// # Returns + /// + /// - [`bool`]: `true` if the UTXO has the expected address and amount, `false` otherwise. 
+ pub async fn check_utxo_address_and_amount( + &self, + outpoint: &OutPoint, + address: &ScriptBuf, + amount_sats: Amount, + ) -> Result { + let tx = self + .get_raw_transaction(&outpoint.txid, None) + .await + .wrap_err("Failed to get transaction")?; + + let current_output = tx + .output + .get(outpoint.vout as usize) + .ok_or(eyre!( + "No output at index {} for txid {}", + outpoint.vout, + outpoint.txid + ))? + .to_owned(); + + let expected_output = TxOut { + script_pubkey: address.clone(), + value: amount_sats, + }; + + Ok(expected_output == current_output) + } + + /// Checks if an UTXO is spent. + /// + /// # Parameters + /// + /// * `outpoint`: The outpoint to check + /// + /// # Returns + /// + /// - [`bool`]: `true` if the UTXO is spent, `false` otherwise. + /// + /// # Errors + /// + /// - [`BitcoinRPCError`]: If the transaction is not confirmed or if there + /// was an error retrieving the transaction output. + pub async fn is_utxo_spent(&self, outpoint: &OutPoint) -> Result { + if !self.is_tx_on_chain(&outpoint.txid).await? { + return Err(BitcoinRPCError::TransactionNotConfirmed); + } + + let res = self + .get_tx_out(&outpoint.txid, outpoint.vout, Some(false)) + .await + .wrap_err("Failed to get transaction output")?; + + Ok(res.is_none()) + } + + /// Attempts to mine the specified number of blocks and returns their hashes. + /// + /// This test-only async function will mine `block_num` blocks on the Bitcoin regtest network + /// using a cached mining address or a newly generated one. It retries up to 5 times on failure + /// with exponential backoff. + /// + /// # Parameters + /// - `block_num`: The number of blocks to mine. + /// + /// # Returns + /// - `Ok(Vec)`: A vector of block hashes for the mined blocks. + /// - `Err`: If mining fails after all retry attempts. 
+ #[cfg(test)] + pub async fn mine_blocks(&self, block_num: u64) -> Result> { + if block_num == 0 { + return Ok(vec![]); + } + + self.try_mine(block_num).await + } + + /// A helper fn to safely mine blocks while waiting for all actors to be synced + #[cfg(test)] + pub async fn mine_blocks_while_synced( + &self, + block_num: u64, + actors: &TestActors, + ) -> Result> { + let mut mined_blocks = Vec::new(); + while mined_blocks.len() < block_num as usize { + if !are_all_state_managers_synced(self, actors).await? { + // wait until they are synced + tokio::time::sleep(std::time::Duration::from_millis(300)).await; + continue; + } + let new_blocks = self.mine_blocks(1).await?; + mined_blocks.extend(new_blocks); + } + Ok(mined_blocks) + } + + /// Internal helper that performs the actual block mining logic. + /// + /// It uses a cached mining address if available, otherwise it generates and caches + /// a new one. It then uses the address to mine `block_num` blocks. + /// + /// # Parameters + /// - `block_num`: The number of blocks to mine. + /// + /// # Returns + /// - `Ok(Vec)`: The list of block hashes. + /// - `Err`: If the client fails to get a new address or mine the blocks. + #[cfg(test)] + async fn try_mine(&self, block_num: u64) -> Result> { + let address = { + let read = self.cached_mining_address.read().await; + if let Some(addr) = &*read { + addr.clone() + } else { + drop(read); + let mut write = self.cached_mining_address.write().await; + + if let Some(addr) = &*write { + addr.clone() + } else { + let new_addr = self + .get_new_address(None, None) + .await + .wrap_err("Failed to get new address")? + .assume_checked() + .to_string(); + *write = Some(new_addr.clone()); + new_addr + } + } + }; + + let address = Address::from_str(&address) + .wrap_err("Invalid address format")? 
+ .assume_checked(); + let blocks = self + .generate_to_address(block_num, &address) + .await + .wrap_err("Failed to generate to address")?; + + Ok(blocks) + } + + /// Gets the number of transactions in the mempool. + /// + /// # Returns + /// + /// - [`usize`]: The number of transactions in the mempool. + pub async fn mempool_size(&self) -> Result { + let mempool_info = self + .get_mempool_info() + .await + .wrap_err("Failed to get mempool info")?; + Ok(mempool_info.size) + } + + /// Sends a specified amount of Bitcoins to the given address. + /// + /// # Parameters + /// + /// * `address` - The recipient address + /// * `amount_sats` - The amount to send in satoshis + /// + /// # Returns + /// + /// - [`OutPoint`]: The outpoint (txid and vout) of the newly created output. + pub async fn send_to_address( + &self, + address: &Address, + amount_sats: Amount, + ) -> Result { + let txid = self + .client + .send_to_address(address, amount_sats, None, None, None, None, None, None) + .await + .wrap_err("Failed to send to address")?; + + let tx_result = self + .get_transaction(&txid, None) + .await + .wrap_err("Failed to get transaction")?; + let vout = tx_result.details[0].vout; + + Ok(OutPoint { txid, vout }) + } + + /// Retrieves the transaction output for a given outpoint. + /// + /// # Arguments + /// + /// * `outpoint` - The outpoint (txid and vout) to retrieve + /// + /// # Returns + /// + /// - [`TxOut`]: The transaction output at the specified outpoint. + pub async fn get_txout_from_outpoint(&self, outpoint: &OutPoint) -> Result { + let tx = self + .get_raw_transaction(&outpoint.txid, None) + .await + .wrap_err("Failed to get transaction")?; + let txout = tx + .output + .get(outpoint.vout as usize) + .ok_or(eyre!( + "No output at index {} for txid {}", + outpoint.vout, + outpoint.txid + ))? + .to_owned(); + + Ok(txout) + } + + /// Bumps the fee of a transaction to meet or exceed a target fee rate. Does + /// nothing if the transaction is already confirmed. 
Returns the original + /// txid if no bump was needed. + /// + /// This function implements Replace-By-Fee (RBF) to increase the fee of an unconfirmed transaction. + /// It works as follows: + /// 1. If the transaction is already confirmed, returns Err(TransactionAlreadyInBlock) + /// 2. If the current fee rate is already >= the requested fee rate, returns the original txid + /// 3. Otherwise, increases the fee rate by adding the node's incremental fee to the current fee rate, then `bump_fee`s the transaction + /// + /// Note: This function currently only supports fee payer TXs. + /// + /// # Arguments + /// * `txid` - The transaction ID to bump + /// * `fee_rate` - The target fee rate to achieve + /// + /// # Returns + /// + /// - [`Txid`]: The txid of the bumped transaction (which may be the same as the input txid if no bump was needed). + /// + /// # Errors + /// + /// * `TransactionAlreadyInBlock` - If the transaction is already confirmed + /// * `BumpFeeUTXOSpent` - If the UTXO being spent by the transaction is already spent + /// * `BumpFeeError` - For other errors with fee bumping + pub async fn bump_fee_with_fee_rate(&self, txid: Txid, fee_rate: FeeRate) -> Result { + // Check if transaction is already confirmed + let transaction_info = self + .get_transaction(&txid, None) + .await + .wrap_err("Failed to get transaction")?; + if transaction_info.info.blockhash.is_some() { + return Err(BitcoinRPCError::TransactionAlreadyInBlock( + transaction_info + .info + .blockhash + .expect("Blockhash should be present"), + )); + } + + // Calculate current fee rate + let tx = transaction_info + .transaction() + .wrap_err("Failed to get transaction")?; + let tx_size = tx.weight().to_vbytes_ceil(); + let current_fee_sat = u64::try_from( + transaction_info + .fee + .expect("Fee should be present") + .to_sat() + .abs(), + ) + .wrap_err("Failed to convert fee to sat")?; + + let current_fee_rate = FeeRate::from_sat_per_kwu(1000 * current_fee_sat / tx_size); + + // If current 
fee rate is already sufficient, return original txid + if current_fee_rate >= fee_rate { + return Ok(txid); + } + + // Get node's incremental fee to determine how much to increase + let network_info = self + .get_network_info() + .await + .wrap_err("Failed to get network info")?; + let incremental_fee = network_info.incremental_fee; + let incremental_fee_rate: FeeRate = FeeRate::from_sat_per_kwu(incremental_fee.to_sat()); + + // Calculate new fee rate by adding incremental fee to current fee rate + let new_fee_rate = FeeRate::from_sat_per_kwu( + current_fee_rate.to_sat_per_kwu() + incremental_fee_rate.to_sat_per_kwu(), + ); + + tracing::debug!( + "Bumping fee for txid: {txid} from {current_fee_rate} to {new_fee_rate} with incremental fee {incremental_fee_rate} - Final fee rate: {new_fee_rate}" + ); + + // Call Bitcoin Core's bumpfee RPC + let bump_fee_result = match self + .bump_fee( + &txid, + Some(&bitcoincore_rpc::json::BumpFeeOptions { + fee_rate: Some(bitcoincore_rpc::json::FeeRate::per_vbyte(Amount::from_sat( + new_fee_rate.to_sat_per_vb_ceil(), + ))), + replaceable: Some(true), + ..Default::default() + }), + ) + .await + { + Ok(bump_fee_result) => bump_fee_result, + // Attempt to parse the error message to get the outpoint if the UTXO is already spent + Err(e) => match e { + bitcoincore_rpc::Error::JsonRpc(json_rpc_error) => match json_rpc_error { + bitcoincore_rpc::RpcError::Rpc(rpc_error) => { + if let Some((outpoint_str, _)) = + rpc_error.message.split_once(" is already spent") + { + let outpoint = OutPoint::from_str(outpoint_str) + .wrap_err(BitcoinRPCError::BumpFeeError(txid, fee_rate))?; + + return Err(BitcoinRPCError::BumpFeeUTXOSpent(outpoint)); + } + + return Err(eyre::eyre!("{:?}", rpc_error) + .wrap_err(BitcoinRPCError::BumpFeeError(txid, fee_rate)) + .into()); + } + _ => { + return Err(eyre::eyre!(json_rpc_error) + .wrap_err(BitcoinRPCError::BumpFeeError(txid, fee_rate)) + .into()); + } + }, + _ => { + return Err(eyre::eyre!(e) + 
.wrap_err(BitcoinRPCError::BumpFeeError(txid, fee_rate)) + .into()) + } + }, + }; + + // Return the new txid + Ok(bump_fee_result + .txid + .ok_or_eyre("Failed to get Txid from bump_fee_result")?) + } + + /// Creates a new instance of the [`ExtendedBitcoinRpc`] with a new client + /// connection for cloning. This is needed when you need a separate + /// connection to the Bitcoin RPC server. + /// + /// # Returns + /// + /// - [`ExtendedBitcoinRpc`]: A new instance of ExtendedBitcoinRpc with a new client connection. + pub async fn clone_inner(&self) -> std::result::Result { + Ok(Self { + url: self.url.clone(), + client: self.client.clone(), + retry_config: self.retry_config.clone(), + #[cfg(test)] + cached_mining_address: self.cached_mining_address.clone(), + }) + } +} + +#[async_trait] +/// Implementation of the `RpcApi` trait for `ExtendedBitcoinRpc`. All RPC calls +/// are made with retry logic that only retries when errors are retryable. +impl RpcApi for ExtendedBitcoinRpc { + async fn call serde::de::Deserialize<'a>>( + &self, + cmd: &str, + args: &[serde_json::Value], + ) -> std::result::Result { + let strategy = self.retry_config.get_strategy(); + + let condition = |error: &bitcoincore_rpc::Error| error.is_retryable(); + + RetryIf::spawn( + strategy, + || async { self.client.call(cmd, args).await }, + condition, + ) + .await + } +} + +#[cfg(test)] +mod tests { + use crate::actor::Actor; + use crate::config::protocol::{ProtocolParamset, REGTEST_PARAMSET}; + use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; + use crate::test::common::{citrea, create_test_config_with_thread_name}; + use crate::{ + bitvm_client::SECP, extended_bitcoin_rpc::BitcoinRPCError, test::common::create_regtest_rpc, + }; + use bitcoin::Amount; + use bitcoin::{amount, key::Keypair, Address, FeeRate, XOnlyPublicKey}; + use bitcoincore_rpc::RpcApi; + use citrea_e2e::bitcoin::DEFAULT_FINALITY_DEPTH; + use citrea_e2e::config::{BitcoinConfig, TestCaseDockerConfig}; + use 
citrea_e2e::test_case::TestCaseRunner; + use citrea_e2e::Result; + use citrea_e2e::{config::TestCaseConfig, framework::TestFramework, test_case::TestCase}; + use tonic::async_trait; + + #[tokio::test] + async fn new_extended_rpc_with_clone() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + rpc.mine_blocks(101).await.unwrap(); + let height = rpc.get_block_count().await.unwrap(); + let hash = rpc.get_block_hash(height).await.unwrap(); + + let cloned_rpc = rpc.clone_inner().await.unwrap(); + assert_eq!(cloned_rpc.url, rpc.url); + assert_eq!(cloned_rpc.get_block_count().await.unwrap(), height); + assert_eq!(cloned_rpc.get_block_hash(height).await.unwrap(), hash); + } + + #[tokio::test] + async fn tx_checks_in_mempool_and_on_chain() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + let keypair = Keypair::from_secret_key(&SECP, &config.secret_key); + let (xonly, _parity) = XOnlyPublicKey::from_keypair(&keypair); + let address = Address::p2tr(&SECP, xonly, None, config.protocol_paramset.network); + + let amount = amount::Amount::from_sat(10000); + + // Prepare a transaction. + let utxo = rpc.send_to_address(&address, amount).await.unwrap(); + let tx = rpc.get_tx_of_txid(&utxo.txid).await.unwrap(); + let txid = tx.compute_txid(); + tracing::debug!("TXID: {}", txid); + + assert_eq!(tx.output[utxo.vout as usize].value, amount); + assert_eq!(utxo.txid, txid); + assert!(rpc + .check_utxo_address_and_amount(&utxo, &address.script_pubkey(), amount) + .await + .unwrap()); + + // In mempool. 
+ assert!(rpc.confirmation_blocks(&utxo.txid).await.is_err()); + assert!(rpc.get_blockhash_of_tx(&utxo.txid).await.is_err()); + assert!(!rpc.is_tx_on_chain(&txid).await.unwrap()); + assert!(rpc.is_utxo_spent(&utxo).await.is_err()); + + rpc.mine_blocks(1).await.unwrap(); + let height = rpc.get_block_count().await.unwrap(); + assert_eq!(height as u32, rpc.get_current_chain_height().await.unwrap()); + let blockhash = rpc.get_block_hash(height).await.unwrap(); + + // On chain. + assert_eq!(rpc.confirmation_blocks(&utxo.txid).await.unwrap(), 1); + assert_eq!( + rpc.get_blockhash_of_tx(&utxo.txid).await.unwrap(), + blockhash + ); + assert_eq!(rpc.get_tx_of_txid(&txid).await.unwrap(), tx); + assert!(rpc.is_tx_on_chain(&txid).await.unwrap()); + assert!(!rpc.is_utxo_spent(&utxo).await.unwrap()); + + // Doesn't matter if in mempool or on chain. + let txout = rpc.get_txout_from_outpoint(&utxo).await.unwrap(); + assert_eq!(txout.value, amount); + assert_eq!(rpc.get_tx_of_txid(&txid).await.unwrap(), tx); + + let height = rpc.get_current_chain_height().await.unwrap(); + let (hash, header) = rpc.get_block_info_by_height(height.into()).await.unwrap(); + assert_eq!(blockhash, hash); + assert_eq!(rpc.get_block_header(&hash).await.unwrap(), header); + } + + #[tokio::test] + async fn bump_fee_with_fee_rate() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + let keypair = Keypair::from_secret_key(&SECP, &config.secret_key); + let (xonly, _parity) = XOnlyPublicKey::from_keypair(&keypair); + let address = Address::p2tr(&SECP, xonly, None, config.protocol_paramset.network); + + let amount = amount::Amount::from_sat(10000); + + // Confirmed transaction cannot be fee bumped. 
+ let utxo = rpc.send_to_address(&address, amount).await.unwrap(); + rpc.mine_blocks(1).await.unwrap(); + assert!(rpc + .bump_fee_with_fee_rate(utxo.txid, FeeRate::from_sat_per_vb(1).unwrap()) + .await + .inspect_err(|e| { + match e { + BitcoinRPCError::TransactionAlreadyInBlock(_) => {} + _ => panic!("Unexpected error: {:?}", e), + } + }) + .is_err()); + + let current_fee_rate = FeeRate::from_sat_per_vb_unchecked(1); + + // Trying to bump a transaction with a fee rate that is already enough + // should return the original txid. + let utxo = rpc.send_to_address(&address, amount).await.unwrap(); + let txid = rpc + .bump_fee_with_fee_rate(utxo.txid, current_fee_rate) + .await + .unwrap(); + assert_eq!(txid, utxo.txid); + + // A bigger fee rate should return a different txid. + let new_fee_rate = FeeRate::from_sat_per_vb_unchecked(10000); + let txid = rpc + .bump_fee_with_fee_rate(utxo.txid, new_fee_rate) + .await + .unwrap(); + assert_ne!(txid, utxo.txid); + } + + struct ReorgChecks; + #[async_trait] + impl TestCase for ReorgChecks { + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-txindex=1", + "-fallbackfee=0.000001", + "-rpcallowip=0.0.0.0/0", + "-dustrelayfee=0", + ], + ..Default::default() + } + } + + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_sequencer: true, + with_batch_prover: false, + n_nodes: 2, + docker: TestCaseDockerConfig { + bitcoin: true, + citrea: true, + }, + ..Default::default() + } + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let (da0, da1) = ( + f.bitcoin_nodes.get(0).unwrap(), + f.bitcoin_nodes.get(1).unwrap(), + ); + + let mut config = create_test_config_with_thread_name().await; + const PARAMSET: ProtocolParamset = ProtocolParamset { + finality_depth: DEFAULT_FINALITY_DEPTH as u32, + ..REGTEST_PARAMSET + }; + config.protocol_paramset = &PARAMSET; + citrea::update_config_with_citrea_e2e_values( + &mut config, + da0, + f.sequencer.as_ref().expect("Sequencer 
is present"), + None, + ); + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .unwrap(); + + // Reorg starts here. + f.bitcoin_nodes.disconnect_nodes().await?; + + let before_reorg_tip_height = rpc.get_block_count().await?; + let before_reorg_tip_hash = rpc.get_block_hash(before_reorg_tip_height).await?; + + let address = + Actor::new(config.secret_key, None, config.protocol_paramset.network).address; + let tx = rpc + .send_to_address(&address, Amount::from_sat(10000)) + .await?; + + assert!(!rpc.is_tx_on_chain(&tx.txid).await?); + rpc.mine_blocks(1).await?; + assert!(rpc.is_tx_on_chain(&tx.txid).await?); + + // Make the second branch longer and perform a reorg. + let reorg_depth = 4; + da1.generate(reorg_depth).await.unwrap(); + f.bitcoin_nodes.connect_nodes().await?; + f.bitcoin_nodes.wait_for_sync(None).await?; + + // Check that reorg happened. + let current_tip_height = rpc.get_block_count().await?; + assert_eq!( + before_reorg_tip_height + reorg_depth, + current_tip_height, + "Re-org did not occur" + ); + let current_tip_hash = rpc.get_block_hash(current_tip_height).await?; + assert_ne!( + before_reorg_tip_hash, current_tip_hash, + "Re-org did not occur" + ); + + assert!(!rpc.is_tx_on_chain(&tx.txid).await?); + + Ok(()) + } + } + + #[tokio::test] + async fn reorg_checks() -> Result<()> { + TestCaseRunner::new(ReorgChecks).run().await + } + + mod retry_config_tests { + use crate::extended_bitcoin_rpc::RetryConfig; + + use std::time::Duration; + + #[test] + fn test_retry_config_default() { + let config = RetryConfig::default(); + assert_eq!(config.initial_delay, Duration::from_millis(100)); + assert_eq!(config.max_delay, Duration::from_secs(30)); + assert_eq!(config.max_attempts, 5); + assert_eq!(config.backoff_multiplier, 2); + assert!(!config.is_jitter); + } + + #[test] + fn test_retry_config_custom() { + let initial = 
Duration::from_millis(200); + let max = Duration::from_secs(10); + let attempts = 7; + let backoff_multiplier = 3; + let jitter = true; + let config = RetryConfig::new(initial, max, attempts, backoff_multiplier, jitter); + assert_eq!(config.initial_delay, initial); + assert_eq!(config.max_delay, max); + assert_eq!(config.max_attempts, attempts); + assert_eq!(config.backoff_multiplier, backoff_multiplier); + assert!(config.is_jitter); + } + } + + mod retryable_error_tests { + use bitcoin::{hashes::Hash, BlockHash, Txid}; + + use crate::extended_bitcoin_rpc::RetryableError; + + use super::*; + use std::io::{Error as IoError, ErrorKind}; + + #[test] + fn test_bitcoin_rpc_error_retryable_io_errors() { + let retryable_kinds = [ + ErrorKind::ConnectionRefused, + ErrorKind::ConnectionReset, + ErrorKind::ConnectionAborted, + ErrorKind::NotConnected, + ErrorKind::BrokenPipe, + ErrorKind::TimedOut, + ErrorKind::Interrupted, + ErrorKind::UnexpectedEof, + ]; + + for kind in retryable_kinds { + let io_error = IoError::new(kind, "test error"); + let rpc_error = bitcoincore_rpc::Error::Io(io_error); + assert!( + rpc_error.is_retryable(), + "ErrorKind::{:?} should be retryable", + kind + ); + } + } + + #[test] + fn test_bitcoin_rpc_error_non_retryable_io_errors() { + let non_retryable_kinds = [ + ErrorKind::PermissionDenied, + ErrorKind::NotFound, + ErrorKind::InvalidInput, + ErrorKind::InvalidData, + ]; + + for kind in non_retryable_kinds { + let io_error = IoError::new(kind, "test error"); + let rpc_error = bitcoincore_rpc::Error::Io(io_error); + assert!( + !rpc_error.is_retryable(), + "ErrorKind::{:?} should not be retryable", + kind + ); + } + } + + #[test] + fn test_bitcoin_rpc_error_auth_not_retryable() { + let auth_error = bitcoincore_rpc::Error::Auth("Invalid credentials".to_string()); + assert!(!auth_error.is_retryable()); + } + + #[test] + fn test_bitcoin_rpc_error_url_parse_not_retryable() { + let url_error = url::ParseError::EmptyHost; + let rpc_error = 
bitcoincore_rpc::Error::UrlParse(url_error); + assert!(!rpc_error.is_retryable()); + } + + #[test] + fn test_bitcoin_rpc_error_invalid_cookie_not_retryable() { + let rpc_error = bitcoincore_rpc::Error::InvalidCookieFile; + assert!(!rpc_error.is_retryable()); + } + + #[test] + fn test_bitcoin_rpc_error_returned_error_non_retryable_patterns() { + let non_retryable_messages = [ + "insufficient funds", + "transaction already in blockchain", + "invalid transaction", + "not found in mempool", + "transaction conflict", + ]; + + for msg in non_retryable_messages { + let rpc_error = bitcoincore_rpc::Error::ReturnedError(msg.to_string()); + assert!( + !rpc_error.is_retryable(), + "Message '{}' should not be retryable", + msg + ); + } + } + + #[test] + fn test_bitcoin_rpc_error_unexpected_structure_retryable() { + let rpc_error = bitcoincore_rpc::Error::UnexpectedStructure; + assert!(rpc_error.is_retryable()); + } + + #[test] + fn test_bitcoin_rpc_error_serialization_errors_not_retryable() { + use bitcoin::consensus::encode::Error as EncodeError; + + let serialization_errors = [ + bitcoincore_rpc::Error::BitcoinSerialization(EncodeError::Io( + IoError::new(ErrorKind::Other, "test").into(), + )), + // bitcoincore_rpc::Error::Hex(HexToBytesError::InvalidChar(InvalidCharError{invalid: 0})), + bitcoincore_rpc::Error::Json(serde_json::Error::io(IoError::new( + ErrorKind::Other, + "test", + ))), + ]; + + for error in serialization_errors { + assert!( + !error.is_retryable(), + "Serialization error should not be retryable" + ); + } + } + + #[test] + fn test_bridge_rpc_error_retryable() { + // Test permanent errors + assert!( + !BitcoinRPCError::TransactionAlreadyInBlock(BlockHash::all_zeros()).is_retryable() + ); + assert!(!BitcoinRPCError::BumpFeeUTXOSpent(Default::default()).is_retryable()); + + // Test potentially retryable errors + let txid = Txid::all_zeros(); + let fee_rate = FeeRate::from_sat_per_vb_unchecked(1); + assert!(BitcoinRPCError::BumpFeeError(txid, 
fee_rate).is_retryable()); + + // Test Other error with retryable patterns + let retryable_other = BitcoinRPCError::Other(eyre::eyre!("timeout occurred")); + assert!(retryable_other.is_retryable()); + + let non_retryable_other = BitcoinRPCError::Other(eyre::eyre!("permission denied")); + assert!(!non_retryable_other.is_retryable()); + } + } + + mod rpc_call_retry_tests { + + use crate::extended_bitcoin_rpc::RetryableError; + + use super::*; + use secrecy::SecretString; + + #[tokio::test] + async fn test_rpc_call_retry_with_invalid_credentials() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + + // Get a working connection first + let working_rpc = regtest.rpc(); + let url = working_rpc.url.clone(); + + // Create connection with invalid credentials + let invalid_user = SecretString::new("invalid_user".to_string().into()); + let invalid_password = SecretString::new("invalid_password".to_string().into()); + + let res = ExtendedBitcoinRpc::connect(url, invalid_user, invalid_password, None).await; + + assert!(res.is_err()); + assert!(!res.unwrap_err().is_retryable()); + } + + #[tokio::test] + async fn test_rpc_call_retry_with_invalid_host() { + let user = SecretString::new("user".to_string().into()); + let password = SecretString::new("password".to_string().into()); + let invalid_url = "http://nonexistent-host:8332".to_string(); + + let res = ExtendedBitcoinRpc::connect(invalid_url, user, password, None).await; + + assert!(res.is_err()); + assert!(!res.unwrap_err().is_retryable()); + } + } + + mod convenience_method_tests { + use super::*; + + #[tokio::test] + async fn test_get_block_hash_with_retry() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + // Mine a block first + rpc.mine_blocks(1).await.unwrap(); + let height = rpc.get_block_count().await.unwrap(); + + let result = 
rpc.get_block_hash(height).await; + assert!(result.is_ok()); + + let expected_hash = rpc.get_block_hash(height).await.unwrap(); + assert_eq!(result.unwrap(), expected_hash); + } + + #[tokio::test] + async fn test_get_tx_out_with_retry() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + // Create a transaction + let keypair = Keypair::from_secret_key(&SECP, &config.secret_key); + let (xonly, _parity) = XOnlyPublicKey::from_keypair(&keypair); + let address = Address::p2tr(&SECP, xonly, None, config.protocol_paramset.network); + let amount = Amount::from_sat(10000); + + let utxo = rpc.send_to_address(&address, amount).await.unwrap(); + + let result = rpc.get_tx_of_txid(&utxo.txid).await; + assert!(result.is_ok()); + + let tx = result.unwrap(); + assert_eq!(tx.compute_txid(), utxo.txid); + } + + #[tokio::test] + async fn test_send_to_address_with_retry() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + let keypair = Keypair::from_secret_key(&SECP, &config.secret_key); + let (xonly, _parity) = XOnlyPublicKey::from_keypair(&keypair); + let address = Address::p2tr(&SECP, xonly, None, config.protocol_paramset.network); + let amount = Amount::from_sat(10000); + + let result = rpc.send_to_address(&address, amount).await; + assert!(result.is_ok()); + + let outpoint = result.unwrap(); + + // Verify the transaction exists + let tx = rpc.get_tx_of_txid(&outpoint.txid).await.unwrap(); + assert_eq!(tx.output[outpoint.vout as usize].value, amount); + } + + #[tokio::test] + async fn test_bump_fee_with_retry() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + let keypair = Keypair::from_secret_key(&SECP, &config.secret_key); + let (xonly, _parity) = 
XOnlyPublicKey::from_keypair(&keypair); + let address = Address::p2tr(&SECP, xonly, None, config.protocol_paramset.network); + let amount = Amount::from_sat(10000); + + // Create an unconfirmed transaction + let utxo = rpc.send_to_address(&address, amount).await.unwrap(); + let new_fee_rate = FeeRate::from_sat_per_vb_unchecked(10000); + + let result = rpc.bump_fee_with_fee_rate(utxo.txid, new_fee_rate).await; + assert!(result.is_ok()); + + let new_txid = result.unwrap(); + // Should return a different txid since fee was actually bumped + assert_ne!(new_txid, utxo.txid); + } + } +} diff --git a/core/src/header_chain_prover.rs b/core/src/header_chain_prover.rs new file mode 100644 index 000000000..5522c4acf --- /dev/null +++ b/core/src/header_chain_prover.rs @@ -0,0 +1,1219 @@ +//! # Header Chain Prover +//! +//! This module contains utilities for proving Bitcoin block headers. This +//! module must be fed with new blocks via the database. Later, it can check if +//! proving should be triggered by verifying if the batch size is sufficient. 
+ +use crate::builder::block_cache::BlockCache; +use crate::database::DatabaseTransaction; +use crate::errors::ResultExt; +use crate::{ + config::BridgeConfig, + database::Database, + errors::{BridgeError, ErrorExt}, + extended_bitcoin_rpc::ExtendedBitcoinRpc, +}; +use bitcoin::block::Header; +use bitcoin::{hashes::Hash, BlockHash, Network}; +use bitcoincore_rpc::RpcApi; +use bridge_circuit_host::bridge_circuit_host::{ + MAINNET_HEADER_CHAIN_ELF, MAINNET_WORK_ONLY_ELF, REGTEST_HEADER_CHAIN_ELF, + REGTEST_WORK_ONLY_ELF, SIGNET_HEADER_CHAIN_ELF, SIGNET_WORK_ONLY_ELF, + TESTNET4_HEADER_CHAIN_ELF, TESTNET4_WORK_ONLY_ELF, +}; +use bridge_circuit_host::docker::dev_stark_to_risc0_g16; +use bridge_circuit_host::utils::is_dev_mode; +use circuits_lib::bridge_circuit::structs::{WorkOnlyCircuitInput, WorkOnlyCircuitOutput}; +use circuits_lib::header_chain::mmr_guest::MMRGuest; +use circuits_lib::header_chain::{ + BlockHeaderCircuitOutput, ChainState, CircuitBlockHeader, HeaderChainCircuitInput, + HeaderChainPrevProofType, +}; +use eyre::{eyre, Context, OptionExt}; +use lazy_static::lazy_static; +use risc0_zkvm::{compute_image_id, ExecutorEnv, ProverOpts, Receipt}; +use std::{ + fs::File, + io::{BufReader, Read}, +}; +use thiserror::Error; + +lazy_static! 
{ + static ref MAINNET_IMAGE_ID: [u32; 8] = compute_image_id(MAINNET_HEADER_CHAIN_ELF) + .expect("hardcoded ELF is valid") + .as_words() + .try_into() + .expect("hardcoded ELF is valid"); + static ref TESTNET4_IMAGE_ID: [u32; 8] = compute_image_id(TESTNET4_HEADER_CHAIN_ELF) + .expect("hardcoded ELF is valid") + .as_words() + .try_into() + .expect("hardcoded ELF is valid"); + static ref SIGNET_IMAGE_ID: [u32; 8] = compute_image_id(SIGNET_HEADER_CHAIN_ELF) + .expect("hardcoded ELF is valid") + .as_words() + .try_into() + .expect("hardcoded ELF is valid"); + static ref REGTEST_IMAGE_ID: [u32; 8] = compute_image_id(REGTEST_HEADER_CHAIN_ELF) + .expect("hardcoded ELF is valid") + .as_words() + .try_into() + .expect("hardcoded ELF is valid"); +} + +#[derive(Debug, Error)] +pub enum HeaderChainProverError { + #[error("Error while de/serializing object")] + ProverDeSerializationError, + #[error("Wait for candidate batch to be ready")] + BatchNotReady, + #[error("Header chain prover not initialized due to config")] + HeaderChainProverNotInitialized, + + #[error(transparent)] + Other(#[from] eyre::Report), +} + +#[derive(Debug, Clone)] +pub struct HeaderChainProver { + db: Database, + network: bitcoin::Network, + batch_size: u64, +} + +impl HeaderChainProver { + /// Creates a new [`HeaderChainProver`] instance. Also saves a proof + /// assumption if specified in the config. 
+ pub async fn new( + config: &BridgeConfig, + rpc: ExtendedBitcoinRpc, + ) -> Result { + let db = Database::new(config).await.map_to_eyre()?; + let tip_height = rpc.get_current_chain_height().await.map_to_eyre()?; + if tip_height + < config.protocol_paramset().start_height + config.protocol_paramset().finality_depth + { + return Err(eyre::eyre!( + "Start height is not finalized, reduce start height: {} < {}", + tip_height, + config.protocol_paramset().start_height + config.protocol_paramset().finality_depth + ) + .into()); + } + db.fetch_and_save_missing_blocks( + &rpc, + config.protocol_paramset().genesis_height, + config.protocol_paramset().start_height, + ) + .await + .wrap_err("Failed to save initial block infos")?; + + if let Some(proof_file) = &config.header_chain_proof_path { + tracing::info!("Starting prover with assumption file {:?}.", proof_file); + let file = File::open(proof_file) + .wrap_err_with(|| format!("Failed to open proof assumption file {proof_file:?}"))?; + + let mut reader = BufReader::new(file); + let mut assumption = Vec::new(); + reader + .read_to_end(&mut assumption) + .wrap_err("Can't read assumption file")?; + + let proof: Receipt = borsh::from_slice(&assumption) + .wrap_err(HeaderChainProverError::ProverDeSerializationError)?; + let proof_output: BlockHeaderCircuitOutput = borsh::from_slice(&proof.journal.bytes) + .wrap_err(HeaderChainProverError::ProverDeSerializationError)?; + + // Create block entry, if not exists. 
+ let block_hash = BlockHash::from_raw_hash( + Hash::from_slice(&proof_output.chain_state.best_block_hash).map_to_eyre()?, + ); + let block_header = rpc.get_block_header(&block_hash).await.wrap_err(format!( + "Failed to get block header with block hash {} (retrieved from assumption file)", + block_hash + ))?; + let block_height = rpc + .get_block_info(&block_hash) + .await + .map(|info| info.height) + .wrap_err(format!( + "Failed to get block info with block hash {} (retrieved from assumption file)", + block_hash + ))?; + tracing::info!( + "Adding proof assumption for a block with hash of {:?}, header of {:?} and height of {}", + block_hash, + block_header, + block_height + ); + + // If an unproven block in database already exists, it shouldn't + // effect anything. + // PS: This also ignores other db errors but there are other places + // where we check for those errors. + let _ = db + .save_unproven_finalized_block( + None, + block_hash, + block_header, + proof_output.chain_state.block_height.into(), + ) + .await + .inspect_err(|e| { + tracing::debug!("Can't set initial block info for header chain prover, because: {e}. 
Doesn't affect anything, continuing..."); + }); + + db.set_block_proof(None, block_hash, proof) + .await + .map_to_eyre()?; + } else { + tracing::info!("Starting prover without assumption, proving genesis block"); + + let genesis_block_hash = rpc + .get_block_hash(config.protocol_paramset().genesis_height.into()) + .await + .wrap_err(format!( + "Failed to get genesis block hash at height {}", + config.protocol_paramset().genesis_height + ))?; + + tracing::debug!( + "Genesis block hash: {}, height: {}", + genesis_block_hash, + config.protocol_paramset().genesis_height + ); // Should be debug + + let genesis_block_header = + rpc.get_block_header(&genesis_block_hash) + .await + .wrap_err(format!( + "Failed to get genesis block header at height {}", + config.protocol_paramset().genesis_height + ))?; + + let genesis_chain_state = HeaderChainProver::get_chain_state_from_height( + rpc.clone(), + config.protocol_paramset().genesis_height.into(), + config.protocol_paramset().network, + ) + .await + .map_to_eyre()?; + tracing::debug!("Genesis chain state (verbose): {:?}", genesis_chain_state); + + let genesis_chain_state_hash = genesis_chain_state.to_hash(); + if genesis_chain_state_hash != config.protocol_paramset().genesis_chain_state_hash { + return Err(eyre::eyre!( + "Genesis chain state hash mismatch: {} != {}", + hex::encode(genesis_chain_state_hash), + hex::encode(config.protocol_paramset().genesis_chain_state_hash) + ) + .into()); + } + + let proof = HeaderChainProver::prove_genesis_block( + genesis_chain_state, + config.protocol_paramset().network, + ) + .map_to_eyre()?; + + let _ = db + .save_unproven_finalized_block( + None, + genesis_block_hash, + genesis_block_header, + config.protocol_paramset().genesis_height.into(), + ) + .await; + + db.set_block_proof(None, genesis_block_hash, proof) + .await + .map_to_eyre()?; + } + + Ok(HeaderChainProver { + db, + batch_size: config + .protocol_paramset() + .header_chain_proof_batch_size + .into(), + network: 
config.protocol_paramset().network, + }) + } + + pub async fn get_chain_state_from_height( + rpc: ExtendedBitcoinRpc, + height: u64, + network: Network, + ) -> Result { + let block_hash = rpc + .get_block_hash(height) + .await + .wrap_err(format!("Failed to get block hash at height {}", height))?; + + let block_header = rpc.get_block_header(&block_hash).await.wrap_err(format!( + "Failed to get block header with block hash {}", + block_hash + ))?; + + let mut last_11_block_timestamps: [u32; 11] = [0; 11]; + let mut last_block_hash = block_hash; + let mut last_block_height = height; + for _ in 0..11 { + let block_header = rpc + .get_block_header(&last_block_hash) + .await + .wrap_err(format!( + "Failed to get block header with block hash {}", + last_block_hash + ))?; + + last_11_block_timestamps[last_block_height as usize % 11] = block_header.time; + + last_block_hash = block_header.prev_blockhash; + last_block_height = last_block_height.wrapping_sub(1); + + if last_block_hash.to_byte_array() == [0u8; 32] { + break; + } + } + + let epoch_start_block_height = height / 2016 * 2016; + + let (epoch_start_timestamp, expected_bits) = if network == Network::Regtest { + (0, block_header.bits.to_consensus()) + } else { + let epoch_start_block_hash = rpc + .get_block_hash(epoch_start_block_height) + .await + .wrap_err(format!( + "Failed to get block hash at height {}", + epoch_start_block_height + ))?; + let epoch_start_block_header = rpc + .get_block_header(&epoch_start_block_hash) + .await + .wrap_err(format!( + "Failed to get block header with block hash {}", + epoch_start_block_hash + ))?; + let bits = if network == Network::Testnet4 { + // Real difficulty will show up at epoch start block no matter what + epoch_start_block_header.bits.to_consensus() + } else { + block_header.bits.to_consensus() + }; + + (epoch_start_block_header.time, bits) + }; + + let block_info = rpc.get_block_info(&block_hash).await.wrap_err(format!( + "Failed to get block info with block hash {}", + 
block_hash + ))?; + + let total_work = block_info.chainwork; + + let total_work: [u8; 32] = total_work.try_into().expect("Total work is 32 bytes"); + + let mut block_hashes_mmr = MMRGuest::new(); + block_hashes_mmr.append(block_hash.to_byte_array()); + + let chain_state = ChainState { + block_height: height as u32, + total_work, + best_block_hash: block_hash.to_byte_array(), + current_target_bits: expected_bits, + epoch_start_time: epoch_start_timestamp, + prev_11_timestamps: last_11_block_timestamps, + block_hashes_mmr, + }; + Ok(chain_state) + } + + /// Proves the work only proof for the given HCP receipt. + pub fn prove_work_only( + &self, + hcp_receipt: Receipt, + ) -> Result<(Receipt, WorkOnlyCircuitOutput), HeaderChainProverError> { + let block_header_circuit_output: BlockHeaderCircuitOutput = + borsh::from_slice(&hcp_receipt.journal.bytes) + .wrap_err(HeaderChainProverError::ProverDeSerializationError)?; + let input = WorkOnlyCircuitInput { + header_chain_circuit_output: block_header_circuit_output, + }; + let mut env = ExecutorEnv::builder(); + + env.write_slice(&borsh::to_vec(&input).wrap_err(BridgeError::BorshError)?); + + env.add_assumption(hcp_receipt); + + let env = env + .build() + .map_err(|e| eyre::eyre!(e)) + .wrap_err("Failed to build environment")?; + + let prover = risc0_zkvm::default_prover(); + + let elf = match self.network { + Network::Bitcoin => MAINNET_WORK_ONLY_ELF, + Network::Testnet4 => TESTNET4_WORK_ONLY_ELF, + Network::Signet => SIGNET_WORK_ONLY_ELF, + Network::Regtest => REGTEST_WORK_ONLY_ELF, + _ => Err(BridgeError::UnsupportedNetwork.into_eyre())?, + }; + + tracing::warn!("Starting proving HCP work only proof for creating a watchtower challenge"); + let receipt = if !is_dev_mode() { + prover + .prove_with_opts(env, elf, &ProverOpts::groth16()) + .map_err(|e| eyre::eyre!(e))? + .receipt + } else { + let stark_receipt = prover + .prove_with_opts(env, elf, &ProverOpts::succinct()) + .map_err(|e| eyre::eyre!(e))? 
+ .receipt; + let journal = stark_receipt.journal.bytes.clone(); + dev_stark_to_risc0_g16(stark_receipt, &journal)? + }; + tracing::warn!("HCP work only proof proof generated for creating a watchtower challenge"); + let work_output: WorkOnlyCircuitOutput = borsh::from_slice(&receipt.journal.bytes) + .wrap_err(HeaderChainProverError::ProverDeSerializationError)?; + + Ok((receipt, work_output)) + } + + /// Proves blocks till the block with hash `current_block_hash`. + /// + /// # Parameters + /// + /// - `current_block_hash`: Hash of the target block + /// - `block_headers`: Previous block headers before the target block + /// - `previous_proof`: Previous proof's receipt + #[tracing::instrument(skip_all)] + async fn prove_and_save_block( + &self, + current_block_hash: BlockHash, + block_headers: Vec
, + previous_proof: Receipt, + ) -> Result { + tracing::debug!( + "Prover starts proving {} blocks ending with block with hash {}", + block_headers.len(), + current_block_hash + ); + + let headers: Vec = block_headers.into_iter().map(Into::into).collect(); + let receipt = self.prove_block_headers(previous_proof, headers)?; + + self.db + .set_block_proof(None, current_block_hash, receipt.clone()) + .await?; + + Ok(receipt) + } + + /// Proves given block headers. + /// + /// # Parameters + /// + /// - `prev_receipt`: Previous proof's receipt, if not genesis block + /// - `block_headers`: Block headers to prove + /// + /// # Returns + /// + /// - [`Receipt`]: Proved block headers' proof receipt. + fn prove_block_headers( + &self, + prev_receipt: Receipt, + block_headers: Vec, + ) -> Result { + // Prepare proof input. + let prev_output: BlockHeaderCircuitOutput = borsh::from_slice(&prev_receipt.journal.bytes) + .wrap_err(HeaderChainProverError::ProverDeSerializationError)?; + let method_id = prev_output.method_id; + + let prev_proof = HeaderChainPrevProofType::PrevProof(prev_output); + + let input = HeaderChainCircuitInput { + method_id, + prev_proof, + block_headers, + }; + Self::prove_with_input(input, Some(prev_receipt), self.network) + } + + pub fn prove_genesis_block( + genesis_chain_state: ChainState, + network: Network, + ) -> Result { + let image_id = match network { + Network::Bitcoin => *MAINNET_IMAGE_ID, + Network::Testnet => *TESTNET4_IMAGE_ID, + Network::Testnet4 => *TESTNET4_IMAGE_ID, + Network::Signet => *SIGNET_IMAGE_ID, + Network::Regtest => *REGTEST_IMAGE_ID, + _ => Err(BridgeError::UnsupportedNetwork.into_eyre())?, + }; + let header_chain_circuit_type = HeaderChainPrevProofType::GenesisBlock(genesis_chain_state); + let input = HeaderChainCircuitInput { + method_id: image_id, + prev_proof: header_chain_circuit_type, + block_headers: vec![], + }; + + Self::prove_with_input(input, None, network) + } + + fn prove_with_input( + input: 
HeaderChainCircuitInput, + prev_receipt: Option, + network: Network, + ) -> Result { + let mut env = ExecutorEnv::builder(); + + env.write_slice(&borsh::to_vec(&input).wrap_err(BridgeError::BorshError)?); + + if let Some(prev_receipt) = prev_receipt { + env.add_assumption(prev_receipt); + } + + let env = env + .build() + .map_err(|e| eyre::eyre!(e)) + .wrap_err("Failed to build environment")?; + + let prover = risc0_zkvm::default_prover(); + + let elf = match network { + Network::Bitcoin => MAINNET_HEADER_CHAIN_ELF, + Network::Testnet => TESTNET4_HEADER_CHAIN_ELF, + Network::Testnet4 => TESTNET4_HEADER_CHAIN_ELF, + Network::Signet => SIGNET_HEADER_CHAIN_ELF, + Network::Regtest => REGTEST_HEADER_CHAIN_ELF, + _ => Err(BridgeError::UnsupportedNetwork.into_eyre())?, + }; + + let receipt = prover.prove(env, elf).map_err(|e| eyre::eyre!(e))?.receipt; + tracing::debug!( + "Proof receipt for header chain circuit input {:?}: {:?}", + input, + receipt + ); + + Ok(receipt) + } + + /// Produces a proof for the chain up to the block with the given hash. + /// + /// # Returns + /// + /// - [`Receipt`]: Specified block's proof receipt + /// - [`u64`]: Height of the proven header chain + pub async fn prove_till_hash( + &self, + block_hash: BlockHash, + ) -> Result<(Receipt, u64), BridgeError> { + let (_, _, height) = self + .db + .get_block_info_from_hash_hcp(None, block_hash) + .await? + .ok_or(eyre::eyre!("Block not found in prove_till_hash"))?; + + let latest_proven_block = self + .db + .get_latest_proven_block_info_until_height(None, height) + .await? + .ok_or_eyre("No proofs found before the given block hash")?; + + if latest_proven_block.2 == height as u64 { + let receipt = self + .db + .get_block_proof_by_hash(None, latest_proven_block.0) + .await + .wrap_err("Failed to get block proof")? 
+ .ok_or(eyre!("Failed to get block proof"))?; + return Ok((receipt, height as u64)); + } + + let block_headers = self + .db + .get_block_info_from_range(None, latest_proven_block.2 + 1, height.into()) + .await? + .into_iter() + .map(|(_hash, header)| header) + .collect::>(); + + let previous_proof = self + .db + .get_block_proof_by_hash(None, latest_proven_block.0) + .await? + .ok_or(eyre::eyre!("No proven block found"))?; + let receipt = self + .prove_and_save_block(block_hash, block_headers, previous_proof) + .await?; + tracing::info!("Generated new proof for height {}", height); + Ok((receipt, height as u64)) + } + + /// Gets the proof of the latest finalized blockchain tip. If the finalized + /// blockchain tip isn't yet proven, it will be proven first in batches + /// (last proven block in database to finalized blockchain tip). + /// + /// # Returns + /// + /// - [`Receipt`]: Specified block's proof receipt + /// - [`u64`]: Height of the proven header chain + pub async fn get_tip_header_chain_proof(&self) -> Result<(Receipt, u64), BridgeError> { + let max_height = self.db.get_latest_finalized_block_height(None).await?; + + if let Some(max_height) = max_height { + let block_hash = self + .db + .get_block_info_from_range(None, max_height, max_height) + .await? + .into_iter() + .next() + .expect("Block should be in table") + .0; + Ok(self.prove_till_hash(block_hash).await?) + } else { + Err(eyre::eyre!("No finalized blocks in header chain proofs table").into()) + } + } + + /// Saves a new block to database, later to be proven. + pub async fn save_unproven_block_cache( + &self, + dbtx: Option>, + block_cache: &BlockCache, + ) -> Result<(), BridgeError> { + let block_hash = block_cache + .block + .as_ref() + .ok_or(eyre::eyre!("Block not found"))? + .block_hash(); + + let block_header = block_cache + .block + .as_ref() + .ok_or(eyre::eyre!("Block not found"))? 
+ .header; + + self.db + .save_unproven_finalized_block( + dbtx, + block_hash, + block_header, + block_cache.block_height.into(), + ) + .await?; + + Ok(()) + } + + /// Checks if there are enough blocks to prove. + #[tracing::instrument(skip_all)] + async fn is_batch_ready(&self) -> Result { + let non_proven_block = if let Some(block) = self.db.get_next_unproven_block(None).await? { + block + } else { + return Ok(false); + }; + let tip_height = self + .db + .get_latest_finalized_block_height(None) + .await? + .ok_or(eyre::eyre!("No tip block found"))?; + + tracing::debug!( + "Tip height: {}, non proven block height: {}, {}", + tip_height, + non_proven_block.2, + self.batch_size + ); + if tip_height - non_proven_block.2 >= self.batch_size { + return Ok(true); + } + tracing::debug!( + "Batch not ready: {} - {} < {}", + tip_height, + non_proven_block.2, + self.batch_size + ); + + Ok(false) + } + + /// Proves blocks if the batch is ready. If not, skips. + pub async fn prove_if_ready(&self) -> Result, BridgeError> { + if !self.is_batch_ready().await? 
{ + return Ok(None); + } + + let unproven_blocks = self + .db + .get_next_n_non_proven_block( + self.batch_size + .try_into() + .wrap_err("Can't convert u64 to u32")?, + ) + .await?; + let (unproven_blocks, prev_proof) = match unproven_blocks { + Some(unproven_blocks) => unproven_blocks, + None => { + tracing::debug!("No unproven blocks found"); + return Ok(None); + } + }; + + let current_block_hash = unproven_blocks.iter().next_back().expect("Exists").0; + let current_block_height = unproven_blocks.iter().next_back().expect("Exists").2; + let block_headers = unproven_blocks + .iter() + .map(|(_, header, _)| *header) + .collect::>(); + + let receipt = self + .prove_and_save_block(current_block_hash, block_headers, prev_proof) + .await?; + tracing::info!( + "Header chain proof generated for block with hash {:?} and height {}", + current_block_hash, + current_block_height, + ); + + Ok(Some(receipt)) + } +} + +#[cfg(test)] +mod tests { + use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; + use crate::header_chain_prover::HeaderChainProver; + use crate::test::common::*; + use crate::verifier::VerifierServer; + use crate::{database::Database, test::common::citrea::MockCitreaClient}; + use bitcoin::{block::Header, hashes::Hash, BlockHash, Network}; + use bitcoincore_rpc::RpcApi; + use circuits_lib::header_chain::{ + mmr_guest::MMRGuest, BlockHeaderCircuitOutput, ChainState, CircuitBlockHeader, + }; + use secp256k1::rand::{self, Rng}; + + /// Mines `block_num` amount of blocks (if not already mined) and returns + /// the first `block_num` block headers in blockchain. + async fn mine_and_get_first_n_block_headers( + rpc: ExtendedBitcoinRpc, + db: Database, + block_num: u64, + ) -> Vec
{ + let height = rpc.get_block_count().await.unwrap(); + tracing::debug!( + "Current tip height: {}, target block height: {}", + height, + block_num + ); + if height < block_num { + tracing::debug!( + "Mining {} blocks to reach block number {}", + block_num - height, + block_num + ); + rpc.mine_blocks(block_num - height + 1).await.unwrap(); + } + + tracing::debug!("Getting first {} block headers from blockchain", block_num); + let mut headers = Vec::new(); + for i in 0..block_num + 1 { + let hash = rpc.get_block_hash(i).await.unwrap(); + let header = rpc.get_block_header(&hash).await.unwrap(); + + headers.push(header); + + let _ignore_errors = db + .save_unproven_finalized_block(None, hash, header, i) + .await; + } + + headers + } + + #[ignore = "This test is requires env var at build time, but it works, try it out"] + #[tokio::test] + #[serial_test::serial] + async fn test_generate_chain_state_from_height() { + // set BITCOIN_NETWORK to regtest + std::env::set_var("BITCOIN_NETWORK", "regtest"); + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let db = Database::new(&config).await.unwrap(); + + // randomly select a number of blocks from 12 to 2116 + let num_blocks: u64 = rand::rng().random_range(12..2116); + + // Save some initial blocks. 
+ let headers = mine_and_get_first_n_block_headers(rpc.clone(), db.clone(), num_blocks).await; + + let chain_state = HeaderChainProver::get_chain_state_from_height( + rpc.clone(), + num_blocks, + Network::Regtest, + ) + .await + .unwrap(); + + let mut expected_chain_state = ChainState::genesis_state(); + expected_chain_state.apply_block_headers( + headers + .iter() + .map(|header| CircuitBlockHeader::from(*header)) + .collect::>(), + ); + + expected_chain_state.block_hashes_mmr = MMRGuest::new(); + + println!("Chain state: {:#?}", chain_state); + println!("Expected chain state: {:#?}", expected_chain_state); + + assert_eq!(chain_state, expected_chain_state); + } + + #[ignore = "This test is requires env var at build time & testnet4, but it works, try it out"] + #[tokio::test] + #[serial_test::serial] + async fn test_generate_chain_state_from_height_testnet4() { + // set BITCOIN_NETWORK to regtest + std::env::set_var("BITCOIN_NETWORK", "testnet4"); + let rpc = ExtendedBitcoinRpc::connect( + "http://127.0.0.1:48332".to_string(), + "admin".to_string().into(), + "admin".to_string().into(), + None, + ) + .await + .unwrap(); + + // randomly select a number of blocks from 12 to 2116 + let num_blocks: u64 = rand::rng().random_range(12..2116); + + // Save some initial blocks. 
+ let mut headers = Vec::new(); + for i in 0..=num_blocks { + let hash = rpc.get_block_hash(i).await.unwrap(); + let header = rpc.get_block_header(&hash).await.unwrap(); + headers.push(header); + } + + let chain_state = HeaderChainProver::get_chain_state_from_height( + rpc.clone(), + num_blocks, + Network::Testnet4, + ) + .await + .unwrap(); + + let mut expected_chain_state = ChainState::genesis_state(); + expected_chain_state.apply_block_headers( + headers + .iter() + .map(|header| CircuitBlockHeader::from(*header)) + .collect::>(), + ); + + expected_chain_state.block_hashes_mmr = MMRGuest::new(); + + println!("Chain state: {:#?}", chain_state); + println!("Expected chain state: {:#?}", expected_chain_state); + + assert_eq!(chain_state, expected_chain_state); + } + + #[tokio::test] + async fn test_fetch_and_save_missing_blocks() { + // test these functions: + // save_block_infos_within_range + // fetch_and_save_missing_blocks + // get_block_info_from_hash_hcp + // get_latest_proven_block_info_until_height + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let prover = HeaderChainProver::new(&config, rpc.clone_inner().await.unwrap()) + .await + .unwrap(); + + let current_height = rpc.get_block_count().await.unwrap(); + let current_hcp_height = prover + .db + .get_latest_finalized_block_height(None) + .await + .unwrap() + .unwrap(); + assert_ne!(current_height, current_hcp_height); + + prover + .db + .fetch_and_save_missing_blocks( + &rpc, + config.protocol_paramset().genesis_height, + current_height as u32 + 1, + ) + .await + .unwrap(); + + let current_hcp_height = prover + .db + .get_latest_finalized_block_height(None) + .await + .unwrap() + .unwrap(); + assert_eq!(current_height, current_hcp_height); + + let test_height = current_height as u32 / 2; + + let block_hash = rpc.get_block_hash(test_height as u64).await.unwrap(); + let block_info = prover + .db + 
.get_block_info_from_hash_hcp(None, block_hash) + .await + .unwrap() + .unwrap(); + assert_eq!(block_info.2, test_height); + + let receipt_1 = prover.prove_till_hash(block_hash).await.unwrap(); + let latest_proven_block = prover + .db + .get_latest_proven_block_info_until_height(None, current_hcp_height as u32) + .await + .unwrap() + .unwrap(); + + let receipt_2 = prover.prove_till_hash(block_hash).await.unwrap(); + + assert_eq!(receipt_1.0.journal, receipt_2.0.journal); + + assert_eq!(latest_proven_block.2, test_height as u64); + } + + #[tokio::test] + async fn new() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let _should_not_panic = HeaderChainProver::new(&config, rpc).await.unwrap(); + } + + #[tokio::test] + async fn new_with_proof_assumption() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + // First block's assumption will be added to db: Make sure block exists + // too. + rpc.mine_blocks(1).await.unwrap(); + let prover = HeaderChainProver::new(&config, rpc.clone_inner().await.unwrap()) + .await + .unwrap(); + + // Test assumption is for block 0. + let hash = rpc.get_block_hash(0).await.unwrap(); + let (receipt, _) = prover.prove_till_hash(hash).await.unwrap(); + let db_receipt = prover + .db + .get_block_proof_by_hash(None, hash) + .await + .unwrap() + .unwrap(); + assert_eq!(receipt.journal, db_receipt.journal); + assert_eq!(receipt.metadata, db_receipt.metadata); + } + + #[tokio::test] + async fn prove_a_block_from_database() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let prover = HeaderChainProver::new(&config, rpc.clone_inner().await.unwrap()) + .await + .unwrap(); + + // Set up the next non proven block. 
+ let height = 1; + let hash = rpc.get_block_hash(height).await.unwrap(); + let genesis_hash = rpc.get_block_hash(0).await.unwrap(); + let (genesis_receipt, _) = prover.prove_till_hash(genesis_hash).await.unwrap(); + let block = rpc.get_block(&hash).await.unwrap(); + let header = block.header; + prover + .db + .save_unproven_finalized_block(None, hash, header, height) + .await + .unwrap(); + + let receipt = prover + .prove_and_save_block(hash, vec![header], genesis_receipt) + .await + .unwrap(); + + let (read_recipt, _) = prover.prove_till_hash(hash).await.unwrap(); + assert_eq!(receipt.journal, read_recipt.journal); + } + + #[tokio::test] + #[serial_test::serial] + async fn prove_block_headers_genesis() { + let genesis_state = ChainState::genesis_state(); + + let receipt = + HeaderChainProver::prove_genesis_block(genesis_state, Network::Regtest).unwrap(); + + let output: BlockHeaderCircuitOutput = borsh::from_slice(&receipt.journal.bytes).unwrap(); + println!("Proof journal output: {:?}", output); + + assert_eq!(output.chain_state.block_height, u32::MAX); // risc0-to-bitvm2 related + assert_eq!( + output.chain_state.best_block_hash, + BlockHash::all_zeros().as_raw_hash().to_byte_array() + ); + } + + #[tokio::test] + #[serial_test::serial] + async fn prove_block_headers_second() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let prover = HeaderChainProver::new(&config, rpc.clone_inner().await.unwrap()) + .await + .unwrap(); + + // Prove genesis block and get it's receipt. 
+ let genesis_state = ChainState::genesis_state(); + + let receipt = + HeaderChainProver::prove_genesis_block(genesis_state, Network::Regtest).unwrap(); + + let block_headers = mine_and_get_first_n_block_headers(rpc, prover.db.clone(), 3) + .await + .iter() + .map(|header| CircuitBlockHeader::from(*header)) + .collect::>(); + let receipt = prover + .prove_block_headers(receipt, block_headers[0..2].to_vec()) + .unwrap(); + let output: BlockHeaderCircuitOutput = borsh::from_slice(&receipt.journal.bytes).unwrap(); + + println!("Proof journal output: {:?}", output); + + assert_eq!(output.chain_state.block_height, 1); + } + + #[tokio::test] + async fn prove_till_hash_intermediate_blocks() { + // this test does assume config start height is bigger than 3 + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let db = Database::new(&config).await.unwrap(); + + let prover = HeaderChainProver::new(&config, rpc.clone_inner().await.unwrap()) + .await + .unwrap(); + + for i in (0..3).rev() { + let hash = rpc.get_block_hash(i).await.unwrap(); + let (proof, _) = prover.prove_till_hash(hash).await.unwrap(); + let db_proof = db + .get_block_proof_by_hash(None, hash) + .await + .unwrap() + .unwrap(); + assert_eq!(proof.journal, db_proof.journal); + } + let hash = rpc.get_block_hash(5).await.unwrap(); + let (proof, _) = prover.prove_till_hash(hash).await.unwrap(); + let db_proof = db + .get_block_proof_by_hash(None, hash) + .await + .unwrap() + .unwrap(); + assert_eq!(proof.journal, db_proof.journal); + } + + #[tokio::test] + async fn is_batch_ready() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let db = Database::new(&config).await.unwrap(); + + let batch_size = config.protocol_paramset().header_chain_proof_batch_size; + + let prover = HeaderChainProver::new(&config, 
rpc.clone_inner().await.unwrap()) + .await + .unwrap(); + + let genesis_hash = rpc.get_block_hash(0).await.unwrap(); + let (genesis_block_proof, _) = prover.prove_till_hash(genesis_hash).await.unwrap(); + let db_proof = db + .get_block_proof_by_hash(None, genesis_hash) + .await + .unwrap() + .unwrap(); + assert_eq!(genesis_block_proof.journal, db_proof.journal); + + assert!( + prover.is_batch_ready().await.unwrap() + == (config.protocol_paramset().start_height > batch_size) + ); + + // Mining required amount of blocks should make batch proving ready. + let _headers = + mine_and_get_first_n_block_headers(rpc.clone(), db, batch_size as u64 + 1).await; + assert!(prover.is_batch_ready().await.unwrap()); + } + + #[tokio::test] + async fn prove_if_ready() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let db = Database::new(&config).await.unwrap(); + + let prover = HeaderChainProver::new(&config, rpc.clone()).await.unwrap(); + + // Save some initial blocks. 
+ mine_and_get_first_n_block_headers(rpc.clone(), db.clone(), 2).await; + + let batch_size = config.protocol_paramset().header_chain_proof_batch_size; + + let latest_proven_block_height = db.get_next_unproven_block(None).await.unwrap().unwrap().2; + let _block_headers = mine_and_get_first_n_block_headers( + rpc.clone(), + db.clone(), + latest_proven_block_height + batch_size as u64, + ) + .await; + + let receipt = prover.prove_if_ready().await.unwrap().unwrap(); + let latest_proof = db + .get_latest_proven_block_info(None) + .await + .unwrap() + .unwrap(); + let get_receipt = prover + .db + .get_block_proof_by_hash(None, latest_proof.0) + .await + .unwrap() + .unwrap(); + assert_eq!(receipt.journal, get_receipt.journal); + assert_eq!(receipt.metadata, get_receipt.metadata); + } + + #[tokio::test] + async fn prove_and_get_non_targeted_block() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let db = Database::new(&config).await.unwrap(); + + let prover = HeaderChainProver::new(&config, rpc.clone()).await.unwrap(); + + // Save some initial blocks. 
+ mine_and_get_first_n_block_headers(rpc.clone(), db.clone(), 2).await; + + let batch_size = config.protocol_paramset().header_chain_proof_batch_size; + + let latest_proven_block_height = db.get_next_unproven_block(None).await.unwrap().unwrap().2; + let _block_headers = mine_and_get_first_n_block_headers( + rpc.clone(), + db.clone(), + latest_proven_block_height + batch_size as u64, + ) + .await; + + let receipt = prover.prove_if_ready().await.unwrap().unwrap(); + let latest_proof = db + .get_latest_proven_block_info(None) + .await + .unwrap() + .unwrap(); + let get_receipt = prover + .db + .get_block_proof_by_hash(None, latest_proof.0) + .await + .unwrap() + .unwrap(); + assert_eq!(receipt.journal, get_receipt.journal); + assert_eq!(receipt.metadata, get_receipt.metadata); + + // Try to get proof of the previous block that its heir is proven. + let target_height = latest_proof.2 - 1; + let target_hash = rpc.get_block_hash(target_height).await.unwrap(); + + assert!(db + .get_block_proof_by_hash(None, target_hash) + .await + .unwrap() + .is_none()); + + // get_header_chain_proof should calculate the proof for the block. + let _receipt = prover.get_tip_header_chain_proof().await.unwrap(); + } + + #[tokio::test] + #[cfg(feature = "automation")] + async fn verifier_new_check_header_chain_proof() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let db = Database::new(&config).await.unwrap(); + + let batch_size = config.protocol_paramset().header_chain_proof_batch_size; + + // Save initial blocks, because VerifierServer won't. 
+ let count = rpc.get_block_count().await.unwrap(); + tracing::info!("Block count: {}", count); + for i in 1..count + 1 { + let hash = rpc.get_block_hash(i).await.unwrap(); + let block = rpc.get_block(&hash).await.unwrap(); + + db.save_unproven_finalized_block(None, block.block_hash(), block.header, i) + .await + .unwrap(); + } + + let verifier = VerifierServer::::new(config) + .await + .unwrap(); + verifier.start_background_tasks().await.unwrap(); + // Make sure enough blocks to prove and is finalized. + rpc.mine_blocks((batch_size + 10).into()).await.unwrap(); + + // Aim for a proved block that is added to the database by the verifier. + let height = batch_size; + let hash = rpc.get_block_hash(height.into()).await.unwrap(); + + poll_until_condition( + async || { + Ok(verifier + .verifier + .header_chain_prover + .db + .get_block_proof_by_hash(None, hash) + .await + .is_ok()) + }, + None, + None, + ) + .await + .unwrap(); + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs new file mode 100644 index 000000000..d446ae363 --- /dev/null +++ b/core/src/lib.rs @@ -0,0 +1,195 @@ +//! # Clementine ๐ŸŠ +//! +//! Clementine is Citrea's BitVM-based, trust-minimized two-way peg program. +//! Please refer to the [whitepaper](https://citrea.xyz/clementine_whitepaper.pdf) +//! to understand the design of Clementine. +//! +//! Clementine Core is the backbone of Clementine. As the name suggests, it +//! provides the core functionalities for Clementine to operate. +//! +//! Most of the modules are self-explanatory and documented. Please refer +//! to the documentation of each module for more information. +//! +//! ## Binaries and Using Clementine +//! +//! Clementine's architecture is designed so that every actor is a separate +//! server. They all communicate with each other via gRPC. +//! +//! For this reason, Clementine Core provides a single main binary, +//! `clementine-core`, which acts as a server starter for every actor. There is +//! 
also a helper binary, `clementine-core-cli`, which is a command-line +//! interface for communicating with these servers. It is located in +//! `bin/cli.rs`. +//! +//! The [`crate::cli`] module provides the command-line interface for Clementine. +//! It is used in every binary. +//! +//! The [`crate::config`] module is also essential for Clementine to operate. +//! It specifies essential variables for the protocol as well as the user's +//! environment setup. +//! +//! ## Utilizing Actors +//! +//! The core behavior of Clementine's actors is defined in the respective +//! modules: +//! +//! - [`crate::operator`] +//! - [`crate::verifier`] +//! - [`crate::aggregator`] +//! +//! For all these modules, the [`crate::actor`] module provides common utilities. +//! +//! ### Servers +//! +//! An actor is only meaningful if its server is running. For each actor, there +//! is a server module, which provides the server implementation. +//! +//! The main server architecture is defined in the `rpc/clementine.proto` file. +//! It is compiled to Rust code by the `tonic` library. Server logic for each +//! actor is defined in the respective server module in the [`crate::rpc`] module. +//! +//! ## Building Transactions and Managing Flow with Tasks +//! +//! Clementine operates on Bitcoin transactions. The [`crate::builder`] module +//! provides utilities for building Bitcoin transactions based on the +//! specification (detailed in the whitepaper). The [`crate::builder`] module +//! can create a transaction according to the specification with the required +//! signatures, addresses, and scripts. +//! +//! Clementine requires a few background tasks to be running in order to operate +//! properly. The task interface is defined in the [`crate::task`] module. These +//! tasks are: +//! +//! - The [`crate::bitcoin_syncer`] module syncs Bitcoin blocks and +//! transactions. +//! - The [`crate::tx_sender`] module sends transactions to the Bitcoin network +//! 
depending on the transaction type. +//! - The [`crate::states`] module provides state machine implementations for +//! managing some of the steps in the specification. +//! +//! There are other modules that are not tasks, but they are used in the tasks +//! and are important for the flow of Clementine: +//! +//! - The [`crate::header_chain_prover`] module accepts Bitcoin block headers +//! and prepares proofs for them. +//! +//! ### Communicating with the Outside +//! +//! Some steps require communicating with external systems: +//! +//! - The [`crate::extended_rpc`] module provides a client that talks with +//! the Bitcoin node. +//! - The [`crate::citrea`] module provides a client for interacting with Citrea. +//! - The [`crate::bitvm_client`] module provides a client for BitVM. +//! - The [`crate::database`] module provides a database interface for +//! interacting with the PostgreSQL database. +//! +//! ## Development Guidelines +//! +//! ### Error Handling +//! +//! There are rules about error handling in Clementine. Please refer to the +//! [`crate::errors`] module for more information. +//! +//! ### Testing Clementine +//! +//! There are a few quirks about testing Clementine. One of the main ones is +//! that there is no `tests` directory for integration tests. Rather, there is a +//! [`crate::test`] module, which is compiled only if `test` is enabled by Cargo +//! (when running `cargo test`). That module provides common utilities for unit +//! and integration testing, as well as integration tests themselves. This is a +//! workaround for having common test utilities between unit and integration +//! tests. +//! +//! Please refer to the [`crate::test`] module to check what utilities are +//! available for testing and how to use them. +//! +//! Also, if a new integration test file is added, it should be guarded by the +//! `#[cfg(feature = "integration-tests")]` attribute. This ensures that the +//! integration and unit tests can be run separately. 
+ +use bitcoin::{OutPoint, Txid}; +use serde::{Deserialize, Serialize}; + +pub mod actor; +pub mod aggregator; +pub mod bitcoin_syncer; +pub mod bitvm_client; +pub mod builder; +pub mod citrea; +pub mod cli; +pub mod config; +pub mod constants; +pub mod database; +pub mod deposit; +pub mod encryption; +pub mod errors; +pub mod extended_bitcoin_rpc; +pub mod header_chain_prover; +pub mod metrics; +pub mod musig2; +pub mod operator; +pub mod rpc; +pub mod servers; +pub mod task; +pub mod utils; +pub mod verifier; + +#[cfg(feature = "automation")] +pub mod states; +#[cfg(feature = "automation")] +pub mod tx_sender; + +#[cfg(test)] +pub mod test; + +macro_rules! impl_try_from_vec_u8 { + ($name:ident, $size:expr) => { + impl TryFrom> for $name { + type Error = &'static str; + + fn try_from(value: Vec) -> Result { + if value.len() == $size { + Ok($name(value.try_into().unwrap())) + } else { + Err(concat!("Expected a Vec of length ", stringify!($size))) + } + } + } + }; +} + +pub type ConnectorUTXOTree = Vec>; +// pub type HashTree = Vec>; +// pub type PreimageTree = Vec>; +pub type InscriptionTxs = (OutPoint, Txid); + +/// Type alias for EVM address +#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct EVMAddress(#[serde(with = "hex::serde")] pub [u8; 20]); + +impl_try_from_vec_u8!(EVMAddress, 20); + +#[derive(Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] +pub struct UTXO { + pub outpoint: OutPoint, + pub txout: bitcoin::TxOut, +} + +#[derive(Clone, Debug, Copy, Serialize, Deserialize, PartialEq, sqlx::Type)] +#[sqlx(type_name = "bytea")] +pub struct ByteArray66(#[serde(with = "hex::serde")] pub [u8; 66]); + +impl_try_from_vec_u8!(ByteArray66, 66); + +#[derive(Clone, Debug, Copy, Serialize, Deserialize, PartialEq, sqlx::Type)] +#[sqlx(type_name = "bytea")] +pub struct ByteArray32(#[serde(with = "hex::serde")] pub [u8; 32]); + +impl_try_from_vec_u8!(ByteArray32, 32); + +#[derive(Clone, Copy, 
Debug, Serialize, Deserialize, PartialEq, sqlx::Type)] +#[sqlx(type_name = "bytea")] +pub struct ByteArray64(#[serde(with = "hex::serde")] pub [u8; 64]); + +impl_try_from_vec_u8!(ByteArray64, 64); diff --git a/core/src/main.rs b/core/src/main.rs new file mode 100644 index 000000000..b35d1528f --- /dev/null +++ b/core/src/main.rs @@ -0,0 +1,132 @@ +//! # Clementine ๐ŸŠ +//! +//! This is Clementine, Citrea's BitVM based trust-minimized two-way peg program. +//! +//! Clementine binary acts as a server for the every actor. An entity should +//! spawn multiple actor servers that it needs, in different processes. Meaning +//! Clementine binary should be run multiple times with different arguments. + +use bitcoincore_rpc::RpcApi; +use clementine_core::{ + actor::Actor, + bitvm_client::{load_or_generate_bitvm_cache, BITVM_CACHE}, + citrea::CitreaClient, + cli::{self, get_cli_config}, + database::Database, + extended_bitcoin_rpc::ExtendedBitcoinRpc, + servers::{ + create_aggregator_grpc_server, create_operator_grpc_server, create_verifier_grpc_server, + }, + utils::{initialize_logger, initialize_telemetry}, +}; +use std::str::FromStr; +use tracing::{level_filters::LevelFilter, Level}; + +#[tokio::main] +async fn main() { + rustls::crypto::ring::default_provider() + .install_default() + .expect("Failed to install rustls crypto provider"); + + let (config, args) = get_cli_config(); + + let level_filter = match args.verbose { + 0 => None, + other => Some(LevelFilter::from_level( + Level::from_str(&other.to_string()).unwrap_or(Level::INFO), + )), + }; + + initialize_logger(level_filter).expect("Failed to initialize logger."); + + if let Some(telemetry) = &config.telemetry { + if let Err(e) = initialize_telemetry(telemetry) { + tracing::error!("Failed to initialize telemetry listener: {:?}", e); + } + } + + // Load the BitVM cache on startup. 
+ tracing::info!("Loading BitVM cache..."); + BITVM_CACHE.get_or_init(load_or_generate_bitvm_cache); + + Database::run_schema_script(&config, args.actor == cli::Actors::Verifier) + .await + .expect("Can't run schema script"); + + let mut handle = match args.actor { + cli::Actors::Verifier => { + println!("Starting verifier server..."); + + create_verifier_grpc_server::(config.clone()) + .await + .expect("Can't create verifier server") + .1 + } + cli::Actors::Operator => { + println!("Starting operator server..."); + + create_operator_grpc_server::(config.clone()) + .await + .expect("Can't create operator server") + .1 + } + cli::Actors::Aggregator => { + println!("Starting aggregator server..."); + + create_aggregator_grpc_server(config.clone()) + .await + .expect("Can't create aggregator server") + .1 + } + cli::Actors::TestActor => { + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .expect("Failed to connect to Bitcoin RPC"); + + // Test DB connection with running the schema script. + Database::run_schema_script(&config, true) + .await + .expect("Database connection failed"); + + // This also checks if Bitcoin connection is healthy or not. 
+ let unspents = rpc + .list_unspent(None, None, None, None, None) + .await + .expect("Failed to get unspent outputs"); + let mut addresses = vec![]; + for unspent in unspents { + if let Some(address) = unspent.address { + let serialized_address = address.assume_checked().to_string(); + + if !addresses.contains(&serialized_address) { + addresses.push(serialized_address); + } + } + } + let address = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset.network, + ) + .address; + + println!("Configuration: {:#?}", config); + println!("Bitcoin address: {}", address); + println!("Bitcoin node addresses: {:?}", addresses); + + println!("DB connection is successful."); + println!("Bitcoin node connection is successful."); + println!("Your node is healthy and ready to run."); + + std::process::exit(0); + } + }; + println!("Server has started successfully."); + + handle.closed().await; +} diff --git a/core/src/metrics.rs b/core/src/metrics.rs new file mode 100644 index 000000000..11b153ced --- /dev/null +++ b/core/src/metrics.rs @@ -0,0 +1,376 @@ +//! This module includes helper functions to get the blockchain synchronization status of the entity. +//! The entity tracks on-chain transactions for many purposes (TxSender, +//! FinalizedBlockFetcher, HCP) and takes action (header chain proving, payout, +//! disprove, L2 state sync, etc.) +//! SyncStatus tracks the latest processed block heights for each of these tasks. +//! 
+use std::{sync::LazyLock, time::Duration}; + +use bitcoin::Amount; +use bitcoincore_rpc::RpcApi; +use eyre::Context; +use metrics::Gauge; +use tonic::async_trait; + +use crate::{ + database::Database, + errors::BridgeError, + extended_bitcoin_rpc::ExtendedBitcoinRpc, + utils::{timed_request_base, NamedEntity}, +}; +use metrics_derive::Metrics; + +const L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT: Duration = Duration::from_secs(45); + +#[derive(Metrics)] +#[metrics(scope = "l1_sync_status")] +/// The L1 sync status metrics for the currently running entity. (operator/verifier) +pub struct L1SyncStatusMetrics { + #[metric(describe = "The current balance of the wallet in Bitcoin (BTC)")] + pub wallet_balance_btc: Gauge, + #[metric(describe = "The block height of the chain as seen by Bitcoin Core RPC")] + pub rpc_tip_height: Gauge, + #[metric(describe = "The block height of the Bitcoin Syncer")] + pub btc_syncer_synced_height: Gauge, + #[metric(describe = "The block height of the latest header chain proof")] + pub hcp_last_proven_height: Gauge, + #[metric(describe = "The block height processed by the Transaction Sender")] + pub tx_sender_synced_height: Gauge, + #[metric(describe = "The finalized block height as seen by the FinalizedBlockFetcher task")] + pub finalized_synced_height: Gauge, + #[metric(describe = "The next block height to process for the State Manager")] + pub state_manager_next_height: Gauge, +} + +#[derive(Metrics)] +#[metrics(dynamic = true)] +/// The L1 sync status metrics for an entity. This is used by the aggregator to +/// publish external entity metrics. The scope will be set to the EntityId + +/// "_l1_sync_status", which will be displayed as +/// `Operator(abcdef123...)_l1_sync_status` or +/// `Verifier(abcdef123...)_l1_sync_status` where the XOnlyPublicKey's first 10 +/// characters are displayed, cf. [`crate::aggregator::OperatorId`] and +/// [`crate::aggregator::VerifierId`]. 
+pub struct EntityL1SyncStatusMetrics { + #[metric(describe = "The current balance of the wallet of the entity in Bitcoin (BTC)")] + pub wallet_balance_btc: Gauge, + #[metric( + describe = "The block height of the chain as seen by Bitcoin Core RPC for the entity" + )] + pub rpc_tip_height: Gauge, + #[metric(describe = "The block height of the Bitcoin Syncer for the entity")] + pub btc_syncer_synced_height: Gauge, + #[metric(describe = "The block height of the latest header chain proof for the entity")] + pub hcp_last_proven_height: Gauge, + #[metric(describe = "The block height processed by the Transaction Sender for the entity")] + pub tx_sender_synced_height: Gauge, + #[metric( + describe = "The finalized block height as seen by the FinalizedBlockFetcher task for the entity" + )] + pub finalized_synced_height: Gauge, + #[metric(describe = "The next block height to process for the State Manager for the entity")] + pub state_manager_next_height: Gauge, + + #[metric(describe = "The number of error responses from the entity status endpoint")] + pub entity_status_error_count: metrics::Counter, + + #[metric(describe = "The number of stopped tasks for the entity")] + pub stopped_tasks_count: Gauge, +} + +/// The L1 sync status metrics static for the currently running entity. (operator/verifier) +pub static L1_SYNC_STATUS: LazyLock = LazyLock::new(|| { + L1SyncStatusMetrics::describe(); + L1SyncStatusMetrics::default() +}); + +/// A struct containing the current sync status of the entity. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct L1SyncStatus { + pub wallet_balance: Option, + pub rpc_tip_height: Option, + pub btc_syncer_synced_height: Option, + pub hcp_last_proven_height: Option, + pub tx_sender_synced_height: Option, + pub finalized_synced_height: Option, + pub state_manager_next_height: Option, +} + +/// Get the current balance of the wallet. 
+pub async fn get_wallet_balance(rpc: &ExtendedBitcoinRpc) -> Result { + let balance = rpc + .get_balance(None, None) + .await + .wrap_err("Failed to get wallet balance")?; + + Ok(balance) +} + +/// Get the current height of the chain as seen by Bitcoin Core RPC. +pub async fn get_rpc_tip_height(rpc: &ExtendedBitcoinRpc) -> Result { + let height = rpc.get_current_chain_height().await?; + Ok(height) +} + +/// Get the last processed block height of the given consumer or None if no +/// block was processed by the consumer. +pub async fn get_btc_syncer_consumer_last_processed_block_height( + db: &Database, + consumer_handle: &str, +) -> Result, BridgeError> { + db.get_last_processed_event_block_height(None, consumer_handle) + .await +} + +/// Get the last processed block height of the Bitcoin Syncer or None if no +/// block is present in the database. +pub async fn get_btc_syncer_synced_height(db: &Database) -> Result, BridgeError> { + let height = db.get_max_height(None).await?; + Ok(height) +} + +/// Get the last proven block height of the HCP or None if no block has been proven. +pub async fn get_hcp_last_proven_height(db: &Database) -> Result, BridgeError> { + let latest_proven_block_height = db + .get_latest_proven_block_info(None) + .await? + .map(|(_, _, height)| height as u32); + Ok(latest_proven_block_height) +} + +/// Get the next height of the State Manager or None if the State Manager status +/// for the owner is missing or the next_height_to_process is NULL. +pub async fn get_state_manager_next_height( + db: &Database, + owner_type: &str, +) -> Result, BridgeError> { + #[cfg(feature = "automation")] + { + let next_height = db + .get_next_height_to_process(None, owner_type) + .await? + .map(|x| x as u32); + Ok(next_height) + } + #[cfg(not(feature = "automation"))] + { + Ok(None) + } +} + +#[async_trait] +/// Extension trait on named entities who synchronize to the L1 data, to retrieve their L1 sync status. 
+pub trait L1SyncStatusProvider: NamedEntity { + async fn get_l1_status( + db: &Database, + rpc: &ExtendedBitcoinRpc, + ) -> Result; +} + +#[async_trait] +impl L1SyncStatusProvider for T { + async fn get_l1_status( + db: &Database, + rpc: &ExtendedBitcoinRpc, + ) -> Result { + let wallet_balance = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_wallet_balance", + get_wallet_balance(rpc), + ) + .await + .ok() + .transpose()?; + + let rpc_tip_height = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_rpc_tip_height", + get_rpc_tip_height(rpc), + ) + .await + .ok() + .transpose()?; + + let tx_sender_synced_height = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_tx_sender_synced_height", + get_btc_syncer_consumer_last_processed_block_height(db, T::TX_SENDER_CONSUMER_ID), + ) + .await + .ok() + .transpose()? + .flatten(); + + #[cfg(feature = "automation")] + let finalized_synced_height = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_finalized_synced_height", + get_btc_syncer_consumer_last_processed_block_height( + db, + T::FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION, + ), + ) + .await + .ok() + .transpose()? + .flatten(); + + #[cfg(not(feature = "automation"))] + let finalized_synced_height = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_finalized_synced_height", + get_btc_syncer_consumer_last_processed_block_height( + db, + T::FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION, + ), + ) + .await + .ok() + .transpose()? + .flatten(); + + let btc_syncer_synced_height = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_btc_syncer_synced_height", + get_btc_syncer_synced_height(db), + ) + .await + .ok() + .transpose()? + .flatten(); + let hcp_last_proven_height = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_hcp_last_proven_height", + get_hcp_last_proven_height(db), + ) + .await + .ok() + .transpose()? 
+ .flatten(); + let state_manager_next_height = timed_request_base( + L1_SYNC_STATUS_SUB_REQUEST_METRICS_TIMEOUT, + "get_state_manager_next_height", + get_state_manager_next_height(db, T::ENTITY_NAME), + ) + .await + .ok() + .transpose()? + .flatten(); + + Ok(L1SyncStatus { + wallet_balance, + rpc_tip_height, + btc_syncer_synced_height, + hcp_last_proven_height, + tx_sender_synced_height, + finalized_synced_height, + state_manager_next_height, + }) + } +} + +#[cfg(test)] +mod tests { + #[cfg(not(feature = "automation"))] + use crate::rpc::clementine::EntityType; + use crate::{ + rpc::clementine::GetEntityStatusesRequest, + test::common::{ + citrea::MockCitreaClient, create_actors, create_regtest_rpc, + create_test_config_with_thread_name, + }, + }; + use std::time::Duration; + + #[tokio::test] + async fn test_get_sync_status() { + let mut config = create_test_config_with_thread_name().await; + let _regtest = create_regtest_rpc(&mut config).await; + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + // wait for entities to sync a bit, this might cause flakiness, if so increase sleep time or make it serial + tokio::time::sleep(Duration::from_secs(40)).await; + let entity_statuses = aggregator + .get_entity_statuses(tonic::Request::new(GetEntityStatusesRequest { + restart_tasks: false, + })) + .await + .unwrap() + .into_inner(); + + for entity in entity_statuses.entity_statuses { + let status = entity.status_result.unwrap(); + match status { + crate::rpc::clementine::entity_status_with_id::StatusResult::Status(status) => { + tracing::info!("Status: {:#?}", status); + #[cfg(feature = "automation")] + { + assert!(status.automation); + assert!( + status + .tx_sender_synced_height + .expect("tx_sender_synced_height is None") + > 0 + ); + assert!( + status + .finalized_synced_height + .expect("finalized_synced_height is None") + > 0 + ); + assert!( + status + .hcp_last_proven_height + .expect("hcp_last_proven_height is None") + > 
0 + ); + assert!(status.rpc_tip_height.expect("rpc_tip_height is None") > 0); + assert!( + status + .bitcoin_syncer_synced_height + .expect("bitcoin_syncer_synced_height is None") + > 0 + ); + assert!( + status + .state_manager_next_height + .expect("state_manager_next_height is None") + > 0 + ); + } + #[cfg(not(feature = "automation"))] + { + let entity_type: EntityType = + entity.entity_id.unwrap().kind.try_into().unwrap(); + // tx sender and hcp are not running in non-automation mode + assert!(!status.automation); + assert!(status.tx_sender_synced_height.is_none()); + if entity_type == EntityType::Verifier { + assert!( + status + .finalized_synced_height + .expect("finalized_synced_height is None") + > 0 + ); + } else { + // operator doesn't run finalized block fetcher in non-automation mode + assert!(status.finalized_synced_height.is_none()); + } + assert!(status.hcp_last_proven_height.is_none()); + assert!(status.rpc_tip_height.expect("rpc_tip_height is None") > 0); + assert!( + status + .bitcoin_syncer_synced_height + .expect("bitcoin_syncer_synced_height is None") + > 0 + ); + assert!(status.state_manager_next_height.is_none()); + } + } + crate::rpc::clementine::entity_status_with_id::StatusResult::Err(error) => { + panic!("Couldn't get entity status: {}", error.error); + } + } + } + } +} diff --git a/core/src/musig2.rs b/core/src/musig2.rs new file mode 100644 index 000000000..694b9cb5f --- /dev/null +++ b/core/src/musig2.rs @@ -0,0 +1,797 @@ +//! # MuSig2 +//! +//! Helper functions for the MuSig2 signature scheme. 
+ +use crate::{bitvm_client::SECP, errors::BridgeError}; +use bitcoin::{ + hashes::Hash, + key::Keypair, + secp256k1::{schnorr, Message, PublicKey, SecretKey}, + TapNodeHash, XOnlyPublicKey, +}; +use eyre::Context; +use lazy_static::lazy_static; +use secp256k1::{ + musig::{ + new_nonce_pair, AggregatedNonce, KeyAggCache, PartialSignature, PublicNonce, SecretNonce, + Session, SessionSecretRand, + }, + Scalar, SECP256K1, +}; +use sha2::{Digest, Sha256}; + +pub type MuSigNoncePair = (SecretNonce, PublicNonce); + +pub fn from_secp_xonly(xpk: secp256k1::XOnlyPublicKey) -> XOnlyPublicKey { + XOnlyPublicKey::from_slice(&xpk.serialize()).expect("serialized pubkey is valid") +} + +pub fn to_secp_pk(pk: PublicKey) -> secp256k1::PublicKey { + secp256k1::PublicKey::from_slice(&pk.serialize()).expect("serialized pubkey is valid") +} +pub fn from_secp_pk(pk: secp256k1::PublicKey) -> PublicKey { + PublicKey::from_slice(&pk.serialize()).expect("serialized pubkey is valid") +} + +pub fn to_secp_sk(sk: SecretKey) -> secp256k1::SecretKey { + secp256k1::SecretKey::from_slice(&sk.secret_bytes()).expect("serialized secret key is valid") +} + +pub fn to_secp_kp(kp: &Keypair) -> secp256k1::Keypair { + secp256k1::Keypair::from_seckey_slice(SECP256K1, &kp.secret_bytes()) + .expect("serialized secret key is valid") +} +pub fn from_secp_kp(kp: &secp256k1::Keypair) -> Keypair { + Keypair::from_seckey_slice(&SECP, &kp.secret_bytes()).expect("serialized secret key is valid") +} + +pub fn from_secp_sig(sig: secp256k1::schnorr::Signature) -> schnorr::Signature { + schnorr::Signature::from_slice(&sig.to_byte_array()).expect("serialized signature is valid") +} + +pub fn to_secp_msg(msg: &Message) -> secp256k1::Message { + secp256k1::Message::from_digest(*msg.as_ref()) +} + +/// Possible Musig2 modes. +#[derive(Debug, Clone, Copy)] +pub enum Musig2Mode { + /// No taproot tweak. + ScriptSpend, + /// Taproot tweak with aggregated public key. 
+ OnlyKeySpend, + /// Taproot tweak with tweaked aggregated public key. + KeySpendWithScript(TapNodeHash), +} + +/// sha256(b"TapTweak") +const TAPROOT_TWEAK_TAG_DIGEST: [u8; 32] = [ + 0xe8, 0x0f, 0xe1, 0x63, 0x9c, 0x9c, 0xa0, 0x50, 0xe3, 0xaf, 0x1b, 0x39, 0xc1, 0x43, 0xc6, 0x3e, + 0x42, 0x9c, 0xbc, 0xeb, 0x15, 0xd9, 0x40, 0xfb, 0xb5, 0xc5, 0xa1, 0xf4, 0xaf, 0x57, 0xc5, 0xe9, +]; + +lazy_static! { + pub static ref TAPROOT_TWEAK_TAGGED_HASH: Sha256 = Sha256::new() + .chain_update(TAPROOT_TWEAK_TAG_DIGEST) + .chain_update(TAPROOT_TWEAK_TAG_DIGEST); +} + +fn create_key_agg_cache( + mut public_keys: Vec, + mode: Option, +) -> Result { + if public_keys.is_empty() { + return Err(BridgeError::from(eyre::eyre!( + "MuSig2 Error: cannot create key aggregation cache (no public keys provided)" + ))); + } + public_keys.sort(); + let secp_pubkeys: Vec = + public_keys.iter().map(|pk| to_secp_pk(*pk)).collect(); + let pubkeys_ref: Vec<&secp256k1::PublicKey> = secp_pubkeys.iter().collect(); + let pubkeys_ref = pubkeys_ref.as_slice(); + + let mut musig_key_agg_cache = KeyAggCache::new(SECP256K1, pubkeys_ref); + let agg_key = musig_key_agg_cache.agg_pk(); + + if let Some(mode) = mode { + match mode { + Musig2Mode::ScriptSpend => (), + Musig2Mode::OnlyKeySpend => { + // sha256(C, C, IPK) where C = sha256("TapTweak") + let xonly_tweak = TAPROOT_TWEAK_TAGGED_HASH + .clone() + .chain_update(agg_key.serialize()) + .finalize(); + + musig_key_agg_cache + .pubkey_xonly_tweak_add( + SECP256K1, + &Scalar::from_be_bytes(xonly_tweak.into()) + .wrap_err("Failed to create scalar from xonly tweak bytes")?, + ) + .wrap_err("Failed to tweak aggregated public key")?; + } + Musig2Mode::KeySpendWithScript(merkle_root) => { + // sha256(C, C, IPK, s) where C = sha256("TapTweak") + let xonly_tweak = TAPROOT_TWEAK_TAGGED_HASH + .clone() + .chain_update(agg_key.serialize()) + .chain_update(merkle_root.to_raw_hash().to_byte_array()) + .finalize(); + + musig_key_agg_cache + .pubkey_ec_tweak_add( + SECP256K1, + 
&Scalar::from_be_bytes(xonly_tweak.into()) + .wrap_err("Failed to create scalar from xonly tweak bytes")?, + ) + .wrap_err("Failed to tweak aggregated public key")?; + } + } + }; + + Ok(musig_key_agg_cache) +} + +pub trait AggregateFromPublicKeys { + fn from_musig2_pks( + pks: Vec, + tweak: Option, + ) -> Result; +} + +impl AggregateFromPublicKeys for XOnlyPublicKey { + fn from_musig2_pks( + pks: Vec, + tweak: Option, + ) -> Result { + let musig_key_agg_cache = create_key_agg_cache(pks, tweak)?; + + Ok( + XOnlyPublicKey::from_slice(&musig_key_agg_cache.agg_pk().serialize()) + .wrap_err("Failed to create XOnlyPublicKey from aggregated public key")?, + ) + } +} + +// Aggregates the public nonces into a single aggregated nonce. +pub fn aggregate_nonces(pub_nonces: &[&PublicNonce]) -> Result { + if pub_nonces.is_empty() { + return Err(BridgeError::from(eyre::eyre!( + "MuSig2 Error: cannot aggregate nonces (no public nonces provided)" + ))); + } + Ok(AggregatedNonce::new(SECP256K1, pub_nonces)) +} + +// Aggregates the partial signatures into a single aggregated signature. +pub fn aggregate_partial_signatures( + pks: Vec, + tweak: Option, + agg_nonce: AggregatedNonce, + partial_sigs: &[PartialSignature], + message: Message, +) -> Result { + let musig_key_agg_cache = create_key_agg_cache(pks, tweak)?; + let secp_message = to_secp_msg(&message); + + let session = Session::new(SECP256K1, &musig_key_agg_cache, agg_nonce, secp_message); + + let partial_sigs: Vec<&PartialSignature> = partial_sigs.iter().collect(); + let final_sig = session.partial_sig_agg(&partial_sigs); + + SECP256K1 + .verify_schnorr( + &final_sig.assume_valid(), + secp_message.as_ref(), + &musig_key_agg_cache.agg_pk(), + ) + .wrap_err("Failed to verify schnorr signature")?; + + Ok(from_secp_sig(final_sig.assume_valid())) +} + +/// Generates a pair of nonces, one secret and one public. Be careful, +/// DO NOT REUSE the same pair of nonces for multiple transactions. 
It will cause +/// you to leak your secret key. For more information. See: +/// . +pub fn nonce_pair(keypair: &Keypair) -> Result<(SecretNonce, PublicNonce), BridgeError> { + let musig_session_sec_rand = SessionSecretRand::new(); + + Ok(new_nonce_pair( + SECP256K1, + musig_session_sec_rand, + None, + None, + to_secp_kp(keypair).public_key(), + None, + None, + )) +} + +pub fn partial_sign( + pks: Vec, + // Aggregated tweak, if there is any. This is useful for + // Taproot key-spends, since we might have script-spend conditions. + tweak: Option, + sec_nonce: SecretNonce, + agg_nonce: AggregatedNonce, + keypair: Keypair, + sighash: Message, +) -> Result { + let musig_key_agg_cache = create_key_agg_cache(pks, tweak)?; + + let session = Session::new( + SECP256K1, + &musig_key_agg_cache, + agg_nonce, + to_secp_msg(&sighash), + ); + + Ok(session.partial_sign( + SECP256K1, + sec_nonce, + &to_secp_kp(&keypair), + &musig_key_agg_cache, + )) +} + +#[cfg(test)] +mod tests { + use super::{nonce_pair, MuSigNoncePair, Musig2Mode}; + use crate::builder::script::{CheckSig, OtherSpendable, SpendableScript}; + use crate::builder::transaction::{TransactionType, DEFAULT_SEQUENCE}; + use crate::rpc::clementine::NormalSignatureKind; + use crate::{ + bitvm_client::{self, SECP}, + builder::{ + self, + transaction::{input::SpendableTxIn, output::UnspentTxOut, TxHandlerBuilder}, + }, + errors::BridgeError, + musig2::{ + aggregate_nonces, aggregate_partial_signatures, create_key_agg_cache, from_secp_xonly, + partial_sign, AggregateFromPublicKeys, + }, + }; + use bitcoin::{ + hashes::Hash, + key::Keypair, + script, + secp256k1::{schnorr, Message, PublicKey}, + Amount, OutPoint, TapNodeHash, TapSighashType, TxOut, Txid, XOnlyPublicKey, + }; + use secp256k1::{musig::PartialSignature, rand::Rng}; + use std::sync::Arc; + use std::vec; + + /// Generates random key and nonce pairs for a given number of signers. 
+ fn create_key_and_nonce_pairs(num_signers: usize) -> (Vec, Vec) { + let mut key_pairs = Vec::new(); + let mut nonce_pairs = Vec::new(); + + for _ in 0..num_signers { + let key_pair = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let nonce_pair = nonce_pair(&key_pair).unwrap(); + + key_pairs.push(key_pair); + nonce_pairs.push(nonce_pair); + } + + (key_pairs, nonce_pairs) + } + + #[test] + fn musig2_raw_without_a_tweak() { + let (key_pairs, nonce_pairs) = create_key_and_nonce_pairs(3); + let message = Message::from_digest(secp256k1::rand::rng().random()); + + let public_keys = key_pairs + .iter() + .map(|kp| kp.public_key()) + .collect::>(); + let agg_pk = XOnlyPublicKey::from_musig2_pks(public_keys.clone(), None).unwrap(); + + let aggregated_nonce = super::aggregate_nonces( + nonce_pairs + .iter() + .map(|(_, musig_pub_nonce)| musig_pub_nonce) + .collect::>() + .as_slice(), + ) + .unwrap(); + + let partial_sigs = key_pairs + .into_iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + super::partial_sign( + public_keys.clone(), + None, + nonce_pair.0, + aggregated_nonce, + kp, + message, + ) + .unwrap() + }) + .collect::>(); + + let final_signature = super::aggregate_partial_signatures( + public_keys, + None, + aggregated_nonce, + &partial_sigs, + message, + ) + .unwrap(); + + SECP.verify_schnorr(&final_signature, &message, &agg_pk) + .unwrap(); + } + + #[test] + fn musig2_raw_fail_if_partial_sigs_invalid() { + let kp_0 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let kp_1 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let kp_2 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + + let message = Message::from_digest(secp256k1::rand::rng().random()); + + let pks = vec![kp_0.public_key(), kp_1.public_key(), kp_2.public_key()]; + + let (sec_nonce_0, pub_nonce_0) = super::nonce_pair(&kp_0).unwrap(); + let (sec_nonce_1, pub_nonce_1) = super::nonce_pair(&kp_1).unwrap(); + let 
(sec_nonce_2, pub_nonce_2) = super::nonce_pair(&kp_2).unwrap(); + + let agg_nonce = + super::aggregate_nonces(&[&pub_nonce_0, &pub_nonce_1, &pub_nonce_2]).unwrap(); + + let partial_sig_0 = + super::partial_sign(pks.clone(), None, sec_nonce_0, agg_nonce, kp_0, message).unwrap(); + let partial_sig_1 = + super::partial_sign(pks.clone(), None, sec_nonce_1, agg_nonce, kp_1, message).unwrap(); + // Oops, a verifier accidentally added some tweak! + let partial_sig_2 = super::partial_sign( + pks.clone(), + Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&[1u8; 32]).unwrap(), + )), + sec_nonce_2, + agg_nonce, + kp_2, + message, + ) + .unwrap(); + let partial_sigs = vec![partial_sig_0, partial_sig_1, partial_sig_2]; + + let final_signature: Result = + super::aggregate_partial_signatures(pks, None, agg_nonce, &partial_sigs, message); + + assert!(final_signature.is_err()); + } + + #[test] + fn musig2_sig_with_tweak() { + let (key_pairs, nonce_pairs) = create_key_and_nonce_pairs(3); + let message = Message::from_digest(secp256k1::rand::rng().random()); + let tweak: [u8; 32] = secp256k1::rand::rng().random(); + + let public_keys = key_pairs + .iter() + .map(|kp| kp.public_key()) + .collect::>(); + let aggregated_pk: XOnlyPublicKey = XOnlyPublicKey::from_musig2_pks( + public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&tweak).unwrap(), + )), + ) + .unwrap(); + + let aggregated_nonce = super::aggregate_nonces( + nonce_pairs + .iter() + .map(|(_, musig_pub_nonce)| musig_pub_nonce) + .collect::>() + .as_slice(), + ) + .unwrap(); + + let partial_sigs = key_pairs + .into_iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + super::partial_sign( + public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&tweak).unwrap(), + )), + nonce_pair.0, + aggregated_nonce, + kp, + message, + ) + .unwrap() + }) + .collect::>(); + + let final_signature = super::aggregate_partial_signatures( + public_keys, + 
Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&tweak).unwrap(), + )), + aggregated_nonce, + &partial_sigs, + message, + ) + .unwrap(); + + SECP.verify_schnorr(&final_signature, &message, &aggregated_pk) + .unwrap(); + } + + #[test] + fn musig2_tweak_fail() { + let kp_0 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let kp_1 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let kp_2 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + + let message = Message::from_digest(secp256k1::rand::rng().random::<[u8; 32]>()); + + let tweak: [u8; 32] = secp256k1::rand::rng().random(); + + let pks = vec![kp_0.public_key(), kp_1.public_key(), kp_2.public_key()]; + + let (sec_nonce_0, pub_nonce_0) = super::nonce_pair(&kp_0).unwrap(); + let (sec_nonce_1, pub_nonce_1) = super::nonce_pair(&kp_1).unwrap(); + let (sec_nonce_2, pub_nonce_2) = super::nonce_pair(&kp_2).unwrap(); + + let agg_nonce = + super::aggregate_nonces(&[&pub_nonce_0, &pub_nonce_1, &pub_nonce_2]).unwrap(); + + let partial_sig_0 = super::partial_sign( + pks.clone(), + Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&tweak).unwrap(), + )), + sec_nonce_0, + agg_nonce, + kp_0, + message, + ) + .unwrap(); + let partial_sig_1 = super::partial_sign( + pks.clone(), + Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&tweak).unwrap(), + )), + sec_nonce_1, + agg_nonce, + kp_1, + message, + ) + .unwrap(); + // Oops, a verifier accidentally forgot to put the tweak! 
+ let partial_sig_2 = + super::partial_sign(pks.clone(), None, sec_nonce_2, agg_nonce, kp_2, message).unwrap(); + let partial_sigs = vec![partial_sig_0, partial_sig_1, partial_sig_2]; + + let final_signature = super::aggregate_partial_signatures( + pks, + Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&tweak).unwrap(), + )), + agg_nonce, + &partial_sigs, + message, + ); + + assert!(final_signature.is_err()); + } + + #[test] + fn musig2_key_spend() { + let (key_pairs, nonce_pairs) = create_key_and_nonce_pairs(2); + let public_keys = key_pairs + .iter() + .map(|key_pair| key_pair.public_key()) + .collect::>(); + + let untweaked_xonly_pubkey = + XOnlyPublicKey::from_musig2_pks(public_keys.clone(), None).unwrap(); + + let agg_nonce = super::aggregate_nonces( + nonce_pairs + .iter() + .map(|(_, musig_pub_nonce)| musig_pub_nonce) + .collect::>() + .as_slice(), + ) + .unwrap(); + + let dummy_script = script::Builder::new().push_int(1).into_script(); + let scripts: Vec> = + vec![Arc::new(OtherSpendable::new(dummy_script))]; + let receiving_address = bitcoin::Address::p2tr( + &SECP, + *bitvm_client::UNSPENDABLE_XONLY_PUBKEY, + None, + bitcoin::Network::Regtest, + ); + let (sending_address, sending_address_spend_info) = + builder::address::create_taproot_address( + &scripts + .iter() + .map(|a| a.to_script_buf()) + .collect::>(), + Some(untweaked_xonly_pubkey), + bitcoin::Network::Regtest, + ); + + let prevout = TxOut { + value: Amount::from_sat(100_000_000), + script_pubkey: sending_address.script_pubkey(), + }; + let utxo = OutPoint { + txid: Txid::from_byte_array([0u8; 32]), + vout: 0, + }; + let mut builder = TxHandlerBuilder::new(TransactionType::Dummy); + builder = builder + .add_input( + NormalSignatureKind::OperatorSighashDefault, + SpendableTxIn::new( + utxo, + prevout.clone(), + scripts.clone(), + Some(sending_address_spend_info.clone()), + ), + builder::script::SpendPath::Unknown, + DEFAULT_SEQUENCE, + ) + 
.add_output(UnspentTxOut::from_partial(TxOut { + value: Amount::from_sat(99_000_000), + script_pubkey: receiving_address.script_pubkey(), + })); + + let tx_details = builder.finalize(); + + let message = Message::from_digest( + tx_details + .calculate_pubkey_spend_sighash(0, TapSighashType::Default) + .unwrap() + .to_byte_array(), + ); + let merkle_root = sending_address_spend_info.merkle_root().unwrap(); + + let partial_sigs: Vec = key_pairs + .into_iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + super::partial_sign( + public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + nonce_pair.0, + agg_nonce, + kp, + message, + ) + .unwrap() + }) + .collect(); + + let final_signature = super::aggregate_partial_signatures( + public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + agg_nonce, + &partial_sigs, + message, + ) + .unwrap(); + + let musig_agg_xonly_pubkey = XOnlyPublicKey::from_musig2_pks( + public_keys, + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + ) + .unwrap(); + + SECP.verify_schnorr(&final_signature, &message, &musig_agg_xonly_pubkey) + .unwrap(); + } + + #[test] + fn musig2_script_spend() { + let (key_pairs, nonce_pairs) = create_key_and_nonce_pairs(2); + let public_keys = key_pairs + .iter() + .map(|key_pair| key_pair.public_key()) + .collect::>(); + + let agg_nonce = super::aggregate_nonces( + nonce_pairs + .iter() + .map(|x| &x.1) + .collect::>() + .as_slice(), + ) + .unwrap(); + let musig_agg_xonly_pubkey_wrapped = + XOnlyPublicKey::from_musig2_pks(public_keys.clone(), None).unwrap(); + + let scripts: Vec> = + vec![Arc::new(CheckSig::new(musig_agg_xonly_pubkey_wrapped))]; + + let receiving_address = bitcoin::Address::p2tr( + &SECP, + *bitvm_client::UNSPENDABLE_XONLY_PUBKEY, + None, + bitcoin::Network::Regtest, + ); + let (sending_address, sending_address_spend_info) = + builder::address::create_taproot_address( + &scripts + .iter() + .map(|a| a.to_script_buf()) + .collect::>(), + None, + 
bitcoin::Network::Regtest, + ); + + let prevout = TxOut { + value: Amount::from_sat(100_000_000), + script_pubkey: sending_address.script_pubkey(), + }; + let utxo = OutPoint { + txid: Txid::from_byte_array([0u8; 32]), + vout: 0, + }; + + let tx_details = TxHandlerBuilder::new(TransactionType::Dummy) + .add_input( + NormalSignatureKind::OperatorSighashDefault, + SpendableTxIn::new( + utxo, + prevout, + scripts, + Some(sending_address_spend_info.clone()), + ), + builder::script::SpendPath::Unknown, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: Amount::from_sat(99_000_000), + script_pubkey: receiving_address.script_pubkey(), + })) + .finalize(); + + let message = Message::from_digest( + tx_details + .calculate_script_spend_sighash_indexed(0, 0, bitcoin::TapSighashType::Default) + .unwrap() + .to_byte_array(), + ); + + let partial_sigs: Vec = key_pairs + .into_iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + super::partial_sign( + public_keys.clone(), + None, + nonce_pair.0, + agg_nonce, + kp, + message, + ) + .unwrap() + }) + .collect(); + + let final_signature = super::aggregate_partial_signatures( + public_keys, + None, + agg_nonce, + &partial_sigs, + message, + ) + .unwrap(); + + SECP.verify_schnorr(&final_signature, &message, &musig_agg_xonly_pubkey_wrapped) + .unwrap(); + } + + #[test] + fn different_aggregated_keys_for_different_musig2_modes() { + let kp1 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let kp2 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let public_keys = vec![kp1.public_key(), kp2.public_key()]; + + let key_agg_cache = create_key_agg_cache(public_keys.clone(), None).unwrap(); + let agg_pk_no_tweak = from_secp_xonly(key_agg_cache.agg_pk()); + + let key_agg_cache = + create_key_agg_cache(public_keys.clone(), Some(Musig2Mode::ScriptSpend)).unwrap(); + let agg_pk_script_spend = from_secp_xonly(key_agg_cache.agg_pk()); + + let key_agg_cache = + 
create_key_agg_cache(public_keys.clone(), Some(Musig2Mode::OnlyKeySpend)).unwrap(); + let agg_pk_key_tweak = from_secp_xonly(key_agg_cache.agg_pk()); + + let key_agg_cache = create_key_agg_cache( + public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript( + TapNodeHash::from_slice(&[1u8; 32]).unwrap(), + )), + ) + .unwrap(); + let agg_pk_script_tweak = from_secp_xonly(key_agg_cache.agg_pk()); + + assert_eq!(agg_pk_no_tweak, agg_pk_script_spend); + + assert_ne!(agg_pk_no_tweak, agg_pk_script_tweak); + assert_ne!(agg_pk_no_tweak, agg_pk_key_tweak); + assert_ne!(agg_pk_script_tweak, agg_pk_key_tweak); + assert_ne!(agg_pk_script_tweak, agg_pk_script_spend); + assert_ne!(agg_pk_key_tweak, agg_pk_script_spend); + } + + #[test] + fn signing_checks_for_different_musig2_modes() { + let kp1 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let kp2 = Keypair::new(&SECP, &mut bitcoin::secp256k1::rand::thread_rng()); + let public_keys = vec![kp1.public_key(), kp2.public_key()]; + + let message = Message::from_digest(secp256k1::rand::rng().random()); + let key_spend_with_script_tweak = + Musig2Mode::KeySpendWithScript(TapNodeHash::from_slice(&[0x45u8; 32]).unwrap()); + + let key_agg_cache = + create_key_agg_cache(public_keys.clone(), Some(key_spend_with_script_tweak)).unwrap(); + let agg_pk_script_tweak = from_secp_xonly(key_agg_cache.agg_pk()); + + let (sec_nonce1, pub_nonce1) = nonce_pair(&kp1).unwrap(); + let (sec_nonce2, pub_nonce2) = nonce_pair(&kp2).unwrap(); + let agg_nonce = aggregate_nonces(&[&pub_nonce1, &pub_nonce2]).unwrap(); + + let partial_sig1 = partial_sign( + public_keys.clone(), + Some(key_spend_with_script_tweak), + sec_nonce1, + agg_nonce, + kp1, + message, + ) + .unwrap(); + let partial_sig2 = partial_sign( + public_keys.clone(), + Some(key_spend_with_script_tweak), + sec_nonce2, + agg_nonce, + kp2, + message, + ) + .unwrap(); + + let final_sig = aggregate_partial_signatures( + public_keys.clone(), + Some(key_spend_with_script_tweak), + 
agg_nonce, + &[partial_sig1, partial_sig2], + message, + ) + .unwrap(); + + SECP.verify_schnorr(&final_sig, &message, &agg_pk_script_tweak) + .unwrap(); + + // Verification will fail with a untweaked aggregate public key against + // a signature created with a tweaked aggregate public key. + let key_agg_cache = create_key_agg_cache(public_keys, None).unwrap(); + let agg_pk_no_tweak = from_secp_xonly(key_agg_cache.agg_pk()); + assert!(SECP + .verify_schnorr(&final_sig, &message, &agg_pk_no_tweak) + .is_err()); + } +} diff --git a/core/src/operator.rs b/core/src/operator.rs new file mode 100644 index 000000000..e808a59e7 --- /dev/null +++ b/core/src/operator.rs @@ -0,0 +1,2495 @@ +use ark_ff::PrimeField; +use circuits_lib::common::constants::{FIRST_FIVE_OUTPUTS, NUMBER_OF_ASSERT_TXS}; + +use crate::actor::{Actor, TweakCache, WinternitzDerivationPath}; +use crate::bitvm_client::{ClementineBitVMPublicKeys, SECP}; +use crate::builder::sighash::{create_operator_sighash_stream, PartialSignatureInfo}; +use crate::builder::transaction::deposit_signature_owner::EntityType; +use crate::builder::transaction::input::UtxoVout; +use crate::builder::transaction::sign::{create_and_sign_txs, TransactionRequestData}; +use crate::builder::transaction::{ + create_burn_unused_kickoff_connectors_txhandler, create_round_nth_txhandler, + create_round_txhandlers, ContractContext, KickoffWinternitzKeys, TransactionType, TxHandler, +}; +use crate::citrea::CitreaClientT; +use crate::config::BridgeConfig; +use crate::database::Database; +use crate::database::DatabaseTransaction; +use crate::deposit::{DepositData, KickoffData, OperatorData}; +use crate::errors::BridgeError; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; + +use crate::metrics::L1SyncStatusProvider; +use crate::rpc::clementine::EntityStatus; +use crate::task::entity_metric_publisher::{ + EntityMetricPublisher, ENTITY_METRIC_PUBLISHER_INTERVAL, +}; +use crate::task::manager::BackgroundTaskManager; +use 
crate::task::payout_checker::{PayoutCheckerTask, PAYOUT_CHECKER_POLL_DELAY}; +use crate::task::TaskExt; +use crate::utils::{monitor_standalone_task, Last20Bytes, ScriptBufExt}; +use crate::utils::{NamedEntity, TxMetadata}; +use crate::{builder, constants, UTXO}; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::schnorr::Signature; +use bitcoin::secp256k1::{schnorr, Message}; +use bitcoin::{Address, Amount, BlockHash, OutPoint, ScriptBuf, Transaction, TxOut, Txid}; +use bitcoincore_rpc::json::AddressType; +use bitcoincore_rpc::RpcApi; +use bitvm::signatures::winternitz; + +use eyre::{Context, OptionExt}; +use tokio::sync::mpsc; +use tokio_stream::StreamExt; + +#[cfg(feature = "automation")] +use { + crate::{ + builder::script::extract_winternitz_commits, + header_chain_prover::HeaderChainProver, + states::StateManager, + task::IntoTask, + tx_sender::{ActivatedWithOutpoint, ActivatedWithTxid, TxSenderClient}, + utils::FeePayingType, + }, + bitcoin::Witness, + bitvm::chunk::api::generate_assertions, + bridge_circuit_host::{ + bridge_circuit_host::{ + create_spv, prove_bridge_circuit, MAINNET_BRIDGE_CIRCUIT_ELF, + REGTEST_BRIDGE_CIRCUIT_ELF, REGTEST_BRIDGE_CIRCUIT_ELF_TEST, SIGNET_BRIDGE_CIRCUIT_ELF, + TESTNET4_BRIDGE_CIRCUIT_ELF, + }, + structs::{BridgeCircuitHostParams, WatchtowerContext}, + }, + circuits_lib::bridge_circuit::structs::LightClientProof, + std::collections::HashMap, +}; + +pub type SecretPreimage = [u8; 20]; +pub type PublicHash = [u8; 20]; + +/// Round index is used to represent the round index safely. +/// Collateral represents the collateral utxo. +/// Round(index) represents the rounds of the bridge operators, index is 0-indexed. +/// As a single u32, collateral is represented as 0 and rounds are represented starting from 1. 
+#[derive(
+    Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize, Ord, PartialOrd,
+)]
+pub enum RoundIndex {
+    Collateral,
+    Round(usize), // 0-indexed
+}
+
+impl RoundIndex {
+    /// Converts the round to a 0-indexed index.
+    pub fn to_index(&self) -> usize {
+        match self {
+            RoundIndex::Collateral => 0,
+            RoundIndex::Round(index) => *index + 1,
+        }
+    }
+
+    /// Converts a 0-indexed index to a RoundIndex.
+    /// Use this only when dealing with 0-indexed data. Currently these are data coming from the database and rpc.
+    pub fn from_index(index: usize) -> Self {
+        if index == 0 {
+            RoundIndex::Collateral
+        } else {
+            RoundIndex::Round(index - 1)
+        }
+    }
+
+    /// Returns the next RoundIndex.
+    pub fn next_round(&self) -> Self {
+        match self {
+            RoundIndex::Collateral => RoundIndex::Round(0),
+            RoundIndex::Round(index) => RoundIndex::Round(*index + 1),
+        }
+    }
+
+    /// Creates an iterator over rounds from 0 to num_rounds (exclusive)
+    /// Only iterates actual rounds, collateral is not included.
+    pub fn iter_rounds(num_rounds: usize) -> impl Iterator<Item = RoundIndex> {
+        Self::iter_rounds_range(0, num_rounds)
+    }
+
+    /// Creates an iterator over rounds from start to end (exclusive)
+    /// Only iterates actual rounds, collateral is not included.
+    pub fn iter_rounds_range(start: usize, end: usize) -> impl Iterator<Item = RoundIndex> {
+        (start..end).map(RoundIndex::Round)
+    }
+}
+
+pub struct OperatorServer<C: CitreaClientT> {
+    pub operator: Operator<C>,
+    background_tasks: BackgroundTaskManager,
+}
+
+#[derive(Debug, Clone)]
+pub struct Operator<C: CitreaClientT> {
+    pub rpc: ExtendedBitcoinRpc,
+    pub db: Database,
+    pub signer: Actor,
+    pub config: BridgeConfig,
+    pub collateral_funding_outpoint: OutPoint,
+    pub(crate) reimburse_addr: Address,
+    #[cfg(feature = "automation")]
+    pub tx_sender: TxSenderClient,
+    #[cfg(feature = "automation")]
+    pub header_chain_prover: HeaderChainProver,
+    pub citrea_client: C,
+}
+
+impl<C> OperatorServer<C>
+where
+    C: CitreaClientT,
+{
+    pub async fn new(config: BridgeConfig) -> Result<Self, BridgeError> {
+        let operator = Operator::new(config.clone()).await?;
+        let background_tasks = BackgroundTaskManager::default();
+
+        Ok(Self {
+            operator,
+            background_tasks,
+        })
+    }
+
+    /// Starts the background tasks for the operator.
+    /// If called multiple times, it will restart only the tasks that are not already running.
+    pub async fn start_background_tasks(&self) -> Result<(), BridgeError> {
+        // initialize and run state manager
+        #[cfg(feature = "automation")]
+        {
+            let paramset = self.operator.config.protocol_paramset();
+            let state_manager =
+                StateManager::new(self.operator.db.clone(), self.operator.clone(), paramset)
+                    .await?;
+
+            let should_run_state_mgr = {
+                #[cfg(test)]
+                {
+                    self.operator.config.test_params.should_run_state_manager
+                }
+                #[cfg(not(test))]
+                {
+                    true
+                }
+            };
+
+            if should_run_state_mgr {
+                self.background_tasks
+                    .ensure_task_looping(state_manager.block_fetcher_task().await?)
+                    .await;
+                self.background_tasks
+                    .ensure_task_looping(state_manager.into_task())
+                    .await;
+            }
+        }
+
+        // run payout checker task
+        self.background_tasks
+            .ensure_task_looping(
+                PayoutCheckerTask::new(self.operator.db.clone(), self.operator.clone())
+                    .with_delay(PAYOUT_CHECKER_POLL_DELAY),
+            )
+            .await;
+
+        self.background_tasks
+            .ensure_task_looping(
+                EntityMetricPublisher::<Operator<C>>::new(
+                    self.operator.db.clone(),
+                    self.operator.rpc.clone(),
+                )
+                .with_delay(ENTITY_METRIC_PUBLISHER_INTERVAL),
+            )
+            .await;
+
+        tracing::info!("Payout checker task started");
+
+        // track the operator's round state
+        #[cfg(feature = "automation")]
+        {
+            // Will not start a new state machine if one for the operator already exists.
+            self.operator.track_rounds().await?;
+            tracing::info!("Operator round state tracked");
+        }
+
+        Ok(())
+    }
+
+    pub async fn get_current_status(&self) -> Result<EntityStatus, BridgeError> {
+        let stopped_tasks = self.background_tasks.get_stopped_tasks().await?;
+        // Determine if automation is enabled
+        let automation_enabled = cfg!(feature = "automation");
+
+        let sync_status =
+            Operator::<C>::get_l1_status(&self.operator.db, &self.operator.rpc).await?;
+
+        Ok(EntityStatus {
+            automation: automation_enabled,
+            wallet_balance: sync_status
+                .wallet_balance
+                .map(|balance| format!("{} BTC", balance.to_btc())),
+            tx_sender_synced_height: sync_status.tx_sender_synced_height,
+            finalized_synced_height: sync_status.finalized_synced_height,
+            hcp_last_proven_height: sync_status.hcp_last_proven_height,
+            rpc_tip_height: sync_status.rpc_tip_height,
+            bitcoin_syncer_synced_height: sync_status.btc_syncer_synced_height,
+            stopped_tasks: Some(stopped_tasks),
+            state_manager_next_height: sync_status.state_manager_next_height,
+        })
+    }
+
+    pub async fn shutdown(&mut self) {
+        self.background_tasks.graceful_shutdown().await;
+    }
+}
+
+impl<C> Operator<C>
+where
+    C: CitreaClientT,
+{
+    /// Creates a new `Operator`.
+ pub async fn new(config: BridgeConfig) -> Result { + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let db = Database::new(&config).await?; + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + #[cfg(feature = "automation")] + let tx_sender = TxSenderClient::new(db.clone(), Self::TX_SENDER_CONSUMER_ID.to_string()); + + if config.operator_withdrawal_fee_sats.is_none() { + return Err(eyre::eyre!("Operator withdrawal fee is not set").into()); + } + + // check if we store our collateral outpoint already in db + let mut dbtx = db.begin_transaction().await?; + let op_data = db + .get_operator(Some(&mut dbtx), signer.xonly_public_key) + .await?; + let (collateral_funding_outpoint, reimburse_addr) = match op_data { + Some(operator_data) => { + // Operator data is already set in db, we don't actually need to do anything. + // set_operator_checked will give error if the values set in config and db doesn't match. + ( + operator_data.collateral_funding_outpoint, + operator_data.reimburse_addr, + ) + } + None => { + // Operator data is not set in db, then we check if any collateral outpoint and reimbursement address is set in config. + // If so we create a new operator using those data, otherwise we generate new collateral outpoint and reimbursement address. + let reimburse_addr = match &config.operator_reimbursement_address { + Some(reimburse_addr) => { + reimburse_addr + .to_owned() + .require_network(config.protocol_paramset().network) + .wrap_err(format!("Invalid operator reimbursement address provided in config: {:?} for network: {:?}", reimburse_addr, config.protocol_paramset().network))? + } + None => { + rpc + .get_new_address(Some("OperatorReimbursement"), Some(AddressType::Bech32m)) + .await + .wrap_err("Failed to get new address")? 
+ .require_network(config.protocol_paramset().network) + .wrap_err(format!("Invalid operator reimbursement address generated for the network in config: {:?} + Possibly the provided rpc's network and network given in config doesn't match", config.protocol_paramset().network))? + } + }; + let outpoint = match &config.operator_collateral_funding_outpoint { + Some(outpoint) => { + // check if outpoint exists on chain and has exactly collateral funding amount + let collateral_tx = rpc + .get_tx_of_txid(&outpoint.txid) + .await + .wrap_err("Failed to get collateral funding tx")?; + let collateral_txout = collateral_tx + .output + .get(outpoint.vout as usize) + .ok_or_eyre("Invalid vout index for collateral funding tx")?; + if collateral_txout.value + != config.protocol_paramset().collateral_funding_amount + { + return Err(eyre::eyre!("Operator collateral funding outpoint given in config has a different amount than the one specified in config.. + Bridge collateral funding amount: {:?}, Amount in given outpoint: {:?}", config.protocol_paramset().collateral_funding_amount, collateral_txout.value).into()); + } + if collateral_txout.script_pubkey != signer.address.script_pubkey() { + return Err(eyre::eyre!("Operator collateral funding outpoint given in config has a different script pubkey than the pubkey matching to the operator's secret key. Script pubkey should correspond to taproot address with no scripts and internal key equal to the operator's xonly public key. + Script pubkey in given outpoint: {:?}, Script pubkey should be: {:?}", collateral_txout.script_pubkey, signer.address.script_pubkey()).into()); + } + *outpoint + } + None => { + // create a new outpoint that has collateral funding amount + rpc.send_to_address( + &signer.address, + config.protocol_paramset().collateral_funding_amount, + ) + .await? 
+ } + }; + (outpoint, reimburse_addr) + } + }; + + db.insert_operator_if_not_exists( + Some(&mut dbtx), + signer.xonly_public_key, + &reimburse_addr, + collateral_funding_outpoint, + ) + .await?; + dbtx.commit().await?; + let citrea_client = C::new( + config.citrea_rpc_url.clone(), + config.citrea_light_client_prover_url.clone(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await?; + + tracing::info!( + "Operator xonly pk: {:?}, db created with name: {:?}", + signer.xonly_public_key, + config.db_name + ); + + #[cfg(feature = "automation")] + let header_chain_prover = HeaderChainProver::new(&config, rpc.clone()).await?; + + Ok(Operator { + rpc, + db: db.clone(), + signer, + config, + collateral_funding_outpoint, + #[cfg(feature = "automation")] + tx_sender, + citrea_client, + #[cfg(feature = "automation")] + header_chain_prover, + reimburse_addr, + }) + } + + #[cfg(feature = "automation")] + pub async fn send_initial_round_tx(&self, round_tx: &Transaction) -> Result<(), BridgeError> { + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .insert_try_to_send( + &mut dbtx, + Some(TxMetadata { + tx_type: TransactionType::Round, + operator_xonly_pk: None, + round_idx: Some(RoundIndex::Round(0)), + kickoff_idx: None, + deposit_outpoint: None, + }), + round_tx, + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &[], + ) + .await?; + dbtx.commit().await?; + Ok(()) + } + + /// Returns an operator's winternitz public keys and challenge ackpreimages + /// & hashes. 
+ /// + /// # Returns + /// + /// - [`mpsc::Receiver`]: A [`tokio`] data channel with a type of + /// [`winternitz::PublicKey`] and size of operator's winternitz public + /// keys count + /// - [`mpsc::Receiver`]: A [`tokio`] data channel with a type of + /// [`PublicHash`] and size of operator's challenge ack preimages & hashes + /// count + /// + pub async fn get_params( + &self, + ) -> Result< + ( + mpsc::Receiver, + mpsc::Receiver, + ), + BridgeError, + > { + tracing::info!("Generating operator params"); + tracing::info!("Generating kickoff winternitz pubkeys"); + let wpks = self.generate_kickoff_winternitz_pubkeys()?; + tracing::info!("Kickoff winternitz pubkeys generated"); + let (wpk_tx, wpk_rx) = mpsc::channel(wpks.len()); + let kickoff_wpks = KickoffWinternitzKeys::new( + wpks, + self.config.protocol_paramset().num_kickoffs_per_round, + self.config.protocol_paramset().num_round_txs, + ); + tracing::info!("Starting to generate unspent kickoff signatures"); + let kickoff_sigs = self.generate_unspent_kickoff_sigs(&kickoff_wpks)?; + tracing::info!("Unspent kickoff signatures generated"); + let wpks = kickoff_wpks.keys; + let (sig_tx, sig_rx) = mpsc::channel(kickoff_sigs.len()); + + tokio::spawn(async move { + for wpk in wpks { + wpk_tx + .send(wpk) + .await + .wrap_err("Failed to send winternitz public key")?; + } + + for sig in kickoff_sigs { + sig_tx + .send(sig) + .await + .wrap_err("Failed to send kickoff signature")?; + } + + Ok::<(), BridgeError>(()) + }); + + Ok((wpk_rx, sig_rx)) + } + + pub async fn deposit_sign( + &self, + mut deposit_data: DepositData, + ) -> Result, BridgeError> { + self.citrea_client + .check_nofn_correctness(deposit_data.get_nofn_xonly_pk()?) 
+ .await?; + + let mut tweak_cache = TweakCache::default(); + let (sig_tx, sig_rx) = mpsc::channel(constants::DEFAULT_CHANNEL_SIZE); + + let deposit_blockhash = self + .rpc + .get_blockhash_of_tx(&deposit_data.get_deposit_outpoint().txid) + .await?; + + let mut sighash_stream = Box::pin(create_operator_sighash_stream( + self.db.clone(), + self.signer.xonly_public_key, + self.config.clone(), + deposit_data, + deposit_blockhash, + )); + + let signer = self.signer.clone(); + let handle = tokio::spawn(async move { + while let Some(sighash) = sighash_stream.next().await { + // None because utxos that operators need to sign do not have scripts + let (sighash, sig_info) = sighash?; + let sig = signer.sign_with_tweak_data( + sighash, + sig_info.tweak_data, + Some(&mut tweak_cache), + )?; + + if sig_tx.send(sig).await.is_err() { + break; + } + } + + Ok::<(), BridgeError>(()) + }); + monitor_standalone_task(handle, "Operator deposit sign"); + + Ok(sig_rx) + } + + /// Creates the round state machine by adding a system event to the database + #[cfg(feature = "automation")] + pub async fn track_rounds(&self) -> Result<(), BridgeError> { + let mut dbtx = self.db.begin_transaction().await?; + // set operators own kickoff winternitz public keys before creating the round state machine + // as round machine needs kickoff keys to create the first round tx + self.db + .insert_operator_kickoff_winternitz_public_keys_if_not_exist( + Some(&mut dbtx), + self.signer.xonly_public_key, + self.generate_kickoff_winternitz_pubkeys()?, + ) + .await?; + + StateManager::>::dispatch_new_round_machine( + self.db.clone(), + &mut dbtx, + self.data(), + ) + .await?; + dbtx.commit().await?; + Ok(()) + } + + /// Checks if the withdrawal amount is within the acceptable range. 
+ fn is_profitable( + input_amount: Amount, + withdrawal_amount: Amount, + bridge_amount_sats: Amount, + operator_withdrawal_fee_sats: Amount, + ) -> bool { + // Use checked_sub to safely handle potential underflow + let withdrawal_diff = match withdrawal_amount + .to_sat() + .checked_sub(input_amount.to_sat()) + { + Some(diff) => diff, + None => return false, // If underflow occurs, it's not profitable + }; + + if withdrawal_diff > bridge_amount_sats.to_sat() { + return false; + } + + // Calculate net profit after the withdrawal using checked_sub to prevent panic + let net_profit = match bridge_amount_sats.checked_sub(withdrawal_amount) { + Some(profit) => profit, + None => return false, // If underflow occurs, it's not profitable + }; + + // Net profit must be bigger than withdrawal fee. + net_profit >= operator_withdrawal_fee_sats + } + + /// Prepares a withdrawal by: + /// + /// 1. Checking if the withdrawal has been made on Citrea + /// 2. Verifying the given signature + /// 3. Checking if the withdrawal is profitable or not + /// 4. 
Funding the withdrawal transaction using TxSender RBF option + /// + /// # Parameters + /// + /// - `withdrawal_idx`: Citrea withdrawal UTXO index + /// - `in_signature`: User's signature that is going to be used for signing + /// withdrawal transaction input + /// - `in_outpoint`: User's input for the payout transaction + /// - `out_script_pubkey`: User's script pubkey which will be used + /// in the payout transaction's output + /// - `out_amount`: Payout transaction output's value + /// + /// # Returns + /// + /// - Ok(()) if the withdrawal checks are successful and a payout transaction is added to the TxSender + /// - Err(BridgeError) if the withdrawal checks fail + pub async fn withdraw( + &self, + withdrawal_index: u32, + in_signature: schnorr::Signature, + in_outpoint: OutPoint, + out_script_pubkey: ScriptBuf, + out_amount: Amount, + ) -> Result { + tracing::info!( + "Withdrawing with index: {}, in_signature: {}, in_outpoint: {:?}, out_script_pubkey: {}, out_amount: {}", + withdrawal_index, + in_signature.to_string(), + in_outpoint, + out_script_pubkey.to_string(), + out_amount + ); + + // Prepare input and output of the payout transaction. + let input_prevout = self.rpc.get_txout_from_outpoint(&in_outpoint).await?; + let input_utxo = UTXO { + outpoint: in_outpoint, + txout: input_prevout, + }; + let output_txout = TxOut { + value: out_amount, + script_pubkey: out_script_pubkey, + }; + + // Check Citrea for the withdrawal state. 
+ let withdrawal_utxo = self + .db + .get_withdrawal_utxo_from_citrea_withdrawal(None, withdrawal_index) + .await?; + + if withdrawal_utxo != input_utxo.outpoint { + return Err(eyre::eyre!("Input UTXO does not match withdrawal UTXO from Citrea: Input Outpoint: {0}, Withdrawal Outpoint (from Citrea): {1}", input_utxo.outpoint, withdrawal_utxo).into()); + } + + let operator_withdrawal_fee_sats = + self.config + .operator_withdrawal_fee_sats + .ok_or(BridgeError::ConfigError( + "Operator withdrawal fee sats is not specified in configuration file" + .to_string(), + ))?; + if !Self::is_profitable( + input_utxo.txout.value, + output_txout.value, + self.config.protocol_paramset().bridge_amount, + operator_withdrawal_fee_sats, + ) { + return Err(eyre::eyre!("Not enough fee for operator").into()); + } + + let user_xonly_pk = &input_utxo + .txout + .script_pubkey + .try_get_taproot_pk() + .wrap_err("Input utxo script pubkey is not a valid taproot script")?; + + let payout_txhandler = builder::transaction::create_payout_txhandler( + input_utxo, + output_txout, + self.signer.xonly_public_key, + in_signature, + self.config.protocol_paramset().network, + )?; + + // tracing::info!("Payout txhandler: {:?}", hex::encode(bitcoin::consensus::serialize(&payout_txhandler.get_cached_tx()))); + + let sighash = payout_txhandler + .calculate_sighash_txin(0, bitcoin::sighash::TapSighashType::SinglePlusAnyoneCanPay)?; + + SECP.verify_schnorr( + &in_signature, + &Message::from_digest(*sighash.as_byte_array()), + user_xonly_pk, + ) + .wrap_err("Failed to verify signature received from user for payout txin")?; + + // send payout tx using RBF + let funded_tx = self + .rpc + .fund_raw_transaction( + payout_txhandler.get_cached_tx(), + Some(&bitcoincore_rpc::json::FundRawTransactionOptions { + add_inputs: Some(true), + change_address: None, + change_position: Some(1), + change_type: None, + include_watching: None, + lock_unspents: Some(true), + fee_rate: None, + subtract_fee_from_outputs: None, + 
replaceable: None, + conf_target: None, + estimate_mode: None, + }), + None, + ) + .await + .wrap_err("Failed to fund raw transaction")? + .hex; + + let signed_tx: Transaction = bitcoin::consensus::deserialize( + &self + .rpc + .sign_raw_transaction_with_wallet(&funded_tx, None, None) + .await + .wrap_err("Failed to sign funded tx through bitcoin RPC")? + .hex, + ) + .wrap_err("Failed to deserialize signed tx")?; + + self.rpc + .send_raw_transaction(&signed_tx) + .await + .wrap_err("Failed to send transaction to signed tx")?; + + Ok(signed_tx) + } + + /// Generates Winternitz public keys for every BitVM assert tx for a deposit. + /// + /// # Returns + /// + /// - [`Vec>`]: Winternitz public keys for + /// `watchtower index` row and `BitVM assert tx index` column. + pub fn generate_assert_winternitz_pubkeys( + &self, + deposit_outpoint: bitcoin::OutPoint, + ) -> Result, BridgeError> { + tracing::debug!("Generating assert winternitz pubkeys"); + let bitvm_pks = self + .signer + .generate_bitvm_pks_for_deposit(deposit_outpoint, self.config.protocol_paramset())?; + + let flattened_wpks = bitvm_pks.to_flattened_vec(); + + Ok(flattened_wpks) + } + /// Generates Winternitz public keys for every blockhash commit to be used in kickoff utxos. + /// Unique for each kickoff utxo of operator. + /// + /// # Returns + /// + /// - [`Vec>`]: Winternitz public keys for + /// `round_index` row and `kickoff_idx` column. 
+ pub fn generate_kickoff_winternitz_pubkeys( + &self, + ) -> Result, BridgeError> { + let mut winternitz_pubkeys = + Vec::with_capacity(self.config.get_num_kickoff_winternitz_pks()); + + // we need num_round_txs + 1 because the last round includes reimburse generators of previous round + for round_idx in RoundIndex::iter_rounds(self.config.protocol_paramset().num_round_txs + 1) + { + for kickoff_idx in 0..self.config.protocol_paramset().num_kickoffs_per_round { + let path = WinternitzDerivationPath::Kickoff( + round_idx, + kickoff_idx as u32, + self.config.protocol_paramset(), + ); + winternitz_pubkeys.push(self.signer.derive_winternitz_pk(path)?); + } + } + + if winternitz_pubkeys.len() != self.config.get_num_kickoff_winternitz_pks() { + return Err(eyre::eyre!( + "Expected {} number of kickoff winternitz pubkeys, but got {}", + self.config.get_num_kickoff_winternitz_pks(), + winternitz_pubkeys.len() + ) + .into()); + } + + Ok(winternitz_pubkeys) + } + + pub fn generate_unspent_kickoff_sigs( + &self, + kickoff_wpks: &KickoffWinternitzKeys, + ) -> Result, BridgeError> { + let mut tweak_cache = TweakCache::default(); + let mut sigs: Vec = + Vec::with_capacity(self.config.get_num_unspent_kickoff_sigs()); + let mut prev_ready_to_reimburse: Option = None; + let operator_data = OperatorData { + xonly_pk: self.signer.xonly_public_key, + collateral_funding_outpoint: self.collateral_funding_outpoint, + reimburse_addr: self.reimburse_addr.clone(), + }; + for round_idx in RoundIndex::iter_rounds(self.config.protocol_paramset().num_round_txs) { + let txhandlers = create_round_txhandlers( + self.config.protocol_paramset(), + round_idx, + &operator_data, + kickoff_wpks, + prev_ready_to_reimburse.as_ref(), + )?; + for txhandler in txhandlers { + if let TransactionType::UnspentKickoff(kickoff_idx) = + txhandler.get_transaction_type() + { + let partial = PartialSignatureInfo { + operator_idx: 0, // dummy value + round_idx, + kickoff_utxo_idx: kickoff_idx, + }; + let sighashes = 
txhandler + .calculate_shared_txins_sighash(EntityType::OperatorSetup, partial)?; + let signed_sigs: Result, _> = sighashes + .into_iter() + .map(|(sighash, sig_info)| { + self.signer.sign_with_tweak_data( + sighash, + sig_info.tweak_data, + Some(&mut tweak_cache), + ) + }) + .collect(); + sigs.extend(signed_sigs?); + } + if let TransactionType::ReadyToReimburse = txhandler.get_transaction_type() { + prev_ready_to_reimburse = Some(txhandler); + } + } + } + if sigs.len() != self.config.get_num_unspent_kickoff_sigs() { + return Err(eyre::eyre!( + "Expected {} number of unspent kickoff sigs, but got {}", + self.config.get_num_unspent_kickoff_sigs(), + sigs.len() + ) + .into()); + } + Ok(sigs) + } + + pub fn generate_challenge_ack_preimages_and_hashes( + &self, + deposit_data: &DepositData, + ) -> Result, BridgeError> { + let mut hashes = Vec::with_capacity(self.config.get_num_challenge_ack_hashes(deposit_data)); + + for watchtower_idx in 0..deposit_data.get_num_watchtowers() { + let path = WinternitzDerivationPath::ChallengeAckHash( + watchtower_idx as u32, + deposit_data.get_deposit_outpoint(), + self.config.protocol_paramset(), + ); + let hash = self.signer.generate_public_hash_from_path(path)?; + hashes.push(hash); + } + + if hashes.len() != self.config.get_num_challenge_ack_hashes(deposit_data) { + return Err(eyre::eyre!( + "Expected {} number of challenge ack hashes, but got {}", + self.config.get_num_challenge_ack_hashes(deposit_data), + hashes.len() + ) + .into()); + } + + Ok(hashes) + } + + pub async fn handle_finalized_payout<'a>( + &'a self, + dbtx: DatabaseTransaction<'a, '_>, + deposit_outpoint: OutPoint, + payout_tx_blockhash: BlockHash, + ) -> Result { + let (deposit_id, deposit_data) = self + .db + .get_deposit_data(Some(dbtx), deposit_outpoint) + .await? 
+ .ok_or(BridgeError::DatabaseError(sqlx::Error::RowNotFound))?; + + // get unused kickoff connector + let (round_idx, kickoff_idx) = self + .db + .get_unused_and_signed_kickoff_connector( + Some(dbtx), + deposit_id, + self.signer.xonly_public_key, + ) + .await? + .ok_or(BridgeError::DatabaseError(sqlx::Error::RowNotFound))?; + + let current_round_index = self.db.get_current_round_index(Some(dbtx)).await?; + #[cfg(feature = "automation")] + if current_round_index != round_idx { + // we currently have no free kickoff connectors in the current round, so we need to end round first + // if current_round_index should only be smaller than round_idx, and should not be smaller by more than 1 + // so sanity check: + if current_round_index.next_round() != round_idx { + return Err(eyre::eyre!( + "Internal error: Expected the current round ({:?}) to be equal to or 1 less than the round of the first available kickoff for deposit reimbursement ({:?}) for deposit {:?}. If the round is less than the current round, there is an issue with the logic of the fn that gets the first available kickoff. 
If the round is greater, that means the next round do not have any kickoff connectors available for reimbursement, which should not be possible.", + current_round_index, round_idx, deposit_outpoint + ).into()); + } + // start the next round to be able to get reimbursement for the payout + self.end_round(dbtx).await?; + } + + // get signed txs, + let kickoff_data = KickoffData { + operator_xonly_pk: self.signer.xonly_public_key, + round_idx, + kickoff_idx, + }; + + let payout_tx_blockhash = payout_tx_blockhash.as_byte_array().last_20_bytes(); + + #[cfg(test)] + let payout_tx_blockhash = self + .config + .test_params + .maybe_disrupt_payout_tx_block_hash_commit(payout_tx_blockhash); + + let context = ContractContext::new_context_for_kickoff( + kickoff_data, + deposit_data, + self.config.protocol_paramset(), + ); + + let signed_txs = create_and_sign_txs( + self.db.clone(), + &self.signer, + self.config.clone(), + context, + Some(payout_tx_blockhash), + Some(dbtx), + ) + .await?; + + let tx_metadata = Some(TxMetadata { + tx_type: TransactionType::Dummy, // will be replaced in add_tx_to_queue + operator_xonly_pk: Some(self.signer.xonly_public_key), + round_idx: Some(round_idx), + kickoff_idx: Some(kickoff_idx), + deposit_outpoint: Some(deposit_outpoint), + }); + + // try to send them + for (tx_type, signed_tx) in &signed_txs { + match *tx_type { + TransactionType::Kickoff + | TransactionType::OperatorChallengeAck(_) + | TransactionType::WatchtowerChallengeTimeout(_) + | TransactionType::ChallengeTimeout + | TransactionType::DisproveTimeout + | TransactionType::Reimburse => { + #[cfg(feature = "automation")] + self.tx_sender + .add_tx_to_queue( + dbtx, + *tx_type, + signed_tx, + &signed_txs, + tx_metadata, + &self.config, + None, + ) + .await?; + } + _ => {} + } + } + + let kickoff_txid = signed_txs + .iter() + .find_map(|(tx_type, tx)| { + if let TransactionType::Kickoff = tx_type { + Some(tx.compute_txid()) + } else { + None + } + }) + .ok_or(eyre::eyre!( + "Couldn't 
find kickoff tx in signed_txs".to_string(), + ))?; + + // mark the kickoff connector as used + self.db + .mark_kickoff_connector_as_used(Some(dbtx), round_idx, kickoff_idx, Some(kickoff_txid)) + .await?; + + Ok(kickoff_txid) + } + + #[cfg(feature = "automation")] + async fn start_first_round( + &self, + dbtx: DatabaseTransaction<'_, '_>, + kickoff_wpks: KickoffWinternitzKeys, + ) -> Result<(), BridgeError> { + // try to send the first round tx + let (mut first_round_tx, _) = create_round_nth_txhandler( + self.signer.xonly_public_key, + self.collateral_funding_outpoint, + self.config.protocol_paramset().collateral_funding_amount, + RoundIndex::Round(0), + &kickoff_wpks, + self.config.protocol_paramset(), + )?; + + self.signer + .tx_sign_and_fill_sigs(&mut first_round_tx, &[], None)?; + + self.tx_sender + .insert_try_to_send( + dbtx, + Some(TxMetadata { + tx_type: TransactionType::Round, + operator_xonly_pk: None, + round_idx: Some(RoundIndex::Round(0)), + kickoff_idx: None, + deposit_outpoint: None, + }), + first_round_tx.get_cached_tx(), + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &[], + ) + .await?; + + // update current round index to 1 + self.db + .update_current_round_index(Some(dbtx), RoundIndex::Round(0)) + .await?; + + Ok(()) + } + + #[cfg(feature = "automation")] + pub async fn end_round<'a>( + &'a self, + dbtx: DatabaseTransaction<'a, '_>, + ) -> Result<(), BridgeError> { + // get current round index + let current_round_index = self.db.get_current_round_index(Some(dbtx)).await?; + + let mut activation_prerequisites = Vec::new(); + + let operator_winternitz_public_keys = self + .db + .get_operator_kickoff_winternitz_public_keys(None, self.signer.xonly_public_key) + .await?; + let kickoff_wpks = KickoffWinternitzKeys::new( + operator_winternitz_public_keys, + self.config.protocol_paramset().num_kickoffs_per_round, + self.config.protocol_paramset().num_round_txs, + ); + + // if we are at round 0, which is just the collateral, we need to start the 
first round + if current_round_index == RoundIndex::Collateral { + return self.start_first_round(dbtx, kickoff_wpks).await; + } + + let (current_round_txhandler, mut ready_to_reimburse_txhandler) = + create_round_nth_txhandler( + self.signer.xonly_public_key, + self.collateral_funding_outpoint, + self.config.protocol_paramset().collateral_funding_amount, + current_round_index, + &kickoff_wpks, + self.config.protocol_paramset(), + )?; + + let (mut next_round_txhandler, _) = create_round_nth_txhandler( + self.signer.xonly_public_key, + self.collateral_funding_outpoint, + self.config.protocol_paramset().collateral_funding_amount, + current_round_index.next_round(), + &kickoff_wpks, + self.config.protocol_paramset(), + )?; + + let mut tweak_cache = TweakCache::default(); + + // sign ready to reimburse tx + self.signer.tx_sign_and_fill_sigs( + &mut ready_to_reimburse_txhandler, + &[], + Some(&mut tweak_cache), + )?; + + // sign next round tx + self.signer.tx_sign_and_fill_sigs( + &mut next_round_txhandler, + &[], + Some(&mut tweak_cache), + )?; + + let current_round_txid = current_round_txhandler.get_cached_tx().compute_txid(); + let ready_to_reimburse_tx = ready_to_reimburse_txhandler.get_cached_tx(); + let next_round_tx = next_round_txhandler.get_cached_tx(); + + let ready_to_reimburse_txid = ready_to_reimburse_tx.compute_txid(); + + let mut unspent_kickoff_connector_indices = Vec::new(); + + // get kickoff txid for used kickoff connector + for kickoff_connector_idx in + 0..self.config.protocol_paramset().num_kickoffs_per_round as u32 + { + let kickoff_txid = self + .db + .get_kickoff_txid_for_used_kickoff_connector( + Some(dbtx), + current_round_index, + kickoff_connector_idx, + ) + .await?; + match kickoff_txid { + Some(kickoff_txid) => { + activation_prerequisites.push(ActivatedWithOutpoint { + outpoint: OutPoint { + txid: kickoff_txid, + vout: UtxoVout::KickoffFinalizer.get_vout(), // Kickoff finalizer output index + }, + relative_block_height: 
self.config.protocol_paramset().finality_depth, + }); + } + None => { + let unspent_kickoff_connector = OutPoint { + txid: current_round_txid, + vout: UtxoVout::Kickoff(kickoff_connector_idx as usize).get_vout(), + }; + unspent_kickoff_connector_indices.push(kickoff_connector_idx as usize); + self.db + .mark_kickoff_connector_as_used( + Some(dbtx), + current_round_index, + kickoff_connector_idx, + None, + ) + .await?; + activation_prerequisites.push(ActivatedWithOutpoint { + outpoint: unspent_kickoff_connector, + relative_block_height: self.config.protocol_paramset().finality_depth, + }); + } + } + } + + // Burn unused kickoff connectors + let mut burn_unspent_kickoff_connectors_tx = + create_burn_unused_kickoff_connectors_txhandler( + ¤t_round_txhandler, + &unspent_kickoff_connector_indices, + &self.signer.address, + self.config.protocol_paramset(), + )?; + + // sign burn unused kickoff connectors tx + self.signer.tx_sign_and_fill_sigs( + &mut burn_unspent_kickoff_connectors_tx, + &[], + Some(&mut tweak_cache), + )?; + + self.tx_sender + .insert_try_to_send( + dbtx, + Some(TxMetadata { + tx_type: TransactionType::BurnUnusedKickoffConnectors, + operator_xonly_pk: Some(self.signer.xonly_public_key), + round_idx: Some(current_round_index), + kickoff_idx: None, + deposit_outpoint: None, + }), + burn_unspent_kickoff_connectors_tx.get_cached_tx(), + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &[], + ) + .await?; + + // send ready to reimburse tx + self.tx_sender + .insert_try_to_send( + dbtx, + Some(TxMetadata { + tx_type: TransactionType::ReadyToReimburse, + operator_xonly_pk: Some(self.signer.xonly_public_key), + round_idx: Some(current_round_index), + kickoff_idx: None, + deposit_outpoint: None, + }), + ready_to_reimburse_tx, + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &activation_prerequisites, + ) + .await?; + + // send next round tx + self.tx_sender + .insert_try_to_send( + dbtx, + Some(TxMetadata { + tx_type: TransactionType::Round, + 
operator_xonly_pk: Some(self.signer.xonly_public_key), + round_idx: Some(current_round_index.next_round()), + kickoff_idx: None, + deposit_outpoint: None, + }), + next_round_tx, + FeePayingType::CPFP, + None, + &[], + &[], + &[ActivatedWithTxid { + txid: ready_to_reimburse_txid, + relative_block_height: self + .config + .protocol_paramset() + .operator_reimburse_timelock + as u32, + }], + &[], + ) + .await?; + + // update current round index + self.db + .update_current_round_index(Some(dbtx), current_round_index.next_round()) + .await?; + + Ok(()) + } + + #[cfg(feature = "automation")] + async fn send_asserts( + &self, + kickoff_data: KickoffData, + deposit_data: DepositData, + watchtower_challenges: HashMap, + _payout_blockhash: Witness, + latest_blockhash: Witness, + ) -> Result<(), BridgeError> { + use bridge_circuit_host::utils::{get_verifying_key, is_dev_mode}; + use citrea_sov_rollup_interface::zk::light_client_proof::output::LightClientCircuitOutput; + + let context = ContractContext::new_context_for_kickoff( + kickoff_data, + deposit_data.clone(), + self.config.protocol_paramset(), + ); + let mut db_cache = crate::builder::transaction::ReimburseDbCache::from_context( + self.db.clone(), + &context, + None, + ); + let txhandlers = builder::transaction::create_txhandlers( + TransactionType::Kickoff, + context, + &mut crate::builder::transaction::TxHandlerCache::new(), + &mut db_cache, + ) + .await?; + let move_txid = txhandlers + .get(&TransactionType::MoveToVault) + .ok_or(eyre::eyre!( + "Move to vault txhandler not found in send_asserts" + ))? + .get_cached_tx() + .compute_txid(); + let kickoff_tx = txhandlers + .get(&TransactionType::Kickoff) + .ok_or(eyre::eyre!("Kickoff txhandler not found in send_asserts"))? + .get_cached_tx(); + + let (payout_op_xonly_pk_opt, payout_block_hash, payout_txid, deposit_idx) = self + .db + .get_payout_info_from_move_txid(None, move_txid) + .await + .wrap_err("Failed to get payout info from db during sending asserts.")? 
+ .ok_or_eyre(format!( + "Payout info not found in db while sending asserts for move txid: {}", + move_txid + ))?; + + let payout_op_xonly_pk = payout_op_xonly_pk_opt.ok_or_eyre(format!( + "Payout operator xonly pk not found in payout info DB while sending asserts for deposit move txid: {}", + move_txid + ))?; + + tracing::info!("Sending asserts for deposit_idx: {:?}", deposit_idx); + + if payout_op_xonly_pk != kickoff_data.operator_xonly_pk { + return Err(eyre::eyre!( + "Payout operator xonly pk does not match kickoff operator xonly pk in send_asserts" + ) + .into()); + } + + let (payout_block_height, payout_block) = self + .db + .get_full_block_from_hash(None, payout_block_hash) + .await? + .ok_or_eyre(format!( + "Payout block {:?} {:?} not found in db", + payout_op_xonly_pk, payout_block_hash + ))?; + + let payout_tx_index = payout_block + .txdata + .iter() + .position(|tx| tx.compute_txid() == payout_txid) + .ok_or_eyre(format!( + "Payout txid {:?} not found in block {:?} {:?}", + payout_txid, payout_op_xonly_pk, payout_block_hash + ))?; + let payout_tx = &payout_block.txdata[payout_tx_index]; + tracing::debug!("Calculated payout tx in send_asserts: {:?}", payout_tx); + + let lcp_receipt = self + .citrea_client + .fetch_validate_and_store_lcp( + payout_block_height as u64, + deposit_idx as u32, + &self.db, + None, + self.config.protocol_paramset(), + ) + .await?; + let proof_output: LightClientCircuitOutput = borsh::from_slice(&lcp_receipt.journal.bytes) + .wrap_err("Failed to deserialize light client circuit output")?; + let l2_height = proof_output.last_l2_height; + let hex_l2_str = format!("0x{:x}", l2_height); + let light_client_proof = LightClientProof { + lc_journal: lcp_receipt.journal.bytes.clone(), + l2_height: hex_l2_str, + }; + + tracing::info!("Got light client proof in send_asserts"); + + let storage_proof = self + .citrea_client + .get_storage_proof(l2_height, deposit_idx as u32) + .await + .wrap_err(format!( + "Failed to get storage proof for 
move txid {:?}, l2 height {}, deposit_idx {}", + move_txid, l2_height, deposit_idx + ))?; + + tracing::debug!("Got storage proof in send_asserts {:?}", storage_proof); + + // get committed latest blockhash + let wt_derive_path = ClementineBitVMPublicKeys::get_latest_blockhash_derivation( + deposit_data.get_deposit_outpoint(), + self.config.protocol_paramset(), + ); + let commits = extract_winternitz_commits( + latest_blockhash, + &[wt_derive_path], + self.config.protocol_paramset(), + )?; + + let latest_blockhash_last_20: [u8; 20] = commits + .first() + .ok_or_eyre("Failed to get latest blockhash in send_asserts")? + .to_owned() + .try_into() + .map_err(|_| eyre::eyre!("Committed latest blockhash is not 20 bytes long"))?; + + #[cfg(test)] + let latest_blockhash_last_20 = self + .config + .test_params + .maybe_disrupt_latest_block_hash_commit(latest_blockhash_last_20); + + let rpc_current_finalized_height = self + .rpc + .get_current_chain_height() + .await? + .saturating_sub(self.config.protocol_paramset().finality_depth); + + // update headers in case the sync (state machine handle_finalized_block) is behind + self.db + .fetch_and_save_missing_blocks( + &self.rpc, + self.config.protocol_paramset().genesis_height, + rpc_current_finalized_height + 1, + ) + .await?; + + let current_height = self + .db + .get_latest_finalized_block_height(None) + .await? 
+ .ok_or_eyre("Failed to get current finalized block height")?; + + let block_hashes = self + .db + .get_block_info_from_range( + None, + self.config.protocol_paramset().genesis_height as u64, + current_height, + ) + .await?; + + // find out which blockhash is latest_blockhash (only last 20 bytes is committed to Witness) + let latest_blockhash_index = block_hashes + .iter() + .position(|(block_hash, _)| { + block_hash.as_byte_array().last_20_bytes() == latest_blockhash_last_20 + }) + .ok_or_eyre("Failed to find latest blockhash in send_asserts")?; + + let latest_blockhash = block_hashes[latest_blockhash_index].0; + + let (current_hcp, _hcp_height) = self + .header_chain_prover + .prove_till_hash(latest_blockhash) + .await?; + + #[cfg(test)] + let mut total_works: Vec<[u8; 16]> = Vec::with_capacity(watchtower_challenges.len()); + + #[cfg(test)] + { + use bridge_circuit_host::utils::total_work_from_wt_tx; + for (_, tx) in watchtower_challenges.iter() { + let total_work = total_work_from_wt_tx(tx); + total_works.push(total_work); + } + tracing::debug!("Total works: {:?}", total_works); + } + + #[cfg(test)] + let current_hcp = self + .config + .test_params + .maybe_override_current_hcp( + current_hcp, + payout_block_hash, + &block_hashes, + &self.header_chain_prover, + total_works.clone(), + ) + .await?; + + tracing::info!("Got header chain proof in send_asserts"); + + let blockhashes_serialized: Vec<[u8; 32]> = block_hashes + .iter() + .take(latest_blockhash_index + 1) + .map(|(h, _)| h.to_byte_array()) + .collect(); + + #[cfg(test)] + let blockhashes_serialized = self + .config + .test_params + .maybe_override_blockhashes_serialized( + blockhashes_serialized, + payout_block_height, + self.config.protocol_paramset().genesis_height, + total_works, + ); + + tracing::debug!( + "Genesis height - Before SPV: {},", + self.config.protocol_paramset().genesis_height + ); + + let spv = create_spv( + payout_tx.clone(), + &blockhashes_serialized, + payout_block.clone(), + 
payout_block_height, + self.config.protocol_paramset().genesis_height, + payout_tx_index as u32, + )?; + tracing::info!("Calculated spv proof in send_asserts"); + + let mut wt_contexts = Vec::new(); + for (_, tx) in watchtower_challenges.iter() { + wt_contexts.push(WatchtowerContext { + watchtower_tx: tx.clone(), + prevout_txs: self.rpc.get_prevout_txs(tx).await?, + }); + } + + #[cfg(test)] + { + if self.config.test_params.operator_forgot_watchtower_challenge { + tracing::info!("Disrupting watchtower challenges in send_asserts"); + wt_contexts.pop(); + } + } + + let watchtower_challenge_connector_start_idx = + (FIRST_FIVE_OUTPUTS + NUMBER_OF_ASSERT_TXS) as u16; + + let bridge_circuit_host_params = BridgeCircuitHostParams::new_with_wt_tx( + kickoff_tx.clone(), + spv, + current_hcp, + light_client_proof, + lcp_receipt, + storage_proof, + self.config.protocol_paramset().network, + &wt_contexts, + watchtower_challenge_connector_start_idx, + ) + .wrap_err("Failed to create bridge circuit host params in send_asserts")?; + + let bridge_circuit_elf = match self.config.protocol_paramset().network { + bitcoin::Network::Bitcoin => MAINNET_BRIDGE_CIRCUIT_ELF, + bitcoin::Network::Testnet4 => TESTNET4_BRIDGE_CIRCUIT_ELF, + bitcoin::Network::Signet => SIGNET_BRIDGE_CIRCUIT_ELF, + bitcoin::Network::Regtest => { + if is_dev_mode() { + REGTEST_BRIDGE_CIRCUIT_ELF_TEST + } else { + REGTEST_BRIDGE_CIRCUIT_ELF + } + } + _ => { + return Err(eyre::eyre!( + "Unsupported network {:?} in send_asserts", + self.config.protocol_paramset().network + ) + .into()) + } + }; + tracing::info!("Starting proving bridge circuit to send asserts"); + + #[cfg(test)] + self.config + .test_params + .maybe_dump_bridge_circuit_params_to_file(&bridge_circuit_host_params)?; + + #[cfg(test)] + self.config + .test_params + .maybe_dump_bridge_circuit_params_to_file(&bridge_circuit_host_params)?; + + let (g16_proof, g16_output, public_inputs) = + prove_bridge_circuit(bridge_circuit_host_params, bridge_circuit_elf)?; 
+ + tracing::info!("Proved bridge circuit in send_asserts"); + let public_input_scalar = ark_bn254::Fr::from_be_bytes_mod_order(&g16_output); + + #[cfg(test)] + let mut public_inputs = public_inputs; + + #[cfg(test)] + { + if self + .config + .test_params + .disrupt_challenge_sending_watchtowers_commit + { + tracing::info!("Disrupting challenge sending watchtowers commit in send_asserts"); + public_inputs.challenge_sending_watchtowers[0] ^= 0x01; + tracing::info!( + "Disrupted challenge sending watchtowers commit: {:?}", + public_inputs.challenge_sending_watchtowers + ); + } + } + + tracing::info!( + "Challenge sending watchtowers commit: {:?}", + public_inputs.challenge_sending_watchtowers + ); + + let asserts = tokio::task::spawn_blocking(move || { + let vk = get_verifying_key(); + + generate_assertions(g16_proof, vec![public_input_scalar], &vk).map_err(|e| { + eyre::eyre!( + "Failed to generate {}assertions: {}", + if is_dev_mode() { "dev mode " } else { "" }, + e + ) + }) + }) + .await + .wrap_err("Generate assertions thread failed with error")??; + + tracing::warn!("Generated assertions in send_asserts"); + + #[cfg(test)] + let asserts = self.config.test_params.maybe_corrupt_asserts(asserts); + + let assert_txs = self + .create_assert_commitment_txs( + TransactionRequestData { + kickoff_data, + deposit_outpoint: deposit_data.get_deposit_outpoint(), + }, + ClementineBitVMPublicKeys::get_assert_commit_data( + asserts, + &public_inputs.challenge_sending_watchtowers, + ), + None, + ) + .await?; + + let mut dbtx = self.db.begin_transaction().await?; + for (tx_type, tx) in assert_txs { + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + tx_type, + &tx, + &[], + Some(TxMetadata { + tx_type, + operator_xonly_pk: Some(self.signer.xonly_public_key), + round_idx: Some(kickoff_data.round_idx), + kickoff_idx: Some(kickoff_data.kickoff_idx), + deposit_outpoint: Some(deposit_data.get_deposit_outpoint()), + }), + &self.config, + None, + ) + .await?; + } + 
dbtx.commit().await?; + Ok(()) + } + + #[cfg(feature = "automation")] + fn data(&self) -> OperatorData { + OperatorData { + xonly_pk: self.signer.xonly_public_key, + collateral_funding_outpoint: self.collateral_funding_outpoint, + reimburse_addr: self.reimburse_addr.clone(), + } + } + + #[cfg(feature = "automation")] + async fn send_latest_blockhash( + &self, + kickoff_data: KickoffData, + deposit_data: DepositData, + latest_blockhash: BlockHash, + ) -> Result<(), BridgeError> { + tracing::warn!("Operator sending latest blockhash"); + let deposit_outpoint = deposit_data.get_deposit_outpoint(); + let (tx_type, tx) = self + .create_latest_blockhash_tx( + TransactionRequestData { + deposit_outpoint, + kickoff_data, + }, + latest_blockhash, + None, + ) + .await?; + if tx_type != TransactionType::LatestBlockhash { + return Err(eyre::eyre!("Latest blockhash tx type is not LatestBlockhash").into()); + } + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + tx_type, + &tx, + &[], + Some(TxMetadata { + tx_type, + operator_xonly_pk: Some(self.signer.xonly_public_key), + round_idx: Some(kickoff_data.round_idx), + kickoff_idx: Some(kickoff_data.kickoff_idx), + deposit_outpoint: Some(deposit_outpoint), + }), + &self.config, + None, + ) + .await?; + dbtx.commit().await?; + Ok(()) + } + + /// For a deposit_id checks that the payer for that deposit is the operator, and the payout blockhash and kickoff txid are set. 
+ async fn validate_payer_is_operator( + &self, + dbtx: Option>, + deposit_id: u32, + ) -> Result<(BlockHash, Txid), BridgeError> { + let (payer_xonly_pk, payout_blockhash, kickoff_txid) = self + .db + .get_payer_xonly_pk_blockhash_and_kickoff_txid_from_deposit_id(dbtx, deposit_id) + .await?; + + tracing::info!( + "Payer xonly pk and kickoff txid found for the requested deposit, payer xonly pk: {:?}, kickoff txid: {:?}", + payer_xonly_pk, + kickoff_txid + ); + + // first check if the payer is the operator, and the kickoff is handled + // by the PayoutCheckerTask, meaning kickoff_txid is set + let (payout_blockhash, kickoff_txid) = match ( + payer_xonly_pk, + payout_blockhash, + kickoff_txid, + ) { + (Some(payer_xonly_pk), Some(payout_blockhash), Some(kickoff_txid)) => { + if payer_xonly_pk != self.signer.xonly_public_key { + return Err(eyre::eyre!( + "Payer is not own operator for deposit, payer xonly pk: {:?}, operator xonly pk: {:?}", + payer_xonly_pk, + self.signer.xonly_public_key + ) + .into()); + } + (payout_blockhash, kickoff_txid) + } + _ => { + return Err(eyre::eyre!( + "Payer info not found for deposit, payout blockhash: {:?}, kickoff txid: {:?}", + payout_blockhash, + kickoff_txid + ) + .into()); + } + }; + + tracing::info!( + "Payer xonly pk, payout blockhash and kickoff txid found and valid for own operator for the requested deposit id: {}, payer xonly pk: {:?}, payout blockhash: {:?}, kickoff txid: {:?}", + deposit_id, + payer_xonly_pk, + payout_blockhash, + kickoff_txid + ); + + Ok((payout_blockhash, kickoff_txid)) + } + + async fn get_next_txs_to_send( + &self, + mut dbtx: Option>, + deposit_data: &mut DepositData, + payout_blockhash: BlockHash, + kickoff_txid: Txid, + current_round_idx: RoundIndex, + ) -> Result, BridgeError> { + let mut txs_to_send = Vec::new(); + + // get used kickoff connector for the kickoff txid + let (kickoff_round_idx, kickoff_connector_idx) = self + .db + .get_kickoff_connector_for_kickoff_txid(dbtx.as_deref_mut(), 
kickoff_txid) + .await?; + + let context = ContractContext::new_context_for_kickoff( + KickoffData { + operator_xonly_pk: self.signer.xonly_public_key, + round_idx: kickoff_round_idx, + kickoff_idx: kickoff_connector_idx, + }, + deposit_data.clone(), + self.config.protocol_paramset(), + ); + + // get txs for the kickoff + let kickoff_txs = create_and_sign_txs( + self.db.clone(), + &self.signer, + self.config.clone(), + context, + Some(payout_blockhash.to_byte_array().last_20_bytes()), + dbtx.as_deref_mut(), + ) + .await?; + + // check the current round status compared to the round of the assigned kickoff tx + match current_round_idx + .to_index() + .cmp(&kickoff_round_idx.to_index()) + { + std::cmp::Ordering::Less => { + // We need to advance the round manually to be able to start the kickoff + tracing::info!("We need to advance the round manually to be able to start the kickoff, current round idx: {:?}, kickoff round idx: {:?}", current_round_idx, kickoff_round_idx); + let txs = self.advance_round_manually(dbtx, current_round_idx).await?; + txs_to_send.extend(txs); + } + std::cmp::Ordering::Greater => { + tracing::info!("We are at least on the next round, meaning we can get the reimbursement as reimbursement utxos are in the next round, current round idx: {:?}, kickoff round idx: {:?}", current_round_idx, kickoff_round_idx); + // we are at least on the next round, meaning we can get the reimbursement as reimbursement utxos are in the next round + let reimbursement_tx = kickoff_txs + .iter() + .find(|(tx_type, _)| tx_type == &TransactionType::Reimburse) + .ok_or(eyre::eyre!("Reimburse tx not found in kickoff txs"))?; + txs_to_send.push(reimbursement_tx.clone()); + } + std::cmp::Ordering::Equal => { + // first check if the kickoff is in chain + if !self.rpc.is_tx_on_chain(&kickoff_txid).await? 
{ + tracing::info!( + "Kickoff tx is not on chain, can send it, kickoff txid: {:?}", + kickoff_txid + ); + let kickoff_tx = kickoff_txs + .iter() + .find(|(tx_type, _)| tx_type == &TransactionType::Kickoff) + .ok_or(eyre::eyre!("Kickoff tx not found in kickoff txs"))?; + + // fetch and save the LCP for if we get challenged and need to provide proof of payout later + let (_, payout_block_height) = self + .db + .get_block_info_from_hash(dbtx.as_deref_mut(), payout_blockhash) + .await? + .ok_or_eyre("Couldn't find payout blockhash in bitcoin sync")?; + + let move_txid = Txid::all_zeros(); + + let (_, _, _, citrea_idx) = self + .db + .get_payout_info_from_move_txid(dbtx.as_deref_mut(), move_txid) + .await? + .ok_or_eyre("Couldn't find payout info from move txid")?; + + let _ = self + .citrea_client + .fetch_validate_and_store_lcp( + payout_block_height as u64, + citrea_idx as u32, + &self.db, + dbtx.as_deref_mut(), + self.config.protocol_paramset(), + ) + .await?; + + // sanity check + if kickoff_tx.1.compute_txid() != kickoff_txid { + return Err(eyre::eyre!("Kickoff txid mismatch for deposit outpoint: {}, kickoff txid: {:?}, computed txid: {:?}", + deposit_data.get_deposit_outpoint(), kickoff_txid, kickoff_tx.1.compute_txid()).into()); + } + txs_to_send.push(kickoff_tx.clone()); + } + // kickoff tx is on chain, check if kickoff finalizer is spent + else if !self + .rpc + .is_utxo_spent(&OutPoint { + txid: kickoff_txid, + vout: UtxoVout::KickoffFinalizer.get_vout(), + }) + .await? + { + // kickoff finalizer is not spent, we need to send challenge timeout + tracing::info!( + "Kickoff finalizer is not spent, can send challenge timeout, kickoff txid: {:?}", + kickoff_txid + ); + // first check if challenge tx was sent, then we need automation enabled to be able to answer the challenge + if self + .rpc + .is_utxo_spent(&OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Challenge.get_vout(), + }) + .await? 
+ { + // challenge tx was sent, we need automation enabled to be able to answer the challenge + tracing::warn!( + "Challenge tx was sent for deposit outpoint: {:?}, but automation is not enabled, enable automation!", + deposit_data.get_deposit_outpoint() + ); + return Err(eyre::eyre!("WARNING: Challenge tx was sent to kickoff connector {:?}, but automation is not enabled, enable automation!", kickoff_txid).into()); + } + let challenge_timeout_tx = kickoff_txs + .iter() + .find(|(tx_type, _)| tx_type == &TransactionType::ChallengeTimeout) + .ok_or(eyre::eyre!("Challenge timeout tx not found in kickoff txs"))?; + txs_to_send.push(challenge_timeout_tx.clone()); + } else { + // if kickoff finalizer is spent, it is time to get the reimbursement + tracing::info!( + "Kickoff finalizer is spent, can advance the round manually to get the reimbursement, current round idx: {:?}, kickoff round idx: {:?}", + current_round_idx, + kickoff_round_idx + ); + let txs = self.advance_round_manually(dbtx, current_round_idx).await?; + txs_to_send.extend(txs); + } + } + } + Ok(txs_to_send) + } + + /// For a given deposit outpoint, get the txs that are needed to reimburse the deposit. + /// To avoid operator getting slashed, this function only returns the next tx that needs to be sent + /// This fn can track and enable sending of these transactions during a normal reimbursement process. + /// + /// - First, if the current round is less than the round of the kickoff assigned to the deposit by PayoutCheckerTask, it returns the Round TX. + /// - After Round tx is sent, it returns the Kickoff tx. + /// - After Kickoff tx is sent, it returns the challenge timeout tx. + /// - After challenge timeout tx is sent, it returns BurnUnusedKickoffConnectors tx. If challenge timeout tx is not sent, and but challenge utxo was spent, it means the kickoff was challenged, thus the fn returns an error as it cannot handle the challenge process. Automation is required to answer the challenge. 
+ /// - After all kickoff utxos are spent, and for any live kickoff, all kickoff finalizers are spent, it returns the ReadyToReimburse tx. + /// - After ReadyToReimburse tx is sent, it returns the next Round tx to generate reimbursement utxos. + /// - Finally, after the next round tx is sent, it returns the Reimburse tx. + pub async fn get_reimbursement_txs( + &self, + deposit_outpoint: OutPoint, + ) -> Result, BridgeError> { + let mut dbtx = self.db.begin_transaction().await?; + // first check if the deposit is in the database + let (deposit_id, mut deposit_data) = self + .db + .get_deposit_data(Some(&mut dbtx), deposit_outpoint) + .await? + .ok_or_eyre(format!( + "Deposit data not found for the requested deposit outpoint: {:?}, make sure you send the deposit outpoint, not the move txid.", + deposit_outpoint + ))?; + + tracing::info!( + "Deposit data found for the requested deposit outpoint: {:?}, deposit id: {:?}", + deposit_outpoint, + deposit_id + ); + + // validate payer is operator and get payer xonly pk, payout blockhash and kickoff txid + let (payout_blockhash, kickoff_txid) = self + .validate_payer_is_operator(Some(&mut dbtx), deposit_id) + .await?; + + let mut current_round_idx = self.db.get_current_round_index(Some(&mut dbtx)).await?; + + let mut txs_to_send: Vec<(TransactionType, Transaction)>; + + loop { + txs_to_send = self + .get_next_txs_to_send( + Some(&mut dbtx), + &mut deposit_data, + payout_blockhash, + kickoff_txid, + current_round_idx, + ) + .await?; + if txs_to_send.is_empty() { + // if no txs were returned, and we advanced the round in the db, ask for the next txs again + // with the new round index + let round_idx_after_operations = + self.db.get_current_round_index(Some(&mut dbtx)).await?; + if round_idx_after_operations != current_round_idx { + current_round_idx = round_idx_after_operations; + continue; + } + } + break; + } + + dbtx.commit().await?; + Ok(txs_to_send) + } + + /// Checks the current round status, and returns the next txs 
that are safe to send to be + /// able to advance to the next round. + async fn advance_round_manually( + &self, + mut dbtx: Option>, + round_idx: RoundIndex, + ) -> Result, BridgeError> { + if round_idx == RoundIndex::Collateral { + // if current round is collateral, nothing to do except send the first round tx + return self.send_next_round_tx(dbtx, round_idx).await; + } + + // get round txhandlers + let context = ContractContext::new_context_for_round( + self.signer.xonly_public_key, + round_idx, + self.config.protocol_paramset(), + ); + + let txs = create_and_sign_txs( + self.db.clone(), + &self.signer, + self.config.clone(), + context, + None, + dbtx.as_deref_mut(), + ) + .await?; + + let round_tx = txs + .iter() + .find(|(tx_type, _)| tx_type == &TransactionType::Round) + .ok_or(eyre::eyre!("Round tx not found in txs"))?; + + if !self.rpc.is_tx_on_chain(&round_tx.1.compute_txid()).await? { + return Err(eyre::eyre!("Round tx for round {:?} is not on chain, but the database shows we are on this round, error", round_idx).into()); + } + + // check if ready to reimburse tx was sent + let ready_to_reimburse_tx = txs + .iter() + .find(|(tx_type, _)| tx_type == &TransactionType::ReadyToReimburse) + .ok_or(eyre::eyre!("Ready to reimburse tx not found in txs"))?; + + let mut txs_to_send = Vec::new(); + + // to be able to send ready to reimburse tx, we need to make sure, all kickoff utxos are spent, and for all kickoffs, all kickoff finalizers are spent + if !self + .rpc + .is_tx_on_chain(&ready_to_reimburse_tx.1.compute_txid()) + .await? + { + tracing::info!("Ready to reimburse tx for round {:?} is not on chain, checking prerequisites to see if we are able to send it + Prerequisites: + - all kickoff utxos are spent + - for all kickoffs, all kickoff finalizers are spent + ", round_idx); + // get max height saved in bitcoin syncer + let current_chain_height = self + .db + .get_max_height(dbtx.as_deref_mut()) + .await? 
+ .ok_or_eyre("Max block height is not found in the btc syncer database")?; + + let round_txid = round_tx.1.compute_txid(); + let (unspent_kickoff_utxos, are_all_utxos_spent_finalized) = self + .find_and_mark_unspent_kickoff_utxos( + dbtx.as_deref_mut(), + round_idx, + round_txid, + current_chain_height, + ) + .await?; + + if !unspent_kickoff_utxos.is_empty() { + let burn_txs = self + .create_burn_unused_kickoff_connectors_tx(round_idx, &unspent_kickoff_utxos) + .await?; + txs_to_send.extend(burn_txs); + } else if !are_all_utxos_spent_finalized { + // if some utxos are not spent, we need to wait until they are spent + return Err(eyre::eyre!(format!( + "The transactions that spend the kickoff utxos are not yet finalized, wait until they are finalized. Finality depth: {} + If they are actually finalized, but this error is returned, it means internal bitcoin syncer is slow or stopped.", + self.config.protocol_paramset().finality_depth + )) + .into()); + } else { + // every kickoff utxo is spent, but we need to check if all kickoff finalizers are spent + // if not, we return and error and wait until they are spent + // if all finalizers are spent, it is safe to send ready to reimburse tx + self.validate_all_kickoff_finalizers_spent( + dbtx.as_deref_mut(), + round_idx, + current_chain_height, + ) + .await?; + // all finalizers and kickoff utxos are spent, it is safe to send ready to reimburse tx + txs_to_send.push(ready_to_reimburse_tx.clone()); + } + } else { + // ready to reimburse tx is on chain, we need to wait for the timelock to send the next round tx + // first check if next round tx is already sent, that means we can update the database + txs_to_send.extend(self.send_next_round_tx(dbtx, round_idx).await?); + } + + Ok(txs_to_send) + } + + /// Finds unspent kickoff UTXOs and marks spent ones as used in the database. 
+ /// Returns the unspent kickoff utxos (doesn't matter if finalized or unfinalized) and a boolean to mark if all utxos are spent and finalized + async fn find_and_mark_unspent_kickoff_utxos( + &self, + mut dbtx: Option>, + round_idx: RoundIndex, + round_txid: Txid, + current_chain_height: u32, + ) -> Result<(Vec, bool), BridgeError> { + // check and collect all kickoff utxos that are not spent + let mut unspent_kickoff_utxos = Vec::new(); + // a variable to mark if any any kickoff utxo is spent, but still not finalized + let mut fully_finalized_spent = true; + for kickoff_idx in 0..self.config.protocol_paramset().num_kickoffs_per_round { + let kickoff_utxo = OutPoint { + txid: round_txid, + vout: UtxoVout::Kickoff(kickoff_idx).get_vout(), + }; + if !self.rpc.is_utxo_spent(&kickoff_utxo).await? { + unspent_kickoff_utxos.push(kickoff_idx); + } else { + // set the kickoff connector as used (it will do nothing if the utxo is already in db, so it won't overwrite the kickoff txid) + // mark so that we don't try to use this utxo anymore + self.db + .mark_kickoff_connector_as_used( + dbtx.as_deref_mut(), + round_idx, + kickoff_idx as u32, + None, + ) + .await?; + // check if the tx that spent the kickoff utxo is finalized + // use btc syncer for this + fully_finalized_spent &= self + .db + .check_if_utxo_spending_tx_is_finalized( + dbtx.as_deref_mut(), + kickoff_utxo, + current_chain_height, + self.config.protocol_paramset().finality_depth, + ) + .await?; + } + } + Ok((unspent_kickoff_utxos, fully_finalized_spent)) + } + + /// Creates a transaction that burns unused kickoff connectors. 
+ async fn create_burn_unused_kickoff_connectors_tx( + &self, + round_idx: RoundIndex, + unspent_kickoff_utxos: &[usize], + ) -> Result, BridgeError> { + tracing::info!( + "There are unspent kickoff utxos {:?}, creating a tx that spends them", + unspent_kickoff_utxos + ); + let operator_winternitz_public_keys = self.generate_kickoff_winternitz_pubkeys()?; + let kickoff_wpks = KickoffWinternitzKeys::new( + operator_winternitz_public_keys, + self.config.protocol_paramset().num_kickoffs_per_round, + self.config.protocol_paramset().num_round_txs, + ); + // if there are unspent kickoff utxos, create a tx that spends them + let (round_txhandler, _ready_to_reimburse_txhandler) = create_round_nth_txhandler( + self.signer.xonly_public_key, + self.collateral_funding_outpoint, + self.config.protocol_paramset().collateral_funding_amount, + round_idx, + &kickoff_wpks, + self.config.protocol_paramset(), + )?; + let mut burn_unused_kickoff_connectors_txhandler = + create_burn_unused_kickoff_connectors_txhandler( + &round_txhandler, + unspent_kickoff_utxos, + &self.reimburse_addr, + self.config.protocol_paramset(), + )?; + + // sign burn unused kickoff connectors tx + self.signer.tx_sign_and_fill_sigs( + &mut burn_unused_kickoff_connectors_txhandler, + &[], + None, + )?; + let burn_unused_kickoff_connectors_txhandler = + burn_unused_kickoff_connectors_txhandler.promote()?; + Ok(vec![( + TransactionType::BurnUnusedKickoffConnectors, + burn_unused_kickoff_connectors_txhandler + .get_cached_tx() + .clone(), + )]) + } + + /// Validates that all kickoff finalizers are spent for the given round. 
+ async fn validate_all_kickoff_finalizers_spent( + &self, + mut dbtx: Option>, + round_idx: RoundIndex, + current_chain_height: u32, + ) -> Result<(), BridgeError> { + // we need to check if all finalizers are spent + for kickoff_idx in 0..self.config.protocol_paramset().num_kickoffs_per_round { + let kickoff_txid = self + .db + .get_kickoff_txid_for_used_kickoff_connector( + dbtx.as_deref_mut(), + round_idx, + kickoff_idx as u32, + ) + .await?; + if let Some(kickoff_txid) = kickoff_txid { + let deposit_outpoint = self + .db + .get_deposit_outpoint_for_kickoff_txid(dbtx.as_deref_mut(), kickoff_txid) + .await?; + let kickoff_finalizer_utxo = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::KickoffFinalizer.get_vout(), + }; + if !self.rpc.is_tx_on_chain(&kickoff_txid).await? { + return Err(eyre::eyre!("For round {:?} and kickoff utxo {:?}, the kickoff tx {:?} is not on chain, + reimburse the deposit {:?} corresponding to this kickoff first. " + , round_idx, kickoff_idx, kickoff_txid, deposit_outpoint).into()); + } else if !self.rpc.is_utxo_spent(&kickoff_finalizer_utxo).await? { + return Err(eyre::eyre!("For round {:?} and kickoff utxo {:?}, the kickoff finalizer {:?} is not spent, + send the challenge timeout tx for the deposit {:?} first", round_idx, kickoff_idx, kickoff_txid, deposit_outpoint).into()); + } else if !self + .db + .check_if_utxo_spending_tx_is_finalized( + dbtx.as_deref_mut(), + kickoff_finalizer_utxo, + current_chain_height, + self.config.protocol_paramset().finality_depth, + ) + .await? + { + return Err(eyre::eyre!("For round {:?} and kickoff utxo {:?}, the kickoff finalizer utxo {:?} is spent, but not yet finalized, wait until it is finalized. 
Finality depth: {} + If the transaction is actually finalized, but this error is returned, it means internal bitcoin syncer is slow or stopped.", round_idx, kickoff_idx, kickoff_finalizer_utxo, self.config.protocol_paramset().finality_depth).into()); + } + } + } + Ok(()) + } + + /// Checks if the next round tx is on chain, if it is, updates the database, otherwise returns the round tx that needs to be sent. + async fn send_next_round_tx( + &self, + mut dbtx: Option>, + round_idx: RoundIndex, + ) -> Result, BridgeError> { + let next_round_context = ContractContext::new_context_for_round( + self.signer.xonly_public_key, + round_idx.next_round(), + self.config.protocol_paramset(), + ); + let next_round_txs = create_and_sign_txs( + self.db.clone(), + &self.signer, + self.config.clone(), + next_round_context, + None, + dbtx.as_deref_mut(), + ) + .await?; + let next_round_tx = next_round_txs + .iter() + .find(|(tx_type, _)| tx_type == &TransactionType::Round) + .ok_or(eyre::eyre!("Next round tx not found in txs"))?; + let next_round_txid = next_round_tx.1.compute_txid(); + + if !self.rpc.is_tx_on_chain(&next_round_txid).await? 
{ + // if next round tx is not on chain, we need to wait for the timelock to send it + Ok(vec![next_round_tx.clone()]) + } else { + // if next round tx is on chain, we need to update the database + self.db + .update_current_round_index(dbtx, round_idx.next_round()) + .await?; + Ok(vec![]) + } + } +} + +impl NamedEntity for Operator +where + C: CitreaClientT, +{ + const ENTITY_NAME: &'static str = "operator"; + // operators use their verifier's tx sender + const TX_SENDER_CONSUMER_ID: &'static str = "verifier_tx_sender"; + const FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION: &'static str = + "operator_finalized_block_fetcher_automation"; + const FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION: &'static str = + "operator_finalized_block_fetcher_no_automation"; +} + +#[cfg(feature = "automation")] +mod states { + use super::*; + use crate::builder::transaction::{ + create_txhandlers, ContractContext, ReimburseDbCache, TransactionType, TxHandler, + TxHandlerCache, + }; + use crate::states::context::DutyResult; + use crate::states::{block_cache, Duty, Owner, StateManager}; + use std::collections::BTreeMap; + use std::sync::Arc; + + #[tonic::async_trait] + impl Owner for Operator + where + C: CitreaClientT, + { + async fn handle_duty(&self, duty: Duty) -> Result { + match duty { + Duty::NewReadyToReimburse { + round_idx, + operator_xonly_pk, + used_kickoffs, + } => { + tracing::info!("Operator {:?} called new ready to reimburse with round_idx: {:?}, operator_xonly_pk: {:?}, used_kickoffs: {:?}", + self.signer.xonly_public_key, round_idx, operator_xonly_pk, used_kickoffs); + Ok(DutyResult::Handled) + } + Duty::WatchtowerChallenge { .. 
} => Ok(DutyResult::Handled), + Duty::SendOperatorAsserts { + kickoff_data, + deposit_data, + watchtower_challenges, + payout_blockhash, + latest_blockhash, + } => { + tracing::warn!("Operator {:?} called send operator asserts with kickoff_data: {:?}, deposit_data: {:?}, watchtower_challenges: {:?}", + self.signer.xonly_public_key, kickoff_data, deposit_data, watchtower_challenges.len()); + self.send_asserts( + kickoff_data, + deposit_data, + watchtower_challenges, + payout_blockhash, + latest_blockhash, + ) + .await?; + Ok(DutyResult::Handled) + } + Duty::VerifierDisprove { .. } => Ok(DutyResult::Handled), + Duty::SendLatestBlockhash { + kickoff_data, + deposit_data, + latest_blockhash, + } => { + tracing::warn!("Operator {:?} called send latest blockhash with kickoff_id: {:?}, deposit_data: {:?}, latest_blockhash: {:?}", self.signer.xonly_public_key, kickoff_data, deposit_data, latest_blockhash); + self.send_latest_blockhash(kickoff_data, deposit_data, latest_blockhash) + .await?; + Ok(DutyResult::Handled) + } + Duty::CheckIfKickoff { + txid, + block_height, + witness, + challenged_before: _, + } => { + tracing::debug!( + "Operator {:?} called check if kickoff with txid: {:?}, block_height: {:?}", + self.signer.xonly_public_key, + txid, + block_height, + ); + let kickoff_data = self + .db + .get_deposit_data_with_kickoff_txid(None, txid) + .await?; + if let Some((deposit_data, kickoff_data)) = kickoff_data { + let mut dbtx = self.db.begin_transaction().await?; + StateManager::::dispatch_new_kickoff_machine( + self.db.clone(), + &mut dbtx, + kickoff_data, + block_height, + deposit_data.clone(), + witness, + ) + .await?; + + // send the relevant txs an operator should send during a kickoff to the txsender again + // note: an operator only tracks itself, so only receives its own kickoffs here + // the reason why is that if kickoff was sent during no-automation mode, these tx's were never added to the txsender + let context = ContractContext::new_context_for_kickoff( 
+ kickoff_data, + deposit_data.clone(), + self.config.protocol_paramset(), + ); + let signed_txs = create_and_sign_txs( + self.db.clone(), + &self.signer, + self.config.clone(), + context, + // we don't need to send kickoff tx (it's already sent) so payout blockhash is irrelevant + // blockhash doesn't change the kickoff txid (it's in the witness) + Some([0u8; 20]), + Some(&mut dbtx), + ) + .await?; + let tx_metadata = Some(TxMetadata { + tx_type: TransactionType::Dummy, // will be replaced in add_tx_to_queue + operator_xonly_pk: Some(self.signer.xonly_public_key), + round_idx: Some(kickoff_data.round_idx), + kickoff_idx: Some(kickoff_data.kickoff_idx), + deposit_outpoint: Some(deposit_data.get_deposit_outpoint()), + }); + // try to send them + for (tx_type, signed_tx) in &signed_txs { + match *tx_type { + TransactionType::OperatorChallengeAck(_) + | TransactionType::WatchtowerChallengeTimeout(_) + | TransactionType::ChallengeTimeout + | TransactionType::DisproveTimeout + | TransactionType::Reimburse => { + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + *tx_type, + signed_tx, + &signed_txs, + tx_metadata, + &self.config, + None, + ) + .await?; + } + _ => {} + } + } + dbtx.commit().await?; + } + Ok(DutyResult::Handled) + } + } + } + + async fn create_txhandlers( + &self, + tx_type: TransactionType, + contract_context: ContractContext, + ) -> Result, BridgeError> { + let mut db_cache = + ReimburseDbCache::from_context(self.db.clone(), &contract_context, None); + let txhandlers = create_txhandlers( + tx_type, + contract_context, + &mut TxHandlerCache::new(), + &mut db_cache, + ) + .await?; + Ok(txhandlers) + } + + async fn handle_finalized_block( + &self, + _dbtx: DatabaseTransaction<'_, '_>, + _block_id: u32, + _block_height: u32, + _block_cache: Arc, + _light_client_proof_wait_interval_secs: Option, + ) -> Result<(), BridgeError> { + Ok(()) + } + } +} + +#[cfg(test)] +mod tests { + use crate::operator::Operator; + use 
crate::test::common::citrea::MockCitreaClient; + use crate::test::common::*; + use bitcoin::hashes::Hash; + use bitcoin::{OutPoint, Txid}; + + #[tokio::test] + #[ignore = "Design changes in progress"] + async fn get_winternitz_public_keys() { + let mut config = create_test_config_with_thread_name().await; + let _regtest = create_regtest_rpc(&mut config).await; + + let operator = Operator::::new(config.clone()) + .await + .unwrap(); + + let deposit_outpoint = OutPoint { + txid: Txid::all_zeros(), + vout: 2, + }; + + let winternitz_public_key = operator + .generate_assert_winternitz_pubkeys(deposit_outpoint) + .unwrap(); + assert_eq!( + winternitz_public_key.len(), + config.protocol_paramset().num_round_txs + * config.protocol_paramset().num_kickoffs_per_round + ); + } + + #[tokio::test] + async fn operator_get_params() { + let mut config = create_test_config_with_thread_name().await; + let _regtest = create_regtest_rpc(&mut config).await; + + let operator = Operator::::new(config.clone()) + .await + .unwrap(); + let actual_wpks = operator.generate_kickoff_winternitz_pubkeys().unwrap(); + + let (mut wpk_rx, _) = operator.get_params().await.unwrap(); + let mut idx = 0; + while let Some(wpk) = wpk_rx.recv().await { + assert_eq!(actual_wpks[idx], wpk); + idx += 1; + } + assert_eq!(idx, actual_wpks.len()); + } +} diff --git a/core/src/rpc/aggregator.rs b/core/src/rpc/aggregator.rs new file mode 100644 index 000000000..cc1e305de --- /dev/null +++ b/core/src/rpc/aggregator.rs @@ -0,0 +1,2304 @@ +use super::clementine::{ + clementine_aggregator_server::ClementineAggregator, verifier_deposit_finalize_params, + DepositParams, Empty, VerifierDepositFinalizeParams, +}; +use super::clementine::{ + AggregatorWithdrawResponse, Deposit, EntityStatuses, GetEntityStatusesRequest, + OptimisticPayoutParams, RawSignedTx, VergenResponse, VerifierPublicKeys, +}; +use crate::aggregator::{AggregatorServer, ParticipatingOperators, ParticipatingVerifiers}; +use crate::bitvm_client::SECP; +use 
crate::builder::sighash::SignatureInfo; +use crate::builder::transaction::{ + create_emergency_stop_txhandler, create_move_to_vault_txhandler, + create_optimistic_payout_txhandler, Signed, TransactionType, TxHandler, +}; +use crate::config::BridgeConfig; +use crate::constants::{ + DEPOSIT_FINALIZATION_TIMEOUT, DEPOSIT_FINALIZE_STREAM_CREATION_TIMEOUT, + KEY_DISTRIBUTION_TIMEOUT, NONCE_STREAM_CREATION_TIMEOUT, OPERATOR_SIGS_STREAM_CREATION_TIMEOUT, + OPERATOR_SIGS_TIMEOUT, OVERALL_DEPOSIT_TIMEOUT, PARTIAL_SIG_STREAM_CREATION_TIMEOUT, + PIPELINE_COMPLETION_TIMEOUT, SEND_OPERATOR_SIGS_TIMEOUT, WITHDRAWAL_TIMEOUT, +}; +use crate::deposit::{Actors, DepositData, DepositInfo}; +use crate::errors::ResultExt; +use crate::musig2::AggregateFromPublicKeys; +use crate::rpc::clementine::{ + operator_withrawal_response, AggregatorWithdrawalInput, OperatorWithrawalResponse, + VerifierDepositSignParams, +}; +use crate::rpc::parser; +use crate::utils::{get_vergen_response, timed_request, timed_try_join_all, ScriptBufExt}; +use crate::utils::{FeePayingType, TxMetadata}; +use crate::UTXO; +use crate::{ + aggregator::Aggregator, + builder::sighash::create_nofn_sighash_stream, + errors::BridgeError, + musig2::aggregate_nonces, + rpc::clementine::{self, DepositSignSession}, +}; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::schnorr::{self, Signature}; +use bitcoin::secp256k1::{Message, PublicKey}; +use bitcoin::{TapSighash, TxOut, Txid, XOnlyPublicKey}; +use eyre::{Context, OptionExt}; +use futures::{ + future::try_join_all, + stream::{BoxStream, TryStreamExt}, + FutureExt, Stream, StreamExt, TryFutureExt, +}; +use secp256k1::musig::{AggregatedNonce, PartialSignature, PublicNonce}; +use std::future::Future; +use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tonic::{async_trait, Request, Response, Status, Streaming}; + +struct AggNonceQueueItem { + agg_nonce: AggregatedNonce, + sighash: TapSighash, +} + +#[derive(Debug, Clone)] +struct FinalSigQueueItem { + final_sig: Vec, +} 
+ +#[derive(Debug, thiserror::Error)] +pub enum AggregatorError { + #[error("Failed to receive from {stream_name} stream.")] + InputStreamEndedEarlyUnknownSize { stream_name: String }, + #[error("Failed to send to {stream_name} stream.")] + OutputStreamEndedEarly { stream_name: String }, + #[error("Failed to send request to {request_name} stream.")] + RequestFailed { request_name: String }, +} + +async fn get_next_pub_nonces( + nonce_streams: &mut [impl Stream> + + Unpin + + Send + + 'static], +) -> Result, BridgeError> { + Ok(try_join_all( + nonce_streams + .iter_mut() + .enumerate() + .map(|(i, s)| async move { + s.next() + .await + .transpose()? // Return the inner error if it exists + .ok_or_else(|| -> eyre::Report { + AggregatorError::InputStreamEndedEarlyUnknownSize { + // Return an early end error if the stream is empty + stream_name: format!("Nonce stream {i}"), + } + .into() + }) + }), + ) + .await?) +} + +/// For each expected sighash, we collect a batch of public nonces from all verifiers. We aggregate and send to the agg_nonce_sender. Then repeat for the next sighash. +async fn nonce_aggregator( + mut nonce_streams: Vec< + impl Stream> + Unpin + Send + 'static, + >, + mut sighash_stream: impl Stream> + + Unpin + + Send + + 'static, + agg_nonce_sender: Sender, +) -> Result<(AggregatedNonce, AggregatedNonce), BridgeError> { + let mut total_sigs = 0; + + tracing::info!("Starting nonce aggregation"); + + // We assume the sighash stream returns the correct number of items. 
+ while let Some(msg) = sighash_stream.next().await { + let (sighash, siginfo) = msg.wrap_err("Sighash stream failed")?; + + total_sigs += 1; + + let pub_nonces = get_next_pub_nonces(&mut nonce_streams) + .await + .wrap_err_with(|| { + format!("Failed to aggregate nonces for sighash with info: {siginfo:?}") + })?; + + tracing::trace!( + "Received nonces for signature id {:?} in nonce_aggregator", + siginfo.signature_id + ); + + let agg_nonce = aggregate_nonces(pub_nonces.iter().collect::>().as_slice())?; + + agg_nonce_sender + .send(AggNonceQueueItem { agg_nonce, sighash }) + .await + .wrap_err_with(|| AggregatorError::OutputStreamEndedEarly { + stream_name: "nonce_aggregator".to_string(), + })?; + + tracing::trace!( + "Sent nonces for signature id {:?} in nonce_aggregator", + siginfo.signature_id + ); + } + + if total_sigs == 0 { + tracing::warn!("Sighash stream returned 0 signatures"); + } + // aggregate nonces for the movetx signature + let pub_nonces = try_join_all(nonce_streams.iter_mut().map(|s| async { + s.next() + .await + .transpose()? // Return the inner error if it exists + .ok_or_else(|| -> eyre::Report { + AggregatorError::InputStreamEndedEarlyUnknownSize { + // Return an early end error if the stream is empty + stream_name: "Nonce stream".to_string(), + } + .into() + }) + })) + .await + .wrap_err("Failed to aggregate nonces for the move tx")?; + + tracing::trace!("Received nonces for movetx in nonce_aggregator"); + + let move_tx_agg_nonce = aggregate_nonces(pub_nonces.iter().collect::>().as_slice())?; + + let pub_nonces = try_join_all(nonce_streams.iter_mut().map(|s| async { + s.next() + .await + .transpose()? 
// Return the inner error if it exists + .ok_or_else(|| -> eyre::Report { + AggregatorError::InputStreamEndedEarlyUnknownSize { + // Return an early end error if the stream is empty + stream_name: "Nonce stream".to_string(), + } + .into() + }) + })) + .await + .wrap_err("Failed to aggregate nonces for the emergency stop tx")?; + + let emergency_stop_agg_nonce = + aggregate_nonces(pub_nonces.iter().collect::>().as_slice())?; + + Ok((move_tx_agg_nonce, emergency_stop_agg_nonce)) +} + +/// Reroutes aggregated nonces to the signature aggregator. +async fn nonce_distributor( + mut agg_nonce_receiver: Receiver, + partial_sig_streams: Vec<( + Streaming, + Sender, + )>, + partial_sig_sender: Sender<(Vec, AggNonceQueueItem)>, +) -> Result<(), BridgeError> { + let mut sig_count = 0; + let (mut partial_sig_rx, mut partial_sig_tx): (Vec<_>, Vec<_>) = + partial_sig_streams.into_iter().unzip(); + + let (queue_tx, mut queue_rx) = channel(crate::constants::DEFAULT_CHANNEL_SIZE); + + let handle_1 = tokio::spawn(async move { + while let Some(queue_item) = agg_nonce_receiver.recv().await { + sig_count += 1; + + tracing::trace!( + "Received aggregated nonce {} in nonce_distributor", + sig_count + ); + + let agg_nonce_wrapped = clementine::VerifierDepositSignParams { + params: Some(clementine::verifier_deposit_sign_params::Params::AggNonce( + queue_item.agg_nonce.serialize().to_vec(), + )), + }; + + // Broadcast aggregated nonce to all streams + try_join_all(partial_sig_tx.iter_mut().enumerate().map(|(idx, tx)| { + let agg_nonce_wrapped = agg_nonce_wrapped.clone(); + async move { + tx.send(agg_nonce_wrapped).await.wrap_err_with(|| { + AggregatorError::OutputStreamEndedEarly { + stream_name: format!("Partial sig stream {idx}"), + } + }) + } + })) + .await + .wrap_err("Failed to send aggregated nonces to verifiers")?; + + queue_tx + .send(queue_item) + .await + .wrap_err("Other end of channel closed")?; + + tracing::trace!( + "Sent aggregated nonce {} to verifiers in nonce_distributor", 
+ sig_count + ); + } + + Ok::<(), BridgeError>(()) + }); + + let handle_2 = tokio::spawn(async move { + while let Some(queue_item) = queue_rx.recv().await { + let partial_sigs = try_join_all(partial_sig_rx.iter_mut().enumerate().map( + |(idx, stream)| async move { + let partial_sig = stream + .message() + .await + .wrap_err_with(|| AggregatorError::RequestFailed { + request_name: format!("Partial sig stream {idx}"), + })? + .ok_or_eyre(AggregatorError::InputStreamEndedEarlyUnknownSize { + stream_name: format!("Partial sig stream {idx}"), + })?; + + Ok::<_, BridgeError>( + PartialSignature::from_byte_array( + &partial_sig + .partial_sig + .as_slice() + .try_into() + .wrap_err("PartialSignature must be 32 bytes")?, + ) + .wrap_err("Failed to parse partial signature")?, + ) + }, + )) + .await?; + + tracing::trace!( + "Received partial signature {} from verifiers in nonce_distributor", + sig_count + ); + + partial_sig_sender + .send((partial_sigs, queue_item)) + .await + .map_err(|_| { + eyre::eyre!(AggregatorError::OutputStreamEndedEarly { + stream_name: "partial_sig_sender".into(), + }) + })?; + + tracing::trace!( + "Sent partial signature {} to signature_aggregator in nonce_distributor", + sig_count + ); + } + Ok::<(), BridgeError>(()) + }); + + let (result_1, result_2) = tokio::join!(handle_1, handle_2); + + result_1 + .wrap_err("Task crashed while distributing aggnonces")? + .wrap_err("Error while distributing aggnonces")?; + result_2 + .wrap_err("Task crashed while receiving partial sigs")? + .wrap_err("Error while receiving partial sigs")?; + + Ok(()) +} + +/// Collects partial signatures from given stream and aggregates them. 
+async fn signature_aggregator( + mut partial_sig_receiver: Receiver<(Vec, AggNonceQueueItem)>, + verifiers_public_keys: Vec, + final_sig_sender: Sender, +) -> Result<(), BridgeError> { + let mut sig_count = 0; + while let Some((partial_sigs, queue_item)) = partial_sig_receiver.recv().await { + sig_count += 1; + tracing::trace!( + "Received partial signatures {} in signature_aggregator", + sig_count + ); + + let final_sig = crate::musig2::aggregate_partial_signatures( + verifiers_public_keys.clone(), + None, + queue_item.agg_nonce, + &partial_sigs, + Message::from_digest(queue_item.sighash.to_byte_array()), + )?; + + final_sig_sender + .send(FinalSigQueueItem { + final_sig: final_sig.serialize().to_vec(), + }) + .await + .wrap_err_with(|| { + eyre::eyre!(AggregatorError::OutputStreamEndedEarly { + stream_name: "final_sig_sender".into(), + }) + })?; + tracing::trace!( + "Sent aggregated signature {} to signature_distributor in signature_aggregator", + sig_count + ); + } + + Ok(()) +} + +/// Reroutes aggregated signatures to the caller. +/// Also sends 2 aggregated nonces to the verifiers. 
+async fn signature_distributor( + mut final_sig_receiver: Receiver, + deposit_finalize_sender: Vec>, + agg_nonce: impl Future>, +) -> Result<(), BridgeError> { + use verifier_deposit_finalize_params::Params; + let mut sig_count = 0; + while let Some(queue_item) = final_sig_receiver.recv().await { + sig_count += 1; + tracing::trace!("Received signature {} in signature_distributor", sig_count); + let final_params = VerifierDepositFinalizeParams { + params: Some(Params::SchnorrSig(queue_item.final_sig)), + }; + + try_join_all(deposit_finalize_sender.iter().map(|tx| { + let final_params = final_params.clone(); + async move { + tx.send(final_params).await.wrap_err_with(|| { + AggregatorError::OutputStreamEndedEarly { + stream_name: "Deposit finalize sender".to_string(), + } + }) + } + })) + .await + .wrap_err("Failed to send final signatures to verifiers")?; + + tracing::trace!( + "Sent signature {} to verifiers in signature_distributor", + sig_count + ); + } + + let (movetx_agg_nonce, emergency_stop_agg_nonce) = agg_nonce + .await + .wrap_err("Failed to get aggregated nonce for movetx and emergency stop")?; + + tracing::info!("Got aggregated nonce for movetx and emergency stop in signature distributor"); + + // Send the movetx agg nonce to the verifiers. 
+ for tx in &deposit_finalize_sender { + tx.send(VerifierDepositFinalizeParams { + params: Some(Params::MoveTxAggNonce( + movetx_agg_nonce.serialize().to_vec(), + )), + }) + .await + .wrap_err_with(|| AggregatorError::OutputStreamEndedEarly { + stream_name: "Deposit finalize sender (for movetx agg nonce)".to_string(), + })?; + } + tracing::info!("Sent movetx aggregated nonce to verifiers in signature distributor"); + + // send emergency stop agg nonce to verifiers + for tx in &deposit_finalize_sender { + tx.send(VerifierDepositFinalizeParams { + params: Some(Params::EmergencyStopAggNonce( + emergency_stop_agg_nonce.serialize().to_vec(), + )), + }) + .await + .wrap_err_with(|| AggregatorError::OutputStreamEndedEarly { + stream_name: "Deposit finalize sender (for emergency stop agg nonce)".to_string(), + })?; + } + tracing::info!("Sent emergency stop aggregated nonce to verifiers in signature distributor"); + + Ok(()) +} + +/// Creates a stream of nonces from verifiers. +/// This will automatically get the first response from the verifiers. 
+/// +/// # Returns +/// +/// - Vec<[`clementine::NonceGenFirstResponse`]>: First response from each verifier +/// - Vec>>: Stream of nonces from each verifier +async fn create_nonce_streams( + verifiers: ParticipatingVerifiers, + num_nonces: u32, + #[cfg(test)] config: &crate::config::BridgeConfig, +) -> Result< + ( + Vec, + Vec>>, + ), + BridgeError, +> { + let mut nonce_streams = timed_try_join_all( + NONCE_STREAM_CREATION_TIMEOUT, + "Nonce stream creation", + Some(verifiers.ids()), + verifiers + .clients() + .into_iter() + .enumerate() + .map(|(idx, client)| { + let mut client = client.clone(); + #[cfg(test)] + let config = config.clone(); + + async move { + #[cfg(test)] + config + .test_params + .timeout_params + .hook_timeout_nonce_stream_creation_verifier(idx) + .await; + let response_stream = client + .nonce_gen(tonic::Request::new(clementine::NonceGenRequest { + num_nonces, + })) + .await + .wrap_err_with(|| AggregatorError::RequestFailed { + request_name: format!("Nonce gen stream for verifier {idx}"), + })?; + + Ok::<_, BridgeError>(response_stream.into_inner()) + } + }), + ) + .await?; + + // Get the first responses from verifiers. + let first_responses: Vec = + try_join_all(nonce_streams.iter_mut().zip(verifiers.ids()).map( + |(stream, id)| async move { + parser::verifier::parse_nonce_gen_first_response(stream) + .await + .wrap_err_with(|| format!("Failed to get initial response from {id}")) + }, + )) + .await + .wrap_err("Failed to get nonce gen's initial responses from verifiers")?; + + let transformed_streams = nonce_streams + .into_iter() + .zip(verifiers.ids()) + .map(|(stream, id)| { + stream + .map(move |result| { + Aggregator::extract_pub_nonce( + result + .wrap_err_with(|| AggregatorError::InputStreamEndedEarlyUnknownSize { + stream_name: format!("Nonce gen stream for {id}"), + })? 
+ .response, + ) + }) + .boxed() + }) + .collect::>(); + + Ok((first_responses, transformed_streams)) +} + +/// Use items collected from the broadcast receiver for an async function call. +/// +/// Handles the boilerplate of managing a receiver of a broadcast channel. +/// If receiver is lagged at any time (data is lost) an error is returned. +async fn collect_and_call( + rx: &mut tokio::sync::broadcast::Receiver>, + mut f: F, +) -> Result +where + R: Default, + T: Clone, + F: FnMut(Vec) -> Fut, + Fut: Future>, +{ + loop { + match rx.recv().await { + Ok(params) => { + f(params).await?; + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => { + break Err(Status::internal(format!( + "lost {n} items due to lagging receiver" + ))); + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => break Ok(R::default()), + } + } +} + +impl Aggregator { + // Extracts pub_nonce from given stream. + fn extract_pub_nonce( + response: Option, + ) -> Result { + match response.ok_or_eyre("NonceGen response is empty")? { + clementine::nonce_gen_response::Response::PubNonce(pub_nonce) => { + Ok(PublicNonce::from_byte_array( + &pub_nonce + .as_slice() + .try_into() + .wrap_err("PubNonce must be 66 bytes")?, + ) + .wrap_err("Failed to parse pub nonce")?) + } + _ => Err(eyre::eyre!("Expected PubNonce in response").into()), + } + } + + /// For a specific deposit, collects needed signatures from all operators into a [`Vec>`]. 
+ async fn collect_operator_sigs( + operator_clients: ParticipatingOperators, + config: BridgeConfig, + mut deposit_sign_session: DepositSignSession, + ) -> Result>, BridgeError> { + deposit_sign_session.nonce_gen_first_responses = Vec::new(); // not needed for operators + let mut operator_sigs_streams = + // create deposit sign streams with each operator + timed_try_join_all( + OPERATOR_SIGS_STREAM_CREATION_TIMEOUT, + "Operator signature stream creation", + Some(operator_clients.ids()), + operator_clients.clients().into_iter().enumerate().map(|(idx, mut operator_client)| { + let sign_session = deposit_sign_session.clone(); + #[cfg(test)] + let config = config.clone(); + async move { + #[cfg(test)] + config + .test_params + .timeout_params + .hook_timeout_operator_sig_collection_operator(idx) + .await; + let stream = operator_client + .deposit_sign(tonic::Request::new(sign_session)) + .await.wrap_err_with(|| AggregatorError::RequestFailed { + request_name: format!("Deposit sign stream for operator {idx}"), + })?; + Ok::<_, BridgeError>(stream.into_inner()) + } + })) + .await?; + + let deposit_data: DepositData = deposit_sign_session + .deposit_params + .clone() + .ok_or_else(|| eyre::eyre!("No deposit params found in deposit sign session"))? + .try_into() + .wrap_err("Failed to convert deposit params to deposit data")?; + + // calculate number of signatures needed from each operator + let needed_sigs = config.get_num_required_operator_sigs(&deposit_data); + + // get signatures from each operator's signature streams + let operator_sigs = try_join_all(operator_sigs_streams.iter_mut().enumerate().map( + |(idx, stream)| async move { + let mut sigs: Vec = Vec::with_capacity(needed_sigs); + while let Some(sig) = + stream + .message() + .await + .wrap_err_with(|| AggregatorError::RequestFailed { + request_name: format!("Deposit sign stream for operator {idx}"), + })? 
+ { + sigs.push(Signature::from_slice(&sig.schnorr_sig).wrap_err_with(|| { + format!("Failed to parse Schnorr signature from operator {idx}") + })?); + } + Ok::<_, BridgeError>(sigs) + }, + )) + .await?; + + // check if all signatures are received + for (idx, sigs) in operator_sigs.iter().enumerate() { + if sigs.len() != needed_sigs { + return Err(eyre::eyre!( + "Not all operator sigs received from operator {}.\n Expected: {}, got: {}", + idx, + needed_sigs, + sigs.len() + ) + .into()); + } + } + Ok(operator_sigs) + } + + async fn create_movetx( + &self, + partial_sigs: Vec>, + movetx_agg_nonce: AggregatedNonce, + deposit_params: DepositParams, + ) -> Result, Status> { + let mut deposit_data: DepositData = deposit_params.try_into()?; + let musig_partial_sigs = parser::verifier::parse_partial_sigs(partial_sigs)?; + + // create move tx and calculate sighash + let mut move_txhandler = + create_move_to_vault_txhandler(&mut deposit_data, self.config.protocol_paramset())?; + + let sighash = move_txhandler.calculate_script_spend_sighash_indexed( + 0, + 0, + bitcoin::TapSighashType::Default, + )?; + + // aggregate partial signatures + let verifiers_public_keys = deposit_data.get_verifiers(); + let final_sig = crate::musig2::aggregate_partial_signatures( + verifiers_public_keys, + None, + movetx_agg_nonce, + &musig_partial_sigs, + Message::from_digest(sighash.to_byte_array()), + )?; + + // Put the signature in the tx + move_txhandler.set_p2tr_script_spend_witness(&[final_sig.as_ref()], 0, 0)?; + + Ok(move_txhandler.promote()?) 
+ } + + async fn verify_and_save_emergency_stop_sigs( + &self, + emergency_stop_sigs: Vec>, + emergency_stop_agg_nonce: AggregatedNonce, + deposit_params: DepositParams, + ) -> Result<(), BridgeError> { + let mut deposit_data: DepositData = deposit_params + .try_into() + .wrap_err("Failed to convert deposit params to deposit data")?; + let musig_partial_sigs = parser::verifier::parse_partial_sigs(emergency_stop_sigs) + .wrap_err("Failed to parse emergency stop signatures")?; + + // create move tx and calculate sighash + let move_txhandler = + create_move_to_vault_txhandler(&mut deposit_data, self.config.protocol_paramset())?; + + let mut emergency_stop_txhandler = create_emergency_stop_txhandler( + &mut deposit_data, + &move_txhandler, + self.config.protocol_paramset(), + )?; + + let sighash = emergency_stop_txhandler.calculate_script_spend_sighash_indexed( + 0, + 0, + bitcoin::TapSighashType::SinglePlusAnyoneCanPay, + )?; + + let verifiers_public_keys = deposit_data.get_verifiers(); + + let final_sig = crate::musig2::aggregate_partial_signatures( + verifiers_public_keys, + None, + emergency_stop_agg_nonce, + &musig_partial_sigs, + Message::from_digest(sighash.to_byte_array()), + ) + .wrap_err("Failed to aggregate emergency stop signatures")?; + + let final_sig = bitcoin::taproot::Signature { + signature: final_sig, + sighash_type: bitcoin::TapSighashType::SinglePlusAnyoneCanPay, + }; + + // insert the signature into the tx + emergency_stop_txhandler.set_p2tr_script_spend_witness(&[final_sig.serialize()], 0, 0)?; + + let emergency_stop_tx = emergency_stop_txhandler.get_cached_tx(); + let move_to_vault_txid = move_txhandler.get_txid(); + + tracing::debug!("Move to vault tx id: {}", move_to_vault_txid.to_string()); + + let emergency_stop_pubkey = self + .config + .emergency_stop_encryption_public_key + .ok_or_else(|| eyre::eyre!("Emergency stop encryption public key is not set"))?; + let encrypted_emergency_stop_tx = crate::encryption::encrypt_bytes( + 
emergency_stop_pubkey, + &bitcoin::consensus::serialize(&emergency_stop_tx), + )?; + + self.db + .insert_signed_emergency_stop_tx_if_not_exists( + None, + move_to_vault_txid, + &encrypted_emergency_stop_tx, + ) + .await?; + + Ok(()) + } + + #[cfg(feature = "automation")] + pub async fn send_emergency_stop_tx( + &self, + tx: bitcoin::Transaction, + ) -> Result { + // Add fee bumper. + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .insert_try_to_send( + &mut dbtx, + Some(TxMetadata { + deposit_outpoint: None, + operator_xonly_pk: None, + round_idx: None, + kickoff_idx: None, + tx_type: TransactionType::EmergencyStop, + }), + &tx, + FeePayingType::RBF, + None, + &[], + &[], + &[], + &[], + ) + .await + .map_err(BridgeError::from)?; + dbtx.commit() + .await + .map_err(|e| Status::internal(format!("Failed to commit db transaction: {}", e)))?; + + Ok(tx) + } +} + +#[async_trait] +impl ClementineAggregator for AggregatorServer { + async fn vergen(&self, _request: Request) -> Result, Status> { + Ok(Response::new(get_vergen_response())) + } + + async fn get_entity_statuses( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let restart_tasks = request.restart_tasks; + + Ok(Response::new(EntityStatuses { + entity_statuses: self.aggregator.get_entity_statuses(restart_tasks).await?, + })) + } + + async fn optimistic_payout( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let opt_withdraw_params = request.into_inner(); + + let withdraw_params = + opt_withdraw_params + .withdrawal + .clone() + .ok_or(Status::invalid_argument( + "Withdrawal params not found for optimistic payout", + ))?; + let (deposit_id, input_signature, input_outpoint, output_script_pubkey, output_amount) = + parser::operator::parse_withdrawal_sig_params(withdraw_params)?; + + // if the withdrawal utxo is spent, no reason to sign optimistic payout + if self + .rpc + .is_utxo_spent(&input_outpoint) + .await + 
.map_to_status()? + { + return Err(Status::invalid_argument(format!( + "Withdrawal utxo is already spent: {:?}", + input_outpoint + ))); + } + + // get which deposit the withdrawal belongs to + let withdrawal = self + .db + .get_move_to_vault_txid_from_citrea_deposit(None, deposit_id) + .await?; + if let Some(move_txid) = withdrawal { + // check if withdrawal utxo is correct + let withdrawal_utxo = self + .db + .get_withdrawal_utxo_from_citrea_withdrawal(None, deposit_id) + .await?; + if withdrawal_utxo != input_outpoint { + return Err(Status::invalid_argument(format!( + "Withdrawal utxo is not correct: {:?} != {:?}", + withdrawal_utxo, input_outpoint + ))); + } + + // Prepare input and output of the payout transaction. + let withdrawal_prevout = self + .rpc + .get_txout_from_outpoint(&input_outpoint) + .await + .map_to_status()?; + + let user_xonly_pk = withdrawal_prevout + .script_pubkey + .try_get_taproot_pk() + .map_err(|_| { + Status::invalid_argument(format!( + "Withdrawal prevout script_pubkey is not a Taproot output: {:?}", + withdrawal_prevout.script_pubkey + )) + })?; + + let withdrawal_utxo = UTXO { + outpoint: input_outpoint, + txout: withdrawal_prevout, + }; + + let output_txout = TxOut { + value: output_amount, + script_pubkey: output_script_pubkey, + }; + + let deposit_data = self + .db + .get_deposit_data_with_move_tx(None, move_txid) + .await?; + + let mut deposit_data = deposit_data + .ok_or(eyre::eyre!( + "Deposit data not found for move txid {}", + move_txid + )) + .map_err(BridgeError::from)?; + + let mut opt_payout_txhandler = create_optimistic_payout_txhandler( + &mut deposit_data, + withdrawal_utxo, + output_txout, + input_signature, + self.config.protocol_paramset(), + )?; + + let sighash = opt_payout_txhandler.calculate_pubkey_spend_sighash( + 0, + bitcoin::TapSighashType::SinglePlusAnyoneCanPay, + )?; + + let message = Message::from_digest(sighash.to_byte_array()); + + let sig = + 
schnorr::Signature::from_slice(&input_signature.serialize()).map_err(|_| { + Status::internal("Failed to parse signature from optimistic payout tx witness") + })?; + + SECP.verify_schnorr(&sig, &message, &user_xonly_pk) + .map_err(|_| Status::internal("Invalid signature for optimistic payout tx"))?; + + // get which verifiers participated in the deposit to collect the optimistic payout tx signature + let participating_verifiers = self.get_participating_verifiers(&deposit_data).await?; + let (first_responses, mut nonce_streams) = { + create_nonce_streams( + participating_verifiers.clone(), + 1, + #[cfg(test)] + &self.config, + ) + .await? + }; + // collect nonces + let pub_nonces = get_next_pub_nonces(&mut nonce_streams) + .await + .wrap_err("Failed to aggregate nonces for optimistic payout") + .map_to_status()?; + let agg_nonce = aggregate_nonces(pub_nonces.iter().collect::>().as_slice())?; + + let agg_nonce_bytes = agg_nonce.serialize().to_vec(); + // send the agg nonce to the verifiers to sign the optimistic payout tx + let payout_sigs = participating_verifiers + .clients() + .into_iter() + .zip(first_responses) + .map(|(client, first_response)| { + let mut client = client.clone(); + let opt_withdraw_params = opt_withdraw_params.clone(); + { + let agg_nonce_serialized = agg_nonce_bytes.clone(); + async move { + client + .optimistic_payout_sign(OptimisticPayoutParams { + opt_withdrawal: Some(opt_withdraw_params), + agg_nonce: agg_nonce_serialized, + nonce_gen: Some(first_response), + }) + .await + } + } + }) + .collect::>(); + + // txin at index 1 is deposited utxo in movetx + let sighash = opt_payout_txhandler.calculate_script_spend_sighash_indexed( + 1, + 0, + bitcoin::TapSighashType::Default, + )?; + + // calculate final sig + let payout_sig = try_join_all(payout_sigs).await?; + + let musig_partial_sigs = payout_sig + .iter() + .map(|sig| { + PartialSignature::from_byte_array( + &sig.get_ref() + .partial_sig + .clone() + .try_into() + .map_err(|_| 
secp256k1::musig::ParseError::MalformedArg)?, + ) + }) + .collect::, _>>() + .map_err(|e| Status::internal(format!("Failed to parse partial sig: {:?}", e)))?; + + let final_sig = bitcoin::taproot::Signature { + signature: crate::musig2::aggregate_partial_signatures( + deposit_data.get_verifiers(), + None, + agg_nonce, + &musig_partial_sigs, + Message::from_digest(sighash.to_byte_array()), + )?, + sighash_type: bitcoin::TapSighashType::Default, + }; + + // set witness and send tx + opt_payout_txhandler.set_p2tr_script_spend_witness(&[final_sig.serialize()], 1, 0)?; + let opt_payout_txhandler = opt_payout_txhandler.promote()?; + let opt_payout_tx = opt_payout_txhandler.get_cached_tx(); + + #[cfg(feature = "automation")] + { + tracing::info!("Sending optimistic payout tx via tx_sender"); + + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + TransactionType::OptimisticPayout, + opt_payout_tx, + &[], + None, + &self.config, + None, + ) + .await + .map_err(BridgeError::from)?; + dbtx.commit().await.map_err(|e| { + Status::internal(format!( + "Failed to commit db transaction to send optimistic payout tx: {}", + e + )) + })?; + } + + Ok(Response::new(RawSignedTx::from(opt_payout_tx))) + } else { + Err(Status::not_found(format!( + "Withdrawal with index {} not found.", + deposit_id + ))) + } + } + + async fn internal_send_tx( + &self, + request: Request, + ) -> Result, Status> { + #[cfg(not(feature = "automation"))] + { + Err(Status::unimplemented("Automation is not enabled")) + } + #[cfg(feature = "automation")] + { + let send_tx_req = request.into_inner(); + let fee_type = send_tx_req.fee_type(); + let signed_tx: bitcoin::Transaction = send_tx_req + .raw_tx + .ok_or(Status::invalid_argument("Missing raw_tx"))? 
+ .try_into()?; + + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .insert_try_to_send( + &mut dbtx, + None, + &signed_tx, + fee_type.try_into()?, + None, + &[], + &[], + &[], + &[], + ) + .await + .map_err(BridgeError::from)?; + dbtx.commit() + .await + .map_err(|e| Status::internal(format!("Failed to commit db transaction: {}", e)))?; + Ok(Response::new(Empty {})) + } + } + + #[tracing::instrument(skip_all, err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn setup( + &self, + _request: Request, + ) -> Result, Status> { + // Propagate Operators configurations to all verifier clients + const CHANNEL_CAPACITY: usize = 1024 * 16; + let (operator_params_tx, operator_params_rx) = + tokio::sync::broadcast::channel(CHANNEL_CAPACITY); + let operator_params_rx_handles = (0..self.get_verifier_clients().len()) + .map(|_| operator_params_rx.resubscribe()) + .collect::>(); + + let operators = self.get_operator_clients().to_vec(); + let get_operator_params_chunked_handle = tokio::spawn(async move { + tracing::info!(clients = operators.len(), "Collecting operator details..."); + try_join_all(operators.iter().map(|operator| { + let mut operator = operator.clone(); + let tx = operator_params_tx.clone(); + async move { + let stream = operator + .get_params(Request::new(Empty {})) + .await? + .into_inner(); + tx.send(stream.try_collect::>().await?) 
+ .map_err(|e| { + BridgeError::from(eyre::eyre!("failed to read operator params: {e}")) + })?; + Ok::<_, Status>(()) + } + })) + .await?; + Ok::<_, Status>(()) + }); + + let verifiers = self.get_verifier_clients().to_vec(); + let set_operator_params_handle = tokio::spawn(async move { + tracing::info!("Informing verifiers of existing operators..."); + try_join_all(verifiers.iter().zip(operator_params_rx_handles).map( + |(verifier, mut rx)| { + let verifier = verifier.clone(); + async move { + collect_and_call(&mut rx, |params| { + let mut verifier = verifier.clone(); + async move { + verifier.set_operator(futures::stream::iter(params)).await?; + Ok::<_, Status>(()) + } + }) + .await?; + Ok::<_, Status>(()) + } + }, + )) + .await?; + Ok::<_, Status>(()) + }); + + try_join_all([ + get_operator_params_chunked_handle, + set_operator_params_handle, + ]) + .await + .wrap_err("aggregator setup failed") + .map_err(BridgeError::from)? + .into_iter() + .collect::, Status>>()?; + + let verifier_public_keys = self.fetch_verifier_keys().await?; + + Ok(Response::new(VerifierPublicKeys::from( + verifier_public_keys, + ))) + } + + /// Handles a new deposit request from a user. This function coordinates the signing process + /// between verifiers to create a valid move transaction. It ensures a covenant using pre-signed NofN transactions. + /// It also collects signatures from operators to ensure that the operators can be slashed if they act maliciously. + /// + /// Overview: + /// 1. Receive and parse deposit parameters from user + /// 2. Signs all NofN transactions with verifiers using MuSig2: + /// - Creates nonce streams with verifiers (get pub nonces for each transaction) + /// - Opens deposit signing streams with verifiers (sends aggnonces for each transaction, receives partial sigs) + /// - Opens deposit finalization streams with verifiers (sends final signatures, receives movetx signatures) + /// 3. Collects signatures from operators + /// 4. 
Waits for all tasks to complete + /// 5. Returns signed move transaction + /// + /// The following pipelines are used to coordinate the signing process, these move the data between the verifiers and the aggregator: + /// - Nonce aggregation + /// - Nonce distribution + /// - Signature aggregation + /// - Signature distribution + // #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn new_deposit( + &self, + request: Request, + ) -> Result, Status> { + timed_request(OVERALL_DEPOSIT_TIMEOUT, "Overall new deposit", async { + let deposit_info: DepositInfo = request.into_inner().try_into()?; + + let deposit_data = DepositData { + deposit: deposit_info, + nofn_xonly_pk: None, + actors: Actors { + verifiers: self.fetch_verifier_keys().await?, + watchtowers: vec![], + operators: self.fetch_operator_keys().await?, + }, + security_council: self.config.security_council.clone(), + }; + + let deposit_params = deposit_data.clone().into(); + + // Collect and distribute keys needed keys from operators and watchtowers to verifiers + let start = std::time::Instant::now(); + timed_request( + KEY_DISTRIBUTION_TIMEOUT, + "Key collection and distribution", + self.collect_and_distribute_keys(&deposit_params), + ) + .await?; + tracing::info!("Collected and distributed keys in {:?}", start.elapsed()); + + let verifiers = self.get_participating_verifiers(&deposit_data).await?; + + // Generate nonce streams for all verifiers. 
+ let num_required_sigs = self.config.get_num_required_nofn_sigs(&deposit_data); + let num_required_nonces = num_required_sigs as u32 + 2; // ask for +2 for the final movetx signature + emergency stop signature, but don't send it on deposit_sign stage + let (first_responses, nonce_streams) = + create_nonce_streams( + verifiers.clone(), + num_required_nonces, + #[cfg(test)] + &self.config, + ) + .await?; + + // Create initial deposit session and send to verifiers + let deposit_sign_session = DepositSignSession { + deposit_params: Some(deposit_params.clone()), + nonce_gen_first_responses: first_responses, + }; + + let deposit_sign_param: VerifierDepositSignParams = + deposit_sign_session.clone().into(); + + #[allow(clippy::unused_enumerate_index)] + let partial_sig_streams = timed_try_join_all( + PARTIAL_SIG_STREAM_CREATION_TIMEOUT, + "Partial signature stream creation", + Some(verifiers.ids()), + verifiers.clients().into_iter().enumerate().map(|(_idx, verifier_client)| { + let mut verifier_client = verifier_client.clone(); + #[cfg(test)] + let config = self.config.clone(); + + let deposit_sign_param = + deposit_sign_param.clone(); + + async move { + #[cfg(test)] + config + .test_params + .timeout_params + .hook_timeout_partial_sig_stream_creation_verifier(_idx) + .await; + + let (tx, rx) = tokio::sync::mpsc::channel(num_required_nonces as usize + 1); // initial param + num_required_nonces nonces + + let stream = verifier_client + .deposit_sign(tokio_stream::wrappers::ReceiverStream::new(rx)) + .await? 
+ .into_inner(); + + tx.send(deposit_sign_param).await.map_err(|e| { + Status::internal(format!("Failed to send deposit sign session: {:?}", e)) + })?; + + Ok::<_, BridgeError>((stream, tx)) + } + }) + ) + .await?; + + // Set up deposit finalization streams + #[allow(clippy::unused_enumerate_index)] + let deposit_finalize_streams = verifiers.clients().into_iter().enumerate().map( + |(_idx, mut verifier)| { + let (tx, rx) = tokio::sync::mpsc::channel(num_required_nonces as usize + 1); + let receiver_stream = tokio_stream::wrappers::ReceiverStream::new(rx); + #[cfg(test)] + let config = self.config.clone(); + // start deposit_finalize with tokio spawn + let deposit_finalize_future = tokio::spawn(async move { + #[cfg(test)] + config + .test_params + .timeout_params + .hook_timeout_deposit_finalize_verifier(_idx) + .await; + + verifier.deposit_finalize(receiver_stream).await.map_err(BridgeError::from) + }); + + Ok::<_, BridgeError>((deposit_finalize_future, tx)) + }, + ).collect::, BridgeError>>()?; + + tracing::info!("Sending deposit finalize streams to verifiers"); + + let (deposit_finalize_futures, deposit_finalize_sender): (Vec<_>, Vec<_>) = + deposit_finalize_streams.into_iter().unzip(); + + // Send initial finalization params + let deposit_finalize_first_param: VerifierDepositFinalizeParams = + deposit_sign_session.clone().into(); + + timed_try_join_all( + DEPOSIT_FINALIZE_STREAM_CREATION_TIMEOUT, + "Deposit finalization initial param send", + Some(verifiers.ids()), + deposit_finalize_sender.iter().cloned().map(|tx| { + let param = deposit_finalize_first_param.clone(); + async move { + tx.send(param).await + .map_err(|e| { + Status::internal(format!( + "Failed to send deposit finalize first param: {:?}", + e + )) + }).map_err(Into::into) + } + }) + ).await?; + + + let deposit_blockhash = self + .rpc + .get_blockhash_of_tx(&deposit_data.get_deposit_outpoint().txid) + .await + .map_to_status()?; + + let verifiers_public_keys = deposit_data.get_verifiers(); + + // 
Create sighash stream for transaction signing + let sighash_stream = Box::pin(create_nofn_sighash_stream( + self.db.clone(), + self.config.clone(), + deposit_data.clone(), + deposit_blockhash, + false, + )); + + // Create channels for pipeline communication + let (agg_nonce_sender, agg_nonce_receiver) = channel(num_required_nonces as usize); + let (partial_sig_sender, partial_sig_receiver) = channel(num_required_nonces as usize); + let (final_sig_sender, final_sig_receiver) = channel(num_required_nonces as usize); + + // Start the nonce aggregation pipe. + let nonce_agg_handle = tokio::spawn(nonce_aggregator( + nonce_streams, + sighash_stream, + agg_nonce_sender, + )); + + // Start the nonce distribution pipe. + let nonce_dist_handle = tokio::spawn(nonce_distributor( + agg_nonce_receiver, + partial_sig_streams, + partial_sig_sender, + )); + + // Start the signature aggregation pipe. + let sig_agg_handle = tokio::spawn(signature_aggregator( + partial_sig_receiver, + verifiers_public_keys, + final_sig_sender, + )); + + tracing::debug!("Getting signatures from operators"); + // Get sigs from each operator in background + let operators = self.get_participating_operators(&deposit_data).await?; + + let config_clone = self.config.clone(); + let operator_sigs_fut = tokio::spawn(async move { + timed_request( + OPERATOR_SIGS_TIMEOUT, + "Operator signature collection", + async { + Aggregator::collect_operator_sigs( + operators, + config_clone, + deposit_sign_session, + ) + .await + }, + ) + .await + }); + + // Join the nonce aggregation handle to get the movetx agg nonce. + let nonce_agg_handle = nonce_agg_handle + .map_err(|_| Status::internal("panic when aggregating nonces")) + .map( + |res| -> Result<(AggregatedNonce, AggregatedNonce), Status> { + res.and_then(|r| r.map_err(Into::into)) + }, + ) + .shared(); + + // Start the deposit finalization pipe. 
+ let sig_dist_handle = tokio::spawn(signature_distributor( + final_sig_receiver, + deposit_finalize_sender.clone(), + nonce_agg_handle.clone(), + )); + + tracing::debug!( + "Waiting for pipeline tasks to complete (nonce agg, sig agg, sig dist, operator sigs)" + ); + + // Right now we collect all operator sigs then start to send them, we can do it simultaneously in the future + // Need to change sig verification ordering in deposit_finalize() in verifiers so that we verify + // 1st signature of all operators, then 2nd of all operators etc. + let all_op_sigs = operator_sigs_fut + .await + .map_err(|_| Status::internal("panic when collecting operator signatures"))??; + + tracing::debug!("Got all operator signatures"); + + tracing::debug!("Waiting for pipeline tasks to complete"); + // Wait for all pipeline tasks to complete + timed_request( + PIPELINE_COMPLETION_TIMEOUT, + "MuSig2 signing pipeline", + try_join_all([nonce_dist_handle, sig_agg_handle, sig_dist_handle]).map_err(|join_err| -> BridgeError { eyre::Report::from(join_err).wrap_err("Failed to join on pipelined tasks").into()}), + ) + .await?; + + tracing::debug!("Pipeline tasks completed"); + + + // send operators sigs to verifiers after all verifiers have signed + timed_request( + SEND_OPERATOR_SIGS_TIMEOUT, + "Sending operator signatures to verifiers", + async { + let send_operator_sigs: Vec<_> = deposit_finalize_sender + .iter() + .map(|tx| async { + for one_op_sigs in all_op_sigs.iter() { + for sig in one_op_sigs.iter() { + let deposit_finalize_param: VerifierDepositFinalizeParams = + sig.into(); + + tx.send(deposit_finalize_param).await.wrap_err_with(|| { + eyre::eyre!(AggregatorError::OutputStreamEndedEarly { + stream_name: "deposit_finalize_sender".into(), + }) + })?; + } + } + + Ok::<(), BridgeError>(()) + }) + .collect(); + try_join_all(send_operator_sigs).await?; + Ok(()) + }, + ) + .await?; + + tracing::debug!("Waiting for deposit finalization"); + + // Collect partial signatures for move 
transaction + let partial_sigs: Vec<(Vec, Vec)> = timed_try_join_all( + DEPOSIT_FINALIZATION_TIMEOUT, + "Deposit finalization", + Some(verifiers.ids()), + deposit_finalize_futures.into_iter().map(|fut| async move { + let inner = fut.await + .map_err(|_| BridgeError::from(Status::internal("panic finishing deposit_finalize")))?? + .into_inner(); + + Ok((inner.move_to_vault_partial_sig, inner.emergency_stop_partial_sig)) + }), + ) + .await?; + + + let (move_to_vault_sigs, emergency_stop_sigs): (Vec>, Vec>) = + partial_sigs.into_iter().unzip(); + + tracing::debug!("Received move tx partial sigs: {:?}", move_to_vault_sigs); + + // Create the final move transaction and check the signatures + let (movetx_agg_nonce, emergency_stop_agg_nonce) = nonce_agg_handle.await?; + + // Verify emergency stop signatures + self.verify_and_save_emergency_stop_sigs( + emergency_stop_sigs, + emergency_stop_agg_nonce, + deposit_params.clone(), + ) + .await?; + + let signed_movetx_handler = self + .create_movetx(move_to_vault_sigs, movetx_agg_nonce, deposit_params) + .await?; + + let raw_signed_tx = RawSignedTx { + raw_tx: bitcoin::consensus::serialize(&signed_movetx_handler.get_cached_tx()), + }; + + Ok(Response::new(raw_signed_tx)) + }) + .await.map_err(Into::into) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn withdraw( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let (withdraw_params, operator_xonly_pks) = ( + request.withdrawal.ok_or(Status::invalid_argument( + "withdrawalParamsWithSig is missing", + ))?, + request.operator_xonly_pks, + ); + // convert rpc xonly pks to bitcoin xonly pks + let operator_xonly_pks_from_rpc: Vec = operator_xonly_pks + .into_iter() + .map(|xonly_pk| { + xonly_pk.try_into().map_err(|e| { + Status::invalid_argument(format!("Failed to convert xonly public key: {}", e)) + }) + }) + .collect::, Status>>()?; + + // check if all given 
operator xonly pubkeys are a valid operator xonly pubkey, to warn the caller if + // something is wrong with the given operator xonly pubkeys + let current_operator_xonly_pks = self.fetch_operator_keys().await?; + let invalid_operator_xonly_pks = operator_xonly_pks_from_rpc + .iter() + .filter(|xonly_pk| !current_operator_xonly_pks.contains(xonly_pk)) + .collect::>(); + if !invalid_operator_xonly_pks.is_empty() { + return Err(Status::invalid_argument(format!( + "Given xonly public key doesn't belong to any current operator: invalid keys: {:?}, current operators: {:?}", + invalid_operator_xonly_pks, + current_operator_xonly_pks + ))); + } + + let operators = self + .get_operator_clients() + .iter() + .zip(current_operator_xonly_pks.into_iter()); + let withdraw_futures = operators + .filter(|(_, xonly_pk)| { + // check if operator_xonly_pks is empty or contains the operator's xonly public key + operator_xonly_pks_from_rpc.is_empty() + || operator_xonly_pks_from_rpc.contains(xonly_pk) + }) + .map(|(operator, operator_xonly_pk)| { + let mut operator = operator.clone(); + let params = withdraw_params.clone(); + let mut request = Request::new(params); + request.set_timeout(WITHDRAWAL_TIMEOUT); + async move { (operator.withdraw(request).await, operator_xonly_pk) } + }); + + // collect responses from operators and return them as a vector of strings + let responses = futures::future::join_all(withdraw_futures).await; + Ok(Response::new(AggregatorWithdrawResponse { + withdraw_responses: responses + .into_iter() + .map(|(res, xonly_pk)| match res { + Ok(withdraw_response) => OperatorWithrawalResponse { + operator_xonly_pk: Some(xonly_pk.into()), + response: Some(operator_withrawal_response::Response::RawTx( + withdraw_response.into_inner(), + )), + }, + Err(e) => OperatorWithrawalResponse { + operator_xonly_pk: Some(xonly_pk.into()), + response: Some(operator_withrawal_response::Response::Error(e.to_string())), + }, + }) + .collect(), + })) + } + + async fn 
get_nofn_aggregated_xonly_pk( + &self, + _: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let verifier_keys = self.fetch_verifier_keys().await?; + let num_verifiers = verifier_keys.len(); + let nofn_xonly_pk = bitcoin::XOnlyPublicKey::from_musig2_pks(verifier_keys, None) + .expect("Failed to aggregate verifier public keys"); + Ok(Response::new(super::NofnResponse { + nofn_xonly_pk: nofn_xonly_pk.serialize().to_vec(), + num_verifiers: num_verifiers as u32, + })) + } + + async fn internal_get_emergency_stop_tx( + &self, + request: Request, + ) -> Result, Status> { + let inner_request = request.into_inner(); + let txids: Vec = inner_request + .txids + .into_iter() + .map(|txid| { + Txid::from_slice(&txid.txid).map_err(|e| { + tonic::Status::invalid_argument(format!("Failed to parse txid: {e}")) + }) + }) + .collect::, _>>()?; + + let emergency_stop_txs = self.db.get_emergency_stop_txs(None, txids).await?; + + let (txids, encrypted_emergency_stop_txs): (Vec, Vec>) = + emergency_stop_txs.into_iter().unzip(); + + Ok(Response::new(clementine::GetEmergencyStopTxResponse { + txids: txids.into_iter().map(|txid| txid.into()).collect(), + encrypted_emergency_stop_txs, + })) + } + + async fn send_move_to_vault_tx( + &self, + request: Request, + ) -> Result, Status> { + #[cfg(not(feature = "automation"))] + { + let _ = request; + return Err(Status::unimplemented( + "Automation is disabled, cannot automatically send move to vault tx.", + )); + } + + #[cfg(feature = "automation")] + { + let request = request.into_inner(); + let movetx = bitcoin::consensus::deserialize( + &request + .raw_tx + .ok_or_eyre("raw_tx is required") + .map_to_status()? 
+ .raw_tx, + ) + .wrap_err("Failed to deserialize movetx") + .map_to_status()?; + + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .insert_try_to_send( + &mut dbtx, + Some(TxMetadata { + deposit_outpoint: request + .deposit_outpoint + .map(TryInto::try_into) + .transpose()?, + operator_xonly_pk: None, + round_idx: None, + kickoff_idx: None, + tx_type: TransactionType::MoveToVault, + }), + &movetx, + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &[], + ) + .await + .map_err(BridgeError::from)?; + dbtx.commit() + .await + .map_err(|e| Status::internal(format!("Failed to commit db transaction: {}", e)))?; + + Ok(Response::new(movetx.compute_txid().into())) + } + } +} + +#[cfg(test)] +mod tests { + use crate::actor::Actor; + use crate::config::BridgeConfig; + use crate::deposit::{BaseDepositData, DepositInfo, DepositType}; + use crate::musig2::AggregateFromPublicKeys; + use crate::rpc::clementine::clementine_aggregator_client::ClementineAggregatorClient; + use crate::rpc::clementine::{self, GetEntityStatusesRequest, SendMoveTxRequest}; + use crate::rpc::get_clients; + use crate::servers::create_aggregator_unix_server; + use crate::test::common::citrea::MockCitreaClient; + use crate::test::common::tx_utils::ensure_tx_onchain; + use crate::test::common::*; + use crate::{builder, EVMAddress}; + use bitcoin::hashes::Hash; + use bitcoincore_rpc::RpcApi; + use eyre::Context; + use std::time::Duration; + use tokio::time::sleep; + use tonic::{Request, Status}; + + #[cfg(feature = "automation")] + async fn perform_deposit(mut config: BridgeConfig) -> Result<(), Status> { + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + + let _unused = + run_single_deposit::(&mut config, rpc.clone(), None, None, None) + .await?; + + Ok(()) + } + #[tokio::test] + #[ignore = "See #687"] + async fn aggregator_double_setup_fail() { + let mut config = create_test_config_with_thread_name().await; + let _regtest = create_regtest_rpc(&mut 
config).await; + + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + + aggregator + .setup(tonic::Request::new(clementine::Empty {})) + .await + .unwrap(); + + assert!(aggregator + .setup(tonic::Request::new(clementine::Empty {})) + .await + .is_err()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn aggregator_double_deposit() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + + let evm_address = EVMAddress([1u8; 20]); + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let verifiers_public_keys: Vec = aggregator + .setup(tonic::Request::new(clementine::Empty {})) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + sleep(Duration::from_secs(3)).await; + + let nofn_xonly_pk = + bitcoin::XOnlyPublicKey::from_musig2_pks(verifiers_public_keys.clone(), None).unwrap(); + + let deposit_address = builder::address::generate_deposit_address( + nofn_xonly_pk, + signer.address.as_unchecked(), + evm_address, + config.protocol_paramset().network, + config.protocol_paramset().user_takes_after, + ) + .unwrap() + .0; + + let deposit_outpoint = rpc + .send_to_address(&deposit_address, config.protocol_paramset().bridge_amount) + .await + .unwrap(); + rpc.mine_blocks(18).await.unwrap(); + + let deposit_info = DepositInfo { + deposit_outpoint, + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address, + recovery_taproot_address: signer.address.as_unchecked().clone(), + }), + }; + + // Two deposits with the same values. 
+ let movetx_one = aggregator + .new_deposit(clementine::Deposit::from(deposit_info.clone())) + .await + .unwrap() + .into_inner(); + let movetx_one_txid: bitcoin::Txid = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + raw_tx: Some(movetx_one), + }) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + let movetx_two = aggregator + .new_deposit(clementine::Deposit::from(deposit_info)) + .await + .unwrap() + .into_inner(); + let _movetx_two_txid: bitcoin::Txid = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + raw_tx: Some(movetx_two), + }) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + rpc.mine_blocks(1).await.unwrap(); + sleep(Duration::from_secs(3)).await; + + let tx = poll_get( + async || { + rpc.mine_blocks(1).await.unwrap(); + + let tx_result = rpc.get_raw_transaction_info(&movetx_one_txid, None).await; + + let tx_result = tx_result + .inspect_err(|e| { + tracing::error!("Error getting transaction: {:?}", e); + }) + .ok(); + + Ok(tx_result) + }, + None, + None, + ) + .await + .wrap_err_with(|| eyre::eyre!("MoveTx did not land onchain")) + .unwrap(); + + assert!(tx.confirmations.unwrap() > 0); + } + + #[tokio::test(flavor = "multi_thread")] + async fn aggregator_deposit_movetx_lands_onchain() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + + let evm_address = EVMAddress([1u8; 20]); + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let verifiers_public_keys: Vec = aggregator + .setup(tonic::Request::new(clementine::Empty {})) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + sleep(Duration::from_secs(3)).await; + + let 
nofn_xonly_pk = + bitcoin::XOnlyPublicKey::from_musig2_pks(verifiers_public_keys.clone(), None).unwrap(); + + let deposit_address = builder::address::generate_deposit_address( + nofn_xonly_pk, + signer.address.as_unchecked(), + evm_address, + config.protocol_paramset().network, + config.protocol_paramset().user_takes_after, + ) + .unwrap() + .0; + + let deposit_outpoint = rpc + .send_to_address(&deposit_address, config.protocol_paramset().bridge_amount) + .await + .unwrap(); + rpc.mine_blocks(18).await.unwrap(); + + let deposit_info = DepositInfo { + deposit_outpoint, + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address, + recovery_taproot_address: signer.address.as_unchecked().clone(), + }), + }; + + // Generate and broadcast the move-to-vault transaction + let raw_move_tx = aggregator + .new_deposit(clementine::Deposit::from(deposit_info)) + .await + .unwrap() + .into_inner(); + + let movetx_txid = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + raw_tx: Some(raw_move_tx), + }) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + sleep(Duration::from_secs(3)).await; + + let tx = poll_get( + async || { + rpc.mine_blocks(1).await.unwrap(); + + let tx_result = rpc.get_raw_transaction_info(&movetx_txid, None).await; + + let tx_result = tx_result + .inspect_err(|e| { + tracing::error!("Error getting transaction: {:?}", e); + }) + .ok(); + + Ok(tx_result) + }, + None, + None, + ) + .await + .wrap_err_with(|| eyre::eyre!("MoveTx did not land onchain")) + .unwrap(); + + assert!(tx.confirmations.unwrap() > 0); + } + + #[tokio::test] + async fn aggregator_two_deposit_movetx_and_emergency_stop() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc(); + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + + let 
evm_address = EVMAddress([1u8; 20]); + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let verifiers_public_keys: Vec = aggregator + .setup(tonic::Request::new(clementine::Empty {})) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + sleep(Duration::from_secs(3)).await; + + let nofn_xonly_pk = + bitcoin::XOnlyPublicKey::from_musig2_pks(verifiers_public_keys.clone(), None).unwrap(); + + let deposit_address_0 = builder::address::generate_deposit_address( + nofn_xonly_pk, + signer.address.as_unchecked(), + evm_address, + config.protocol_paramset().network, + config.protocol_paramset().user_takes_after, + ) + .unwrap() + .0; + + let deposit_address_1 = builder::address::generate_deposit_address( + nofn_xonly_pk, + signer.address.as_unchecked(), + evm_address, + config.protocol_paramset().network, + config.protocol_paramset().user_takes_after, + ) + .unwrap() + .0; + + let deposit_outpoint_0 = rpc + .send_to_address(&deposit_address_0, config.protocol_paramset().bridge_amount) + .await + .unwrap(); + rpc.mine_blocks(18).await.unwrap(); + + let deposit_outpoint_1 = rpc + .send_to_address(&deposit_address_1, config.protocol_paramset().bridge_amount) + .await + .unwrap(); + rpc.mine_blocks(18).await.unwrap(); + + let deposit_info_0 = DepositInfo { + deposit_outpoint: deposit_outpoint_0, + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address, + recovery_taproot_address: signer.address.as_unchecked().clone(), + }), + }; + + let deposit_info_1 = DepositInfo { + deposit_outpoint: deposit_outpoint_1, + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address, + recovery_taproot_address: signer.address.as_unchecked().clone(), + }), + }; + + // Generate and broadcast the move-to-vault tx for the first deposit + let raw_move_tx_0 = aggregator + .new_deposit(clementine::Deposit::from(deposit_info_0)) + .await + .unwrap() + .into_inner(); + let move_txid_0: 
bitcoin::Txid = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint_0.into()), + raw_tx: Some(raw_move_tx_0), + }) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + sleep(Duration::from_secs(3)).await; + ensure_tx_onchain(rpc, move_txid_0) + .await + .expect("failed to get movetx_0 on chain"); + + // Generate and broadcast the move-to-vault tx for the second deposit + let raw_move_tx_1 = aggregator + .new_deposit(clementine::Deposit::from(deposit_info_1)) + .await + .unwrap() + .into_inner(); + let move_txid_1 = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint_1.into()), + raw_tx: Some(raw_move_tx_1), + }) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + ensure_tx_onchain(rpc, move_txid_1) + .await + .expect("failed to get movetx_1 on chain"); + sleep(Duration::from_secs(3)).await; + + let move_txids = vec![move_txid_0, move_txid_1]; + + tracing::debug!("Move txids: {:?}", move_txids); + + let emergency_txid = aggregator + .internal_get_emergency_stop_tx(tonic::Request::new( + clementine::GetEmergencyStopTxRequest { + txids: move_txids + .iter() + .map(|txid| clementine::Txid { + txid: txid.to_byte_array().to_vec(), + }) + .collect(), + }, + )) + .await + .unwrap() + .into_inner(); + + let decryption_priv_key = + hex::decode("a80bc8cf095c2b37d4c6233114e0dd91f43d75de5602466232dbfcc1fc66c542") + .expect("Failed to parse emergency stop encryption public key"); + let emergency_stop_tx: bitcoin::Transaction = bitcoin::consensus::deserialize( + &crate::encryption::decrypt_bytes( + &decryption_priv_key, + &emergency_txid.encrypted_emergency_stop_txs[0], + ) + .expect("Failed to decrypt emergency stop tx"), + ) + .expect("Failed to deserialize"); + + rpc.send_raw_transaction(&emergency_stop_tx) + .await + .expect("Failed to send emergency stop tx"); + + let 
emergency_stop_txid = emergency_stop_tx.compute_txid(); + rpc.mine_blocks(1).await.unwrap(); + + let _emergencty_tx = poll_get( + async || { + rpc.mine_blocks(1).await.unwrap(); + + let tx_result = rpc + .get_raw_transaction_info(&emergency_stop_txid, None) + .await; + + let tx_result = tx_result + .inspect_err(|e| { + tracing::error!("Error getting transaction: {:?}", e); + }) + .ok(); + + Ok(tx_result) + }, + None, + None, + ) + .await + .wrap_err_with(|| eyre::eyre!("Emergency stop tx did not land onchain")) + .unwrap(); + } + + #[cfg(feature = "automation")] + #[tokio::test] + #[ignore = "This test does not work"] + async fn aggregator_deposit_finalize_verifier_timeout() { + let mut config = create_test_config_with_thread_name().await; + config + .test_params + .timeout_params + .deposit_finalize_verifier_idx = Some(0); + let res = perform_deposit(config).await; + assert!(res.is_err()); + let err_string = res.unwrap_err().to_string(); + assert!( + err_string.contains("Deposit finalization from verifiers"), + "Error string was: {}", + err_string + ); + } + + #[cfg(feature = "automation")] + #[tokio::test] + async fn aggregator_deposit_key_distribution_verifier_timeout() { + let mut config = create_test_config_with_thread_name().await; + config + .test_params + .timeout_params + .key_distribution_verifier_idx = Some(0); + + let res = perform_deposit(config).await; + + assert!(res.is_err()); + let err_string = res.unwrap_err().to_string(); + assert!( + err_string.contains("Verifier key distribution (id:"), + "Error string was: {}", + err_string + ); + } + + #[cfg(feature = "automation")] + #[tokio::test] + async fn aggregator_deposit_key_distribution_operator_timeout() { + let mut config = create_test_config_with_thread_name().await; + config + .test_params + .timeout_params + .key_collection_operator_idx = Some(0); + + let res = perform_deposit(config).await; + + assert!(res.is_err()); + let err_string = res.unwrap_err().to_string(); + assert!( + 
err_string.contains("Operator key collection (id:"), + "Error string was: {}", + err_string + ); + } + + #[cfg(feature = "automation")] + #[tokio::test] + async fn aggregator_deposit_nonce_stream_creation_verifier_timeout() { + let mut config = create_test_config_with_thread_name().await; + config + .test_params + .timeout_params + .nonce_stream_creation_verifier_idx = Some(0); + + let res = perform_deposit(config).await; + + assert!(res.is_err()); + let err_string = res.unwrap_err().to_string(); + assert!( + err_string.contains("Nonce stream creation (id:"), + "Error string was: {}", + err_string + ); + } + + #[cfg(feature = "automation")] + #[tokio::test] + async fn aggregator_deposit_partial_sig_stream_creation_timeout() { + let mut config = create_test_config_with_thread_name().await; + config + .test_params + .timeout_params + .partial_sig_stream_creation_verifier_idx = Some(0); + + let res = perform_deposit(config).await; + + assert!(res.is_err()); + let err_string = res.unwrap_err().to_string(); + assert!( + err_string.contains("Partial signature stream creation (id:"), + "Error string was: {}", + err_string + ); + } + + #[cfg(feature = "automation")] + #[tokio::test] + async fn aggregator_deposit_operator_sig_collection_operator_timeout() { + let mut config = create_test_config_with_thread_name().await; + config + .test_params + .timeout_params + .operator_sig_collection_operator_idx = Some(0); + + let res = perform_deposit(config).await; + + assert!(res.is_err()); + let err_string = res.unwrap_err().to_string(); + assert!( + err_string.contains("Operator signature stream creation (id:"), + "Error string was: {}", + err_string + ); + } + + #[tokio::test] + async fn aggregator_get_entity_statuses() { + let mut config = create_test_config_with_thread_name().await; + let _regtest = create_regtest_rpc(&mut config).await; + + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + let status = aggregator + 
.get_entity_statuses(Request::new(GetEntityStatusesRequest { + restart_tasks: false, + })) + .await + .unwrap() + .into_inner(); + + tracing::info!("Status: {:?}", status); + + assert_eq!( + status.entity_statuses.len(), + config.test_params.all_operators_secret_keys.len() + + config.test_params.all_verifiers_secret_keys.len() + ); + } + + #[tokio::test] + async fn aggregator_start_with_offline_verifier() { + let mut config = create_test_config_with_thread_name().await; + // Create regtest rpc + let _regtest = create_regtest_rpc(&mut config).await; + // random ips + config.verifier_endpoints = Some(vec!["https://142.143.144.145:17001".to_string()]); + config.operator_endpoints = Some(vec!["https://142.143.144.145:17002".to_string()]); + // Create temporary directory for aggregator socket + let socket_dir = tempfile::tempdir().unwrap(); + let socket_path = socket_dir.path().join("aggregator.sock"); + + tracing::info!("Creating unix aggregator server"); + + let (_, _shutdown_tx) = create_aggregator_unix_server(config.clone(), socket_path.clone()) + .await + .unwrap(); + + tracing::info!("Created unix aggregator server"); + + let mut aggregator_client = get_clients( + vec![format!("unix://{}", socket_path.display())], + ClementineAggregatorClient::new, + &config, + false, + ) + .await + .unwrap() + .pop() + .unwrap(); + + tracing::info!("Got aggregator client"); + + // vergen should work + assert!(aggregator_client + .vergen(Request::new(clementine::Empty {})) + .await + .is_ok()); + + tracing::info!("After vergen"); + + // setup should give error as it can't connect to the verifier + assert!(aggregator_client + .setup(Request::new(clementine::Empty {})) + .await + .is_err()); + + tracing::info!("After setup"); + + // aggregator should still be up even after not connecting to the verifier + // and should be able to get metrics + tracing::info!( + "Entity statuses: {:?}", + aggregator_client + .get_entity_statuses(Request::new(GetEntityStatusesRequest { + 
restart_tasks: false, + })) + .await + .unwrap() + ); + } +} diff --git a/core/src/rpc/clementine.proto b/core/src/rpc/clementine.proto new file mode 100644 index 000000000..08b221e1e --- /dev/null +++ b/core/src/rpc/clementine.proto @@ -0,0 +1,736 @@ +syntax = "proto3"; +package clementine; + +message Empty {} + +message Txid { + bytes txid = 1; +} + +message Outpoint { + Txid txid = 1; + uint32 vout = 2; +} + +message NofnResponse { + bytes nofn_xonly_pk = 1; + uint32 num_verifiers = 2; +} + +enum NormalSignatureKind { + NormalSignatureUnknown = 0; + // Used for TxHandlers that verifiers don't care. These will have signatures created + // by the operator on the fly. + OperatorSighashDefault = 1; + Challenge = 2; + DisproveTimeout2 = 3; + Disprove2 = 4; + Reimburse1 = 5; + KickoffNotFinalized1 = 6; + KickoffNotFinalized2 = 7; + Reimburse2 = 8; + NoSignature = 9; + ChallengeTimeout2 = 10; + MiniAssert1 = 11; + OperatorChallengeAck1 = 12; + NotStored = 13; + YieldKickoffTxid = 14; + LatestBlockhashTimeout1 = 15; + LatestBlockhashTimeout2 = 16; + LatestBlockhashTimeout3 = 17; + LatestBlockhash = 18; +} + +// Signatures that are needed multiple times per an operators kickoff. +// Some watchtower sigs are needed once per watchtower. +// Asserts are needed multiple times +enum NumberedSignatureKind { + NumberedSignatureUnknown = 0; + // Used for TxHandlers that verifiers don't care. These will have signatures created + // by the operator on the fly. 
+ NumberedNotStored = 1; + OperatorChallengeNack1 = 2; + OperatorChallengeNack2 = 3; + OperatorChallengeNack3 = 4; + AssertTimeout1 = 5; + AssertTimeout2 = 6; + AssertTimeout3 = 7; + UnspentKickoff1 = 8; + UnspentKickoff2 = 9; + WatchtowerChallengeTimeout1 = 10; + WatchtowerChallengeTimeout2 = 11; + WatchtowerChallenge = 12; +} + +message NormalSignatureId { + NormalSignatureKind signature_kind = 1; +} + +message NumberedSignatureId { + NumberedSignatureKind signature_kind = 1; + int32 idx = 2; +} + +// A tagged signature struct that identifies the transaction-input that the signature is for. +// The id is left as NotStored for signatures that are created on the fly by the operator (they're also not stored). +message TaggedSignature { + oneof signature_id { + NormalSignatureId normal_signature = 1; + NumberedSignatureId numbered_signature = 2; + } + bytes signature = 3; +} + +message DepositSignatures { repeated TaggedSignature signatures = 1; } + +message ChallengeACKDigest { + bytes hash = 1; +} + +message WinternitzPubkey { + repeated bytes digit_pubkey = 3; +} + +message DepositParams { + Deposit deposit = 1; + Actors actors = 2; + SecurityCouncil security_council = 3; +} + +message SecurityCouncil { + repeated bytes pks = 1; + uint32 threshold = 2; +} + +message Deposit { + /// User's deposit UTXO. + Outpoint deposit_outpoint = 1; + oneof deposit_data { + BaseDeposit base_deposit = 2; + ReplacementDeposit replacement_deposit = 3; + } +} + +message Actors { + /// Public keys of verifiers that will participate in the deposit. + VerifierPublicKeys verifiers = 1; + /// X-only public keys of watchtowers that will participate in the deposit. + /// NOTE: verifiers are automatically considered watchtowers. This field is only for additional watchtowers. + XOnlyPublicKeys watchtowers = 2; + /// X-only public keys of operators that will participate in the deposit. 
+ XOnlyPublicKeys operators = 3; +} + +message ReplacementDeposit { + // Move to vault txid that is being replaced. + Txid old_move_txid = 1; +} + +// A new original deposit request's details. +message BaseDeposit { + // User's EVM address. + bytes evm_address = 1; + // User's recovery taproot address. + string recovery_taproot_address = 2; +} + +enum FeeType { + UNSPECIFIED = 0; + CPFP = 1; + RBF = 2; + NO_FUNDING = 3; +} + +enum NormalTransactionId { + UNSPECIFIED_TRANSACTION_TYPE = 0; + ROUND = 1; + KICKOFF = 2; + MOVE_TO_VAULT = 3; + PAYOUT = 4; + CHALLENGE = 5; + DISPROVE = 6; + DISPROVE_TIMEOUT = 7; + REIMBURSE = 8; + ALL_NEEDED_FOR_DEPOSIT = 9; + DUMMY = 10; + READY_TO_REIMBURSE = 11; + KICKOFF_NOT_FINALIZED = 12; + CHALLENGE_TIMEOUT = 13; + BURN_UNUSED_KICKOFF_CONNECTORS = 14; + YIELD_KICKOFF_TXID = 15; + REPLACEMENT_DEPOSIT = 17; + LATEST_BLOCKHASH_TIMEOUT = 18; + LATEST_BLOCKHASH = 19; + OPTIMISTIC_PAYOUT = 20; +} + +enum NumberedTransactionType { + UNSPECIFIED_INDEXED_TRANSACTION_TYPE = 0; + WATCHTOWER_CHALLENGE = 1; + OPERATOR_CHALLENGE_NACK = 2; + OPERATOR_CHALLENGE_ACK = 3; + ASSERT_TIMEOUT = 4; + UNSPENT_KICKOFF = 5; + MINI_ASSERT = 6; + WATCHTOWER_CHALLENGE_TIMEOUT = 7; +} + +message NumberedTransactionId { + NumberedTransactionType transaction_type = 1; + int32 index = 2; +} + +message GrpcTransactionId { + oneof id { + NormalTransactionId normal_transaction = 1; + NumberedTransactionId numbered_transaction = 2; + } +} + +message KickoffId { + bytes operator_xonly_pk = 1; + uint32 round_idx = 2; + uint32 kickoff_idx = 3; +} + +message TransactionRequest { + Outpoint deposit_outpoint = 1; + KickoffId kickoff_id = 2; +} + +// Includes the deposit params and the nonce gen initial responses (pubkeys and their signatures from all verifiers) +message DepositSignSession { + DepositParams deposit_params = 1; + repeated NonceGenFirstResponse nonce_gen_first_responses = 2; +} + +// Operator -------------------------------------------------------------------- 
+ +message OperatorConfig { + Outpoint collateral_funding_outpoint = 1; + string xonly_pk = 2; + string wallet_reimburse_address = 3; +} + +message OperatorParams { + oneof response { + // Operator's configuration. + OperatorConfig operator_details = 1; + // Winternitz pubkeys for each kickoff utxo (to commit blockhash). + WinternitzPubkey winternitz_pubkeys = 2; + // unspent kickoff signatures + SchnorrSig unspent_kickoff_sig = 3; + } +} + +message OperatorKeysWithDeposit { + OperatorKeys operator_keys = 1; + DepositParams deposit_params = 2; + bytes operator_xonly_pk = 3; +} + +message OperatorKeys { + // Winternitz pubkeys for each bitvm assert tx. + repeated WinternitzPubkey winternitz_pubkeys = 1; + // Hashes of preimages that will be used to ACK watchtower challenges. + repeated ChallengeACKDigest challenge_ack_digests = 2; +} + +message SchnorrSig { + bytes schnorr_sig = 1; +} + +message WithdrawParams { + // The ID of the withdrawal in Citrea + uint32 withdrawal_id = 1; + // User's [`bitcoin::sighash::TapSighashType::SinglePlusAnyoneCanPay`] + // signature + bytes input_signature = 2; + // User's UTXO to claim the deposit + Outpoint input_outpoint = 3; + // The withdrawal output's script_pubkey (user's signature is only valid for this pubkey) + bytes output_script_pubkey = 4; + // The withdrawal output's amount (user's signature is only valid for this amount) + uint64 output_amount = 5; +} + + +message FinalizedPayoutParams { + bytes payout_blockhash = 1; + Outpoint deposit_outpoint = 2; +} + +message XOnlyPublicKeyRpc { + bytes xonly_public_key = 1; +} + +message StoppedTasks { + repeated string stopped_tasks = 1; +} + +message EntityError { + string error = 1; +} + +message EntityStatus { + bool automation = 1; + optional string wallet_balance = 2; + optional uint32 tx_sender_synced_height = 3; + optional uint32 finalized_synced_height = 4; + optional uint32 hcp_last_proven_height = 5; + StoppedTasks stopped_tasks = 6; + optional uint32 rpc_tip_height = 
7; + optional uint32 bitcoin_syncer_synced_height = 8; + optional uint32 state_manager_next_height = 9; +} + +enum EntityType { + ENTITY_UNKNOWN = 0; + OPERATOR = 1; + VERIFIER = 2; +} + +message EntityId { + EntityType kind = 1; + string id = 2; +} + +message EntityStatusWithId { + EntityId entity_id = 1; + oneof status_result { + EntityStatus status = 2; + EntityError err = 3; + } +} + +message EntityStatuses { + repeated EntityStatusWithId entity_statuses = 1; +} + +// An operator is responsible for paying withdrawals. It has an unique ID and +// chain of UTXOs named `round_txs`. An operator also runs a verifier. These are +// connected to the same database and both have access to watchtowers' +// winternitz pubkeys. +service ClementineOperator { + // Returns the operator's xonly public key + // + // Used by aggregator inside setup + rpc GetXOnlyPublicKey(Empty) returns (XOnlyPublicKeyRpc) {} + + // Returns an operator's parameters. It will be called once, by the + // aggregator, to set all the public keys. + // + // # Returns + // + // Returns an [`OperatorParams`], which includes operator's configuration and + // Watchtower parameters. + // + // Used by aggregator inside setup + rpc GetParams(Empty) returns (stream OperatorParams) {} + + // Returns an operator's deposit keys. + // Deposit keys include Assert BitVM winternitz keys, and challenge ACK hashes. + // + // Used by aggregator inside new_deposit + rpc GetDepositKeys(DepositParams) returns (OperatorKeys) {} + + // Returns the current status of tasks running on the operator and their last synced heights. + rpc GetCurrentStatus(Empty) returns (EntityStatus) {} + + // Signs everything that includes Operator's burn connector. + // + // # Parameters + // + // - User's deposit information + // - Nonce metadata + // + // # Returns + // + // - Operator burn Schnorr signature + rpc DepositSign(DepositSignSession) returns (stream SchnorrSig) {} + + // Restarts the background tasks for the operator. 
+ rpc RestartBackgroundTasks(Empty) returns (Empty) {} + + // Prepares a withdrawal if it's profitable and the withdrawal is correct and registered in Citrea bridge contract. + // If withdrawal is accepted, the payout tx will be added to the TxSender and success is returned, otherwise an error is returned. + // If automation is disabled, the withdrawal will not be accepted and an error will be returned. + // Note: This is intended for operator's own use, so it doesn't include a signature from aggregator. + rpc InternalWithdraw(WithdrawParams) + returns (RawSignedTx) {} + + + // First, if verification address in operator's config is set, the signature in rpc is checked to see if it was signed by the verification address. + // Then prepares a withdrawal if it's profitable and the withdrawal is correct and registered in Citrea bridge contract. + // If withdrawal is accepted, the payout tx will be added to the TxSender and success is returned, otherwise an error is returned. + // If automation is disabled, the withdrawal will not be accepted and an error will be returned. + rpc Withdraw(WithdrawParamsWithSig) + returns (RawSignedTx) {} + + // For a given deposit outpoint, determines the next step in the kickoff process the operator is in, + // and returns the raw signed txs that the operator needs to send next, for enabling reimbursement process + // without automation. + // + // # Parameters + // - deposit_outpoint: Deposit outpoint to create the kickoff for + // + // # Returns + // - Raw signed txs that the operator needs to send next + rpc GetReimbursementTxs(Outpoint) returns (SignedTxsWithType) {} + + // Signs all tx's it can according to given transaction type (use it with AllNeededForDeposit to get almost all tx's) + // Creates the transactions denoted by the deposit and operator_idx, round_idx, and kickoff_idx. + // It will create the transaction and sign it with the operator's private key and/or saved nofn signatures. 
+ // + // # Parameters + // - deposit_params: User's deposit information + // - transaction_type: Requested Transaction type + // - kickoff_id: Operator's kickoff ID + // + // # Returns + // - Raw signed transactions that the entity can sign (no asserts and watchtower challenge) + // + // Only used in tests + rpc InternalCreateSignedTxs(TransactionRequest) returns (SignedTxsWithType) {} + + // Creates all assert transactions (AssertBegin, MiniAsserts, AssertEnd), signs them, and returns the raw txs + // in the same order. + // # Parameters + // - deposit_params: User's deposit information + // - kickoff_id: Operator's kickoff ID + // - commit_data: Commitment data for each MiniAssert tx's + // + // # Returns + // - Raw signed assert transactions + rpc InternalCreateAssertCommitmentTxs(TransactionRequest) returns (SignedTxsWithType) {} + + rpc InternalFinalizedPayout(FinalizedPayoutParams) returns (Txid) {} + + rpc InternalEndRound(Empty) returns (Empty) {} + + rpc Vergen(Empty) returns (VergenResponse) {} +} + +// Verifier -------------------------------------------------------------------- + +message VerifierParams { + bytes public_key = 1; +} + +message PartialSig { + bytes partial_sig = 1; +} + +message NonceGenRequest { + uint32 num_nonces = 1; +} + +message NonceGenFirstResponse { + // ID of the nonce session (used to store nonces in verifier's memory) + // The id is string representation of a u128 number + string id = 1; + // Number of nonces to generate + uint32 num_nonces = 2; +} +message NonceGenResponse { + oneof response { + NonceGenFirstResponse first_response = 1; + bytes pub_nonce = 2; + } +} + +message OptimisticWithdrawParams { + WithdrawParams withdrawal = 1; + // An ECDSA signature (of citrea/aggregator) over the withdrawal params + // to authenticate the withdrawal params. This will be signed manually by citrea + // after manual verification of the optimistic payout. 
+ optional string verification_signature = 2; +} + +message WithdrawParamsWithSig { + WithdrawParams withdrawal = 1; + // An ECDSA signature (of citrea/aggregator) over the withdrawal params + // to authenticate the withdrawal params. This will be signed manually by citrea + // after manual verification of the optimistic payout. + // This message contains same data as the one in Optimistic Payout signature, but with a different message name, + // so that the same signature can't be used for both optimistic payout and normal withdrawal. + optional string verification_signature = 2; +} + +// Input of the aggregator's withdraw function. +// It contains the withdrawal params along with the verification signature that signs the withdrawal params. +// It also contains the operator's xonly public keys that the withdrawal request should be sent to. If the list is empty, the withdrawal will be sent to all operators. +message AggregatorWithdrawalInput { + WithdrawParamsWithSig withdrawal = 1; + repeated XOnlyPublicKeyRpc operator_xonly_pks = 2; +} + +message OptimisticPayoutParams { + OptimisticWithdrawParams opt_withdrawal = 1; + NonceGenFirstResponse nonce_gen = 2; + bytes agg_nonce = 3; +} + +message VerifierDepositSignParams { + oneof params { + DepositSignSession deposit_sign_first_param = 1; + bytes agg_nonce = 2; + } +} + +message VerifierDepositFinalizeParams { + oneof params { + DepositSignSession deposit_sign_first_param = 1; + bytes schnorr_sig = 2; + bytes move_tx_agg_nonce = 3; + bytes emergency_stop_agg_nonce = 4; + } +} + +message VerifierDepositFinalizeResponse { + bytes move_to_vault_partial_sig = 1; + bytes emergency_stop_partial_sig = 2; +} + +message VerifierPublicKeys { + repeated bytes verifier_public_keys = 1; +} + +message TxDebugRequest { + uint32 tx_id = 1; +} + +message TxDebugSubmissionError { + string error_message = 1; + string timestamp = 2; +} + +message TxDebugFeePayerUtxo { + Txid txid = 1; + uint32 vout = 2; + uint64 amount = 3; + bool 
confirmed = 4; +} + +message TxMetadata { + // Optional outpoint of the deposit transaction + Outpoint deposit_outpoint = 1; + // Deposit identification + XOnlyPublicKeyRpc operator_xonly_pk = 2; + uint32 round_idx = 4; + uint32 kickoff_idx = 5; + // Transaction ID + GrpcTransactionId tx_type = 6; +} + +message TxDebugInfo { + uint32 id = 1; + bool is_active = 2; + string current_state = 3; + repeated TxDebugSubmissionError submission_errors = 4; + repeated TxDebugFeePayerUtxo fee_payer_utxos = 5; + string created_at = 6; + Txid txid = 7; + string fee_paying_type = 8; + uint32 fee_payer_utxos_count = 9; + uint32 fee_payer_utxos_confirmed_count = 10; + bytes raw_tx = 11; + TxMetadata metadata = 12; +} + +message XOnlyPublicKeys { + repeated bytes xonly_public_keys = 1; +} + + +message VergenResponse { + string response = 1; +} + +service ClementineVerifier { + // Returns verifiers' metadata. Needs to be called once per setup. + // + // Used by aggregator inside setup to let all verifiers know all other verifier pks + rpc GetParams(Empty) returns (VerifierParams) {} + + // Saves an operator. + // + // Used by aggregator inside setup to let all verifiers know all other operator pks + rpc SetOperator(stream OperatorParams) returns (Empty) {} + + // Sets the operator's winternitz keys and challenge ACK hashes and saves them + // into the db. + // + // Used by aggregator inside new_deposit to let all verifiers know all other operators' deposit information + rpc SetOperatorKeys(OperatorKeysWithDeposit) returns (Empty) {} + + // Generates nonces for a deposit. + // + // # Returns + // + // Nonce metadata followed by nonces. + // + // Used by aggregator inside new_deposit + rpc NonceGen(NonceGenRequest) returns (stream NonceGenResponse) {} + + // Signs deposit with given aggNonces and verifier's secNonce using + // nonce_id. 
+ // + // Used by aggregator inside new_deposit + rpc DepositSign(stream VerifierDepositSignParams) + returns (stream PartialSig) {} + + // Signs the optimistic payout tx with given aggNonce and withdrawal info. + rpc OptimisticPayoutSign(OptimisticPayoutParams) returns (PartialSig) {} + + // Verifies every signature and signs move_tx. + // + // Used by aggregator inside new_deposit + rpc DepositFinalize(stream VerifierDepositFinalizeParams) + returns (VerifierDepositFinalizeResponse) {} + + // Debug a transaction by retrieving its current state and history + rpc DebugTx(TxDebugRequest) returns (TxDebugInfo) {} + + // Restarts the background tasks for the verifier. + rpc RestartBackgroundTasks(Empty) returns (Empty) {} + + // Checks if the kickoff tx is malicious and if so, try to send all necessary txs to punish the operator + rpc InternalHandleKickoff(Txid) returns (Empty) {} + + // Returns the current status of tasks running on the verifier and their last synced heights. + rpc GetCurrentStatus(Empty) returns (EntityStatus) {} + + // 1. Signs all tx's it can according to given transaction type (use it with AllNeededForDeposit to get almost all tx's) + // 2. Creates the transactions denoted by the deposit and operator_idx, round_idx, and kickoff_idx. + // 3. It will create the transaction and sign it with the operator's private key and/or saved nofn signatures. 
+ // + // # Parameters + // - deposit_params: User's deposit information + // - transaction_type: Requested Transaction type + // - kickoff_id: Operator's kickoff ID + // + // # Returns + // - Raw signed transactions that the entity can sign (no asserts and watchtower challenge) + rpc InternalCreateSignedTxs(TransactionRequest) returns (SignedTxsWithType) {} + + // Signs the verifiers own watchtower challenge tx in the corresponding + // kickoff and returns the signed raw tx + rpc InternalCreateWatchtowerChallenge(TransactionRequest) returns (RawTxWithRbfInfo) {} + + rpc Vergen(Empty) returns (VergenResponse) {} +} + +// Aggregator ------------------------------------------------------------------ + +message RawSignedTx { + bytes raw_tx = 1; +} + +message SendTxRequest { + RawSignedTx raw_tx = 1; + FeeType fee_type = 2; +} + +message RawSignedTxs { + repeated RawSignedTx raw_txs = 1; +} + +message SignedTxWithType { + GrpcTransactionId transaction_type = 1; + bytes raw_tx = 2; +} + +message SignedTxsWithType { + repeated SignedTxWithType signed_txs = 1; +} + +message RbfSigningInfoRpc { + bytes merkle_root = 1; + uint32 vout = 2; +} + +message RawTxWithRbfInfo { + bytes raw_tx = 1; + RbfSigningInfoRpc rbf_info = 2; +} + +message OperatorWithrawalResponse { + XOnlyPublicKeyRpc operator_xonly_pk = 1; + oneof response { + RawSignedTx raw_tx = 2; + string error = 3; + } +} + +message AggregatorWithdrawResponse { + repeated OperatorWithrawalResponse withdraw_responses = 1; +} + +message GetEmergencyStopTxRequest { + repeated Txid txids = 1; +} + +message GetEmergencyStopTxResponse { + repeated Txid txids = 1; + repeated bytes encrypted_emergency_stop_txs = 2; +} + +message SendMoveTxRequest { + RawSignedTx raw_tx = 1; + Outpoint deposit_outpoint = 2; +} + +message GetEntityStatusesRequest { + bool restart_tasks = 1; +} + +service ClementineAggregator { + rpc GetNofnAggregatedXonlyPk(Empty) returns (NofnResponse) {} + + // Sets up the system of verifiers, watchtowers and 
operators by: + // + // 1. Collects verifier keys from each verifier + // 2. Distributes these verifier keys to all verifiers + // 3. Collects all operator configs from each operator + // 4. Distributes these operator configs to all verifiers + // + // Used by the clementine-backend service + rpc Setup(Empty) returns (VerifierPublicKeys) {} + + // This will call, DepositNonceGen for every verifier, + // then it will aggregate one by one and then send it to DepositSign, + // then it will aggregate the partial sigs and send it to DepositFinalize, + // this will also call the operator to get their signatures and send it to + // DepositFinalize then it will collect the partial sigs and create the move + // tx. + // + // Used by the clementine-backend service to initiate a deposit + rpc NewDeposit(Deposit) returns (RawSignedTx) {} + + // Call's withdraw on all operators + // Used by the clementine-backend service to initiate a withdrawal + // If the operator's xonly public keys list is empty, the withdrawal will be sent to all operators. + // If not, only the operators in the list will be sent the withdrawal request. + rpc Withdraw(AggregatorWithdrawalInput) + returns (AggregatorWithdrawResponse) {} + + // Perform an optimistic payout to reimburse a peg-out from Citrea + rpc OptimisticPayout(OptimisticWithdrawParams) returns (RawSignedTx) {} + + // Send a pre-signed tx to the network + rpc InternalSendTx(SendTxRequest) returns (Empty) {} + + rpc SendMoveToVaultTx(SendMoveTxRequest) returns (Txid) {} + + // Returns the current status of tasks running on the operators/verifiers. + // If restart_tasks is true, it will restart the tasks on the entities if they are stopped. + rpc GetEntityStatuses(GetEntityStatusesRequest) returns (EntityStatuses) {} + + // Creates an emergency stop tx that won't be broadcasted. + // Tx will have around 3 sats/vbyte fee. + // Set add_anchor to true to add an anchor output for cpfp.. 
+ rpc InternalGetEmergencyStopTx(GetEmergencyStopTxRequest) returns (GetEmergencyStopTxResponse) {} + + rpc Vergen(Empty) returns (VergenResponse) {} +} diff --git a/core/src/rpc/clementine.rs b/core/src/rpc/clementine.rs new file mode 100644 index 000000000..674fee0c5 --- /dev/null +++ b/core/src/rpc/clementine.rs @@ -0,0 +1,4843 @@ +// This file is @generated by prost-build. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Empty {} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Txid { + #[prost(bytes = "vec", tag = "1")] + pub txid: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Outpoint { + #[prost(message, optional, tag = "1")] + pub txid: ::core::option::Option, + #[prost(uint32, tag = "2")] + pub vout: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NofnResponse { + #[prost(bytes = "vec", tag = "1")] + pub nofn_xonly_pk: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "2")] + pub num_verifiers: u32, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct NormalSignatureId { + #[prost(enumeration = "NormalSignatureKind", tag = "1")] + pub signature_kind: i32, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct NumberedSignatureId { + #[prost(enumeration = "NumberedSignatureKind", tag = "1")] + pub signature_kind: i32, + #[prost(int32, tag = "2")] + pub idx: i32, +} +/// A tagged signature struct that identifies the transaction-input that the signature is for. +/// The id is left as NotStored for signatures that are created on the fly by the operator (they're also not stored). +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaggedSignature { + #[prost(bytes = "vec", tag = "3")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(oneof = "tagged_signature::SignatureId", tags = "1, 2")] + pub signature_id: ::core::option::Option, +} +/// Nested message and enum types in `TaggedSignature`. 
+pub mod tagged_signature { + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum SignatureId { + #[prost(message, tag = "1")] + NormalSignature(super::NormalSignatureId), + #[prost(message, tag = "2")] + NumberedSignature(super::NumberedSignatureId), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DepositSignatures { + #[prost(message, repeated, tag = "1")] + pub signatures: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ChallengeAckDigest { + #[prost(bytes = "vec", tag = "1")] + pub hash: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WinternitzPubkey { + #[prost(bytes = "vec", repeated, tag = "3")] + pub digit_pubkey: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DepositParams { + #[prost(message, optional, tag = "1")] + pub deposit: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub actors: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub security_council: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SecurityCouncil { + #[prost(bytes = "vec", repeated, tag = "1")] + pub pks: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(uint32, tag = "2")] + pub threshold: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Deposit { + /// / User's deposit UTXO. + #[prost(message, optional, tag = "1")] + pub deposit_outpoint: ::core::option::Option, + #[prost(oneof = "deposit::DepositData", tags = "2, 3")] + pub deposit_data: ::core::option::Option, +} +/// Nested message and enum types in `Deposit`. 
+pub mod deposit { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum DepositData { + #[prost(message, tag = "2")] + BaseDeposit(super::BaseDeposit), + #[prost(message, tag = "3")] + ReplacementDeposit(super::ReplacementDeposit), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Actors { + /// / Public keys of verifiers that will participate in the deposit. + #[prost(message, optional, tag = "1")] + pub verifiers: ::core::option::Option, + /// / X-only public keys of watchtowers that will participate in the deposit. + /// / NOTE: verifiers are automatically considered watchtowers. This field is only for additional watchtowers. + #[prost(message, optional, tag = "2")] + pub watchtowers: ::core::option::Option, + /// / X-only public keys of operators that will participate in the deposit. + #[prost(message, optional, tag = "3")] + pub operators: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReplacementDeposit { + /// Move to vault txid that is being replaced. + #[prost(message, optional, tag = "1")] + pub old_move_txid: ::core::option::Option, +} +/// A new original deposit request's details. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BaseDeposit { + /// User's EVM address. + #[prost(bytes = "vec", tag = "1")] + pub evm_address: ::prost::alloc::vec::Vec, + /// User's recovery taproot address. + #[prost(string, tag = "2")] + pub recovery_taproot_address: ::prost::alloc::string::String, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct NumberedTransactionId { + #[prost(enumeration = "NumberedTransactionType", tag = "1")] + pub transaction_type: i32, + #[prost(int32, tag = "2")] + pub index: i32, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct GrpcTransactionId { + #[prost(oneof = "grpc_transaction_id::Id", tags = "1, 2")] + pub id: ::core::option::Option, +} +/// Nested message and enum types in `GrpcTransactionId`. 
+pub mod grpc_transaction_id { + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Id { + #[prost(enumeration = "super::NormalTransactionId", tag = "1")] + NormalTransaction(i32), + #[prost(message, tag = "2")] + NumberedTransaction(super::NumberedTransactionId), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KickoffId { + #[prost(bytes = "vec", tag = "1")] + pub operator_xonly_pk: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "2")] + pub round_idx: u32, + #[prost(uint32, tag = "3")] + pub kickoff_idx: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionRequest { + #[prost(message, optional, tag = "1")] + pub deposit_outpoint: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub kickoff_id: ::core::option::Option, +} +/// Includes the deposit params and the nonce gen initial responses (pubkeys and their signatures from all verifiers) +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DepositSignSession { + #[prost(message, optional, tag = "1")] + pub deposit_params: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub nonce_gen_first_responses: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OperatorConfig { + #[prost(message, optional, tag = "1")] + pub collateral_funding_outpoint: ::core::option::Option, + #[prost(string, tag = "2")] + pub xonly_pk: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub wallet_reimburse_address: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OperatorParams { + #[prost(oneof = "operator_params::Response", tags = "1, 2, 3")] + pub response: ::core::option::Option, +} +/// Nested message and enum types in `OperatorParams`. +pub mod operator_params { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Response { + /// Operator's configuration. 
+ #[prost(message, tag = "1")] + OperatorDetails(super::OperatorConfig), + /// Winternitz pubkeys for each kickoff utxo (to commit blockhash). + #[prost(message, tag = "2")] + WinternitzPubkeys(super::WinternitzPubkey), + /// unspent kickoff signatures + #[prost(message, tag = "3")] + UnspentKickoffSig(super::SchnorrSig), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OperatorKeysWithDeposit { + #[prost(message, optional, tag = "1")] + pub operator_keys: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub deposit_params: ::core::option::Option, + #[prost(bytes = "vec", tag = "3")] + pub operator_xonly_pk: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OperatorKeys { + /// Winternitz pubkeys for each bitvm assert tx. + #[prost(message, repeated, tag = "1")] + pub winternitz_pubkeys: ::prost::alloc::vec::Vec, + /// Hashes of preimages that will be used to ACK watchtower challenges. + #[prost(message, repeated, tag = "2")] + pub challenge_ack_digests: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SchnorrSig { + #[prost(bytes = "vec", tag = "1")] + pub schnorr_sig: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WithdrawParams { + /// The ID of the withdrawal in Citrea + #[prost(uint32, tag = "1")] + pub withdrawal_id: u32, + /// User's \[`bitcoin::sighash::TapSighashType::SinglePlusAnyoneCanPay`\] + /// signature + #[prost(bytes = "vec", tag = "2")] + pub input_signature: ::prost::alloc::vec::Vec, + /// User's UTXO to claim the deposit + #[prost(message, optional, tag = "3")] + pub input_outpoint: ::core::option::Option, + /// The withdrawal output's script_pubkey (user's signature is only valid for this pubkey) + #[prost(bytes = "vec", tag = "4")] + pub output_script_pubkey: ::prost::alloc::vec::Vec, + /// The withdrawal output's amount (user's signature is only valid for this amount) + #[prost(uint64, tag = 
"5")] + pub output_amount: u64, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FinalizedPayoutParams { + #[prost(bytes = "vec", tag = "1")] + pub payout_blockhash: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub deposit_outpoint: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct XOnlyPublicKeyRpc { + #[prost(bytes = "vec", tag = "1")] + pub xonly_public_key: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StoppedTasks { + #[prost(string, repeated, tag = "1")] + pub stopped_tasks: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EntityError { + #[prost(string, tag = "1")] + pub error: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EntityStatus { + #[prost(bool, tag = "1")] + pub automation: bool, + #[prost(string, optional, tag = "2")] + pub wallet_balance: ::core::option::Option<::prost::alloc::string::String>, + #[prost(uint32, optional, tag = "3")] + pub tx_sender_synced_height: ::core::option::Option, + #[prost(uint32, optional, tag = "4")] + pub finalized_synced_height: ::core::option::Option, + #[prost(uint32, optional, tag = "5")] + pub hcp_last_proven_height: ::core::option::Option, + #[prost(message, optional, tag = "6")] + pub stopped_tasks: ::core::option::Option, + #[prost(uint32, optional, tag = "7")] + pub rpc_tip_height: ::core::option::Option, + #[prost(uint32, optional, tag = "8")] + pub bitcoin_syncer_synced_height: ::core::option::Option, + #[prost(uint32, optional, tag = "9")] + pub state_manager_next_height: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EntityId { + #[prost(enumeration = "EntityType", tag = "1")] + pub kind: i32, + #[prost(string, tag = "2")] + pub id: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct 
EntityStatusWithId { + #[prost(message, optional, tag = "1")] + pub entity_id: ::core::option::Option, + #[prost(oneof = "entity_status_with_id::StatusResult", tags = "2, 3")] + pub status_result: ::core::option::Option, +} +/// Nested message and enum types in `EntityStatusWithId`. +pub mod entity_status_with_id { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StatusResult { + #[prost(message, tag = "2")] + Status(super::EntityStatus), + #[prost(message, tag = "3")] + Err(super::EntityError), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EntityStatuses { + #[prost(message, repeated, tag = "1")] + pub entity_statuses: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VerifierParams { + #[prost(bytes = "vec", tag = "1")] + pub public_key: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PartialSig { + #[prost(bytes = "vec", tag = "1")] + pub partial_sig: ::prost::alloc::vec::Vec, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct NonceGenRequest { + #[prost(uint32, tag = "1")] + pub num_nonces: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NonceGenFirstResponse { + /// ID of the nonce session (used to store nonces in verifier's memory) + /// The id is string representation of a u128 number + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Number of nonces to generate + #[prost(uint32, tag = "2")] + pub num_nonces: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NonceGenResponse { + #[prost(oneof = "nonce_gen_response::Response", tags = "1, 2")] + pub response: ::core::option::Option, +} +/// Nested message and enum types in `NonceGenResponse`. 
+pub mod nonce_gen_response { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Response { + #[prost(message, tag = "1")] + FirstResponse(super::NonceGenFirstResponse), + #[prost(bytes, tag = "2")] + PubNonce(::prost::alloc::vec::Vec), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OptimisticWithdrawParams { + #[prost(message, optional, tag = "1")] + pub withdrawal: ::core::option::Option, + /// An ECDSA signature (of citrea/aggregator) over the withdrawal params + /// to authenticate the withdrawal params. This will be signed manually by citrea + /// after manual verification of the optimistic payout. + #[prost(string, optional, tag = "2")] + pub verification_signature: ::core::option::Option<::prost::alloc::string::String>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WithdrawParamsWithSig { + #[prost(message, optional, tag = "1")] + pub withdrawal: ::core::option::Option, + /// An ECDSA signature (of citrea/aggregator) over the withdrawal params + /// to authenticate the withdrawal params. This will be signed manually by citrea + /// after manual verification of the optimistic payout. + /// This message contains same data as the one in Optimistic Payout signature, but with a different message name, + /// so that the same signature can't be used for both optimistic payout and normal withdrawal. + #[prost(string, optional, tag = "2")] + pub verification_signature: ::core::option::Option<::prost::alloc::string::String>, +} +/// Input of the aggregator's withdraw function. +/// It contains the withdrawal params along with the verification signature that signs the withdrawal params. +/// It also contains the operator's xonly public keys that the withdrawal request should be sent to. If the list is empty, the withdrawal will be sent to all operators. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AggregatorWithdrawalInput { + #[prost(message, optional, tag = "1")] + pub withdrawal: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub operator_xonly_pks: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OptimisticPayoutParams { + #[prost(message, optional, tag = "1")] + pub opt_withdrawal: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub nonce_gen: ::core::option::Option, + #[prost(bytes = "vec", tag = "3")] + pub agg_nonce: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VerifierDepositSignParams { + #[prost(oneof = "verifier_deposit_sign_params::Params", tags = "1, 2")] + pub params: ::core::option::Option, +} +/// Nested message and enum types in `VerifierDepositSignParams`. +pub mod verifier_deposit_sign_params { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Params { + #[prost(message, tag = "1")] + DepositSignFirstParam(super::DepositSignSession), + #[prost(bytes, tag = "2")] + AggNonce(::prost::alloc::vec::Vec), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VerifierDepositFinalizeParams { + #[prost(oneof = "verifier_deposit_finalize_params::Params", tags = "1, 2, 3, 4")] + pub params: ::core::option::Option, +} +/// Nested message and enum types in `VerifierDepositFinalizeParams`. 
+pub mod verifier_deposit_finalize_params { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Params { + #[prost(message, tag = "1")] + DepositSignFirstParam(super::DepositSignSession), + #[prost(bytes, tag = "2")] + SchnorrSig(::prost::alloc::vec::Vec), + #[prost(bytes, tag = "3")] + MoveTxAggNonce(::prost::alloc::vec::Vec), + #[prost(bytes, tag = "4")] + EmergencyStopAggNonce(::prost::alloc::vec::Vec), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VerifierDepositFinalizeResponse { + #[prost(bytes = "vec", tag = "1")] + pub move_to_vault_partial_sig: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub emergency_stop_partial_sig: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VerifierPublicKeys { + #[prost(bytes = "vec", repeated, tag = "1")] + pub verifier_public_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct TxDebugRequest { + #[prost(uint32, tag = "1")] + pub tx_id: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxDebugSubmissionError { + #[prost(string, tag = "1")] + pub error_message: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub timestamp: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxDebugFeePayerUtxo { + #[prost(message, optional, tag = "1")] + pub txid: ::core::option::Option, + #[prost(uint32, tag = "2")] + pub vout: u32, + #[prost(uint64, tag = "3")] + pub amount: u64, + #[prost(bool, tag = "4")] + pub confirmed: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxMetadata { + /// Optional outpoint of the deposit transaction + #[prost(message, optional, tag = "1")] + pub deposit_outpoint: ::core::option::Option, + /// Deposit identification + #[prost(message, optional, tag = "2")] + pub operator_xonly_pk: ::core::option::Option, + #[prost(uint32, tag = "4")] + pub round_idx: u32, + 
#[prost(uint32, tag = "5")] + pub kickoff_idx: u32, + /// Transaction ID + #[prost(message, optional, tag = "6")] + pub tx_type: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxDebugInfo { + #[prost(uint32, tag = "1")] + pub id: u32, + #[prost(bool, tag = "2")] + pub is_active: bool, + #[prost(string, tag = "3")] + pub current_state: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "4")] + pub submission_errors: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub fee_payer_utxos: ::prost::alloc::vec::Vec, + #[prost(string, tag = "6")] + pub created_at: ::prost::alloc::string::String, + #[prost(message, optional, tag = "7")] + pub txid: ::core::option::Option, + #[prost(string, tag = "8")] + pub fee_paying_type: ::prost::alloc::string::String, + #[prost(uint32, tag = "9")] + pub fee_payer_utxos_count: u32, + #[prost(uint32, tag = "10")] + pub fee_payer_utxos_confirmed_count: u32, + #[prost(bytes = "vec", tag = "11")] + pub raw_tx: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "12")] + pub metadata: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct XOnlyPublicKeys { + #[prost(bytes = "vec", repeated, tag = "1")] + pub xonly_public_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VergenResponse { + #[prost(string, tag = "1")] + pub response: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RawSignedTx { + #[prost(bytes = "vec", tag = "1")] + pub raw_tx: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendTxRequest { + #[prost(message, optional, tag = "1")] + pub raw_tx: ::core::option::Option, + #[prost(enumeration = "FeeType", tag = "2")] + pub fee_type: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RawSignedTxs { + #[prost(message, repeated, tag = "1")] + pub 
raw_txs: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignedTxWithType { + #[prost(message, optional, tag = "1")] + pub transaction_type: ::core::option::Option, + #[prost(bytes = "vec", tag = "2")] + pub raw_tx: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignedTxsWithType { + #[prost(message, repeated, tag = "1")] + pub signed_txs: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RbfSigningInfoRpc { + #[prost(bytes = "vec", tag = "1")] + pub merkle_root: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "2")] + pub vout: u32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RawTxWithRbfInfo { + #[prost(bytes = "vec", tag = "1")] + pub raw_tx: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub rbf_info: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OperatorWithrawalResponse { + #[prost(message, optional, tag = "1")] + pub operator_xonly_pk: ::core::option::Option, + #[prost(oneof = "operator_withrawal_response::Response", tags = "2, 3")] + pub response: ::core::option::Option, +} +/// Nested message and enum types in `OperatorWithrawalResponse`. 
+pub mod operator_withrawal_response { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Response { + #[prost(message, tag = "2")] + RawTx(super::RawSignedTx), + #[prost(string, tag = "3")] + Error(::prost::alloc::string::String), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AggregatorWithdrawResponse { + #[prost(message, repeated, tag = "1")] + pub withdraw_responses: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetEmergencyStopTxRequest { + #[prost(message, repeated, tag = "1")] + pub txids: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetEmergencyStopTxResponse { + #[prost(message, repeated, tag = "1")] + pub txids: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", repeated, tag = "2")] + pub encrypted_emergency_stop_txs: ::prost::alloc::vec::Vec< + ::prost::alloc::vec::Vec, + >, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendMoveTxRequest { + #[prost(message, optional, tag = "1")] + pub raw_tx: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub deposit_outpoint: ::core::option::Option, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct GetEntityStatusesRequest { + #[prost(bool, tag = "1")] + pub restart_tasks: bool, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NormalSignatureKind { + NormalSignatureUnknown = 0, + /// Used for TxHandlers that verifiers don't care. These will have signatures created + /// by the operator on the fly. 
+ OperatorSighashDefault = 1, + Challenge = 2, + DisproveTimeout2 = 3, + Disprove2 = 4, + Reimburse1 = 5, + KickoffNotFinalized1 = 6, + KickoffNotFinalized2 = 7, + Reimburse2 = 8, + NoSignature = 9, + ChallengeTimeout2 = 10, + MiniAssert1 = 11, + OperatorChallengeAck1 = 12, + NotStored = 13, + YieldKickoffTxid = 14, + LatestBlockhashTimeout1 = 15, + LatestBlockhashTimeout2 = 16, + LatestBlockhashTimeout3 = 17, + LatestBlockhash = 18, +} +impl NormalSignatureKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::NormalSignatureUnknown => "NormalSignatureUnknown", + Self::OperatorSighashDefault => "OperatorSighashDefault", + Self::Challenge => "Challenge", + Self::DisproveTimeout2 => "DisproveTimeout2", + Self::Disprove2 => "Disprove2", + Self::Reimburse1 => "Reimburse1", + Self::KickoffNotFinalized1 => "KickoffNotFinalized1", + Self::KickoffNotFinalized2 => "KickoffNotFinalized2", + Self::Reimburse2 => "Reimburse2", + Self::NoSignature => "NoSignature", + Self::ChallengeTimeout2 => "ChallengeTimeout2", + Self::MiniAssert1 => "MiniAssert1", + Self::OperatorChallengeAck1 => "OperatorChallengeAck1", + Self::NotStored => "NotStored", + Self::YieldKickoffTxid => "YieldKickoffTxid", + Self::LatestBlockhashTimeout1 => "LatestBlockhashTimeout1", + Self::LatestBlockhashTimeout2 => "LatestBlockhashTimeout2", + Self::LatestBlockhashTimeout3 => "LatestBlockhashTimeout3", + Self::LatestBlockhash => "LatestBlockhash", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NormalSignatureUnknown" => Some(Self::NormalSignatureUnknown), + "OperatorSighashDefault" => Some(Self::OperatorSighashDefault), + "Challenge" => Some(Self::Challenge), + "DisproveTimeout2" => Some(Self::DisproveTimeout2), + "Disprove2" => Some(Self::Disprove2), + "Reimburse1" => Some(Self::Reimburse1), + "KickoffNotFinalized1" => Some(Self::KickoffNotFinalized1), + "KickoffNotFinalized2" => Some(Self::KickoffNotFinalized2), + "Reimburse2" => Some(Self::Reimburse2), + "NoSignature" => Some(Self::NoSignature), + "ChallengeTimeout2" => Some(Self::ChallengeTimeout2), + "MiniAssert1" => Some(Self::MiniAssert1), + "OperatorChallengeAck1" => Some(Self::OperatorChallengeAck1), + "NotStored" => Some(Self::NotStored), + "YieldKickoffTxid" => Some(Self::YieldKickoffTxid), + "LatestBlockhashTimeout1" => Some(Self::LatestBlockhashTimeout1), + "LatestBlockhashTimeout2" => Some(Self::LatestBlockhashTimeout2), + "LatestBlockhashTimeout3" => Some(Self::LatestBlockhashTimeout3), + "LatestBlockhash" => Some(Self::LatestBlockhash), + _ => None, + } + } +} +/// Signatures that are needed multiple times per an operators kickoff. +/// Some watchtower sigs are needed once per watchtower. +/// Asserts are needed multiple times +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NumberedSignatureKind { + NumberedSignatureUnknown = 0, + /// Used for TxHandlers that verifiers don't care. These will have signatures created + /// by the operator on the fly. 
+ NumberedNotStored = 1, + OperatorChallengeNack1 = 2, + OperatorChallengeNack2 = 3, + OperatorChallengeNack3 = 4, + AssertTimeout1 = 5, + AssertTimeout2 = 6, + AssertTimeout3 = 7, + UnspentKickoff1 = 8, + UnspentKickoff2 = 9, + WatchtowerChallengeTimeout1 = 10, + WatchtowerChallengeTimeout2 = 11, + WatchtowerChallenge = 12, +} +impl NumberedSignatureKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::NumberedSignatureUnknown => "NumberedSignatureUnknown", + Self::NumberedNotStored => "NumberedNotStored", + Self::OperatorChallengeNack1 => "OperatorChallengeNack1", + Self::OperatorChallengeNack2 => "OperatorChallengeNack2", + Self::OperatorChallengeNack3 => "OperatorChallengeNack3", + Self::AssertTimeout1 => "AssertTimeout1", + Self::AssertTimeout2 => "AssertTimeout2", + Self::AssertTimeout3 => "AssertTimeout3", + Self::UnspentKickoff1 => "UnspentKickoff1", + Self::UnspentKickoff2 => "UnspentKickoff2", + Self::WatchtowerChallengeTimeout1 => "WatchtowerChallengeTimeout1", + Self::WatchtowerChallengeTimeout2 => "WatchtowerChallengeTimeout2", + Self::WatchtowerChallenge => "WatchtowerChallenge", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NumberedSignatureUnknown" => Some(Self::NumberedSignatureUnknown), + "NumberedNotStored" => Some(Self::NumberedNotStored), + "OperatorChallengeNack1" => Some(Self::OperatorChallengeNack1), + "OperatorChallengeNack2" => Some(Self::OperatorChallengeNack2), + "OperatorChallengeNack3" => Some(Self::OperatorChallengeNack3), + "AssertTimeout1" => Some(Self::AssertTimeout1), + "AssertTimeout2" => Some(Self::AssertTimeout2), + "AssertTimeout3" => Some(Self::AssertTimeout3), + "UnspentKickoff1" => Some(Self::UnspentKickoff1), + "UnspentKickoff2" => Some(Self::UnspentKickoff2), + "WatchtowerChallengeTimeout1" => Some(Self::WatchtowerChallengeTimeout1), + "WatchtowerChallengeTimeout2" => Some(Self::WatchtowerChallengeTimeout2), + "WatchtowerChallenge" => Some(Self::WatchtowerChallenge), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FeeType { + Unspecified = 0, + Cpfp = 1, + Rbf = 2, + NoFunding = 3, +} +impl FeeType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "UNSPECIFIED", + Self::Cpfp => "CPFP", + Self::Rbf => "RBF", + Self::NoFunding => "NO_FUNDING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNSPECIFIED" => Some(Self::Unspecified), + "CPFP" => Some(Self::Cpfp), + "RBF" => Some(Self::Rbf), + "NO_FUNDING" => Some(Self::NoFunding), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NormalTransactionId { + UnspecifiedTransactionType = 0, + Round = 1, + Kickoff = 2, + MoveToVault = 3, + Payout = 4, + Challenge = 5, + Disprove = 6, + DisproveTimeout = 7, + Reimburse = 8, + AllNeededForDeposit = 9, + Dummy = 10, + ReadyToReimburse = 11, + KickoffNotFinalized = 12, + ChallengeTimeout = 13, + BurnUnusedKickoffConnectors = 14, + YieldKickoffTxid = 15, + ReplacementDeposit = 17, + LatestBlockhashTimeout = 18, + LatestBlockhash = 19, + OptimisticPayout = 20, +} +impl NormalTransactionId { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::UnspecifiedTransactionType => "UNSPECIFIED_TRANSACTION_TYPE", + Self::Round => "ROUND", + Self::Kickoff => "KICKOFF", + Self::MoveToVault => "MOVE_TO_VAULT", + Self::Payout => "PAYOUT", + Self::Challenge => "CHALLENGE", + Self::Disprove => "DISPROVE", + Self::DisproveTimeout => "DISPROVE_TIMEOUT", + Self::Reimburse => "REIMBURSE", + Self::AllNeededForDeposit => "ALL_NEEDED_FOR_DEPOSIT", + Self::Dummy => "DUMMY", + Self::ReadyToReimburse => "READY_TO_REIMBURSE", + Self::KickoffNotFinalized => "KICKOFF_NOT_FINALIZED", + Self::ChallengeTimeout => "CHALLENGE_TIMEOUT", + Self::BurnUnusedKickoffConnectors => "BURN_UNUSED_KICKOFF_CONNECTORS", + Self::YieldKickoffTxid => "YIELD_KICKOFF_TXID", + Self::ReplacementDeposit => "REPLACEMENT_DEPOSIT", + Self::LatestBlockhashTimeout => "LATEST_BLOCKHASH_TIMEOUT", + Self::LatestBlockhash => "LATEST_BLOCKHASH", + Self::OptimisticPayout => "OPTIMISTIC_PAYOUT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNSPECIFIED_TRANSACTION_TYPE" => Some(Self::UnspecifiedTransactionType), + "ROUND" => Some(Self::Round), + "KICKOFF" => Some(Self::Kickoff), + "MOVE_TO_VAULT" => Some(Self::MoveToVault), + "PAYOUT" => Some(Self::Payout), + "CHALLENGE" => Some(Self::Challenge), + "DISPROVE" => Some(Self::Disprove), + "DISPROVE_TIMEOUT" => Some(Self::DisproveTimeout), + "REIMBURSE" => Some(Self::Reimburse), + "ALL_NEEDED_FOR_DEPOSIT" => Some(Self::AllNeededForDeposit), + "DUMMY" => Some(Self::Dummy), + "READY_TO_REIMBURSE" => Some(Self::ReadyToReimburse), + "KICKOFF_NOT_FINALIZED" => Some(Self::KickoffNotFinalized), + "CHALLENGE_TIMEOUT" => Some(Self::ChallengeTimeout), + "BURN_UNUSED_KICKOFF_CONNECTORS" => Some(Self::BurnUnusedKickoffConnectors), + "YIELD_KICKOFF_TXID" => Some(Self::YieldKickoffTxid), + "REPLACEMENT_DEPOSIT" => Some(Self::ReplacementDeposit), + "LATEST_BLOCKHASH_TIMEOUT" => Some(Self::LatestBlockhashTimeout), + "LATEST_BLOCKHASH" => Some(Self::LatestBlockhash), + "OPTIMISTIC_PAYOUT" => Some(Self::OptimisticPayout), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NumberedTransactionType { + UnspecifiedIndexedTransactionType = 0, + WatchtowerChallenge = 1, + OperatorChallengeNack = 2, + OperatorChallengeAck = 3, + AssertTimeout = 4, + UnspentKickoff = 5, + MiniAssert = 6, + WatchtowerChallengeTimeout = 7, +} +impl NumberedTransactionType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::UnspecifiedIndexedTransactionType => { + "UNSPECIFIED_INDEXED_TRANSACTION_TYPE" + } + Self::WatchtowerChallenge => "WATCHTOWER_CHALLENGE", + Self::OperatorChallengeNack => "OPERATOR_CHALLENGE_NACK", + Self::OperatorChallengeAck => "OPERATOR_CHALLENGE_ACK", + Self::AssertTimeout => "ASSERT_TIMEOUT", + Self::UnspentKickoff => "UNSPENT_KICKOFF", + Self::MiniAssert => "MINI_ASSERT", + Self::WatchtowerChallengeTimeout => "WATCHTOWER_CHALLENGE_TIMEOUT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNSPECIFIED_INDEXED_TRANSACTION_TYPE" => { + Some(Self::UnspecifiedIndexedTransactionType) + } + "WATCHTOWER_CHALLENGE" => Some(Self::WatchtowerChallenge), + "OPERATOR_CHALLENGE_NACK" => Some(Self::OperatorChallengeNack), + "OPERATOR_CHALLENGE_ACK" => Some(Self::OperatorChallengeAck), + "ASSERT_TIMEOUT" => Some(Self::AssertTimeout), + "UNSPENT_KICKOFF" => Some(Self::UnspentKickoff), + "MINI_ASSERT" => Some(Self::MiniAssert), + "WATCHTOWER_CHALLENGE_TIMEOUT" => Some(Self::WatchtowerChallengeTimeout), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum EntityType { + EntityUnknown = 0, + Operator = 1, + Verifier = 2, +} +impl EntityType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::EntityUnknown => "ENTITY_UNKNOWN", + Self::Operator => "OPERATOR", + Self::Verifier => "VERIFIER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ENTITY_UNKNOWN" => Some(Self::EntityUnknown), + "OPERATOR" => Some(Self::Operator), + "VERIFIER" => Some(Self::Verifier), + _ => None, + } + } +} +/// Generated client implementations. +pub mod clementine_operator_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// An operator is responsible for paying withdrawals. It has an unique ID and + /// chain of UTXOs named `round_txs`. An operator also runs a verifier. These are + /// connected to the same database and both have access to watchtowers' + /// winternitz pubkeys. + #[derive(Debug, Clone)] + pub struct ClementineOperatorClient { + inner: tonic::client::Grpc, + } + impl ClementineOperatorClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ClementineOperatorClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ClementineOperatorClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + ClementineOperatorClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress 
requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Returns the operator's xonly public key + /// + /// Used by aggregator inside setup + pub async fn get_x_only_public_key( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/GetXOnlyPublicKey", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineOperator", "GetXOnlyPublicKey"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns an operator's parameters. It will be called once, by the + /// aggregator, to set all the public keys. + /// + /// # Returns + /// + /// Returns an [`OperatorParams`], which includes operator's configuration and + /// Watchtower parameters. 
+ /// + /// Used by aggregator inside setup + pub async fn get_params( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/GetParams", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineOperator", "GetParams")); + self.inner.server_streaming(req, path, codec).await + } + /// Returns an operator's deposit keys. + /// Deposit keys include Assert BitVM winternitz keys, and challenge ACK hashes. + /// + /// Used by aggregator inside new_deposit + pub async fn get_deposit_keys( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/GetDepositKeys", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineOperator", "GetDepositKeys"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns the current status of tasks running on the operator and their last synced heights. 
+ pub async fn get_current_status( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/GetCurrentStatus", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineOperator", "GetCurrentStatus"), + ); + self.inner.unary(req, path, codec).await + } + /// Signs everything that includes Operator's burn connector. + /// + /// # Parameters + /// + /// - User's deposit information + /// - Nonce metadata + /// + /// # Returns + /// + /// - Operator burn Schnorr signature + pub async fn deposit_sign( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/DepositSign", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineOperator", "DepositSign")); + self.inner.server_streaming(req, path, codec).await + } + /// Restarts the background tasks for the operator. 
+ pub async fn restart_background_tasks( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/RestartBackgroundTasks", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineOperator", + "RestartBackgroundTasks", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Prepares a withdrawal if it's profitable and the withdrawal is correct and registered in Citrea bridge contract. + /// If withdrawal is accepted, the payout tx will be added to the TxSender and success is returned, otherwise an error is returned. + /// If automation is disabled, the withdrawal will not be accepted and an error will be returned. + /// Note: This is intended for operator's own use, so it doesn't include a signature from aggregator. + pub async fn internal_withdraw( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/InternalWithdraw", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineOperator", "InternalWithdraw"), + ); + self.inner.unary(req, path, codec).await + } + /// First, if verification address in operator's config is set, the signature in rpc is checked to see if it was signed by the verification address. 
+ /// Then prepares a withdrawal if it's profitable and the withdrawal is correct and registered in Citrea bridge contract. + /// If withdrawal is accepted, the payout tx will be added to the TxSender and success is returned, otherwise an error is returned. + /// If automation is disabled, the withdrawal will not be accepted and an error will be returned. + pub async fn withdraw( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/Withdraw", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineOperator", "Withdraw")); + self.inner.unary(req, path, codec).await + } + /// For a given deposit outpoint, determines the next step in the kickoff process the operator is in, + /// and returns the raw signed txs that the operator needs to send next, for enabling reimbursement process + /// without automation. 
+ /// + /// # Parameters + /// - deposit_outpoint: Deposit outpoint to create the kickoff for + /// + /// # Returns + /// - Raw signed txs that the operator needs to send next + pub async fn get_reimbursement_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/GetReimbursementTxs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineOperator", + "GetReimbursementTxs", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Signs all tx's it can according to given transaction type (use it with AllNeededForDeposit to get almost all tx's) + /// Creates the transactions denoted by the deposit and operator_idx, round_idx, and kickoff_idx. + /// It will create the transaction and sign it with the operator's private key and/or saved nofn signatures. 
+ /// + /// # Parameters + /// - deposit_params: User's deposit information + /// - transaction_type: Requested Transaction type + /// - kickoff_id: Operator's kickoff ID + /// + /// # Returns + /// - Raw signed transactions that the entity can sign (no asserts and watchtower challenge) + /// + /// Only used in tests + pub async fn internal_create_signed_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/InternalCreateSignedTxs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineOperator", + "InternalCreateSignedTxs", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Creates all assert transactions (AssertBegin, MiniAsserts, AssertEnd), signs them, and returns the raw txs + /// in the same order. 
+ /// # Parameters + /// - deposit_params: User's deposit information + /// - kickoff_id: Operator's kickoff ID + /// - commit_data: Commitment data for each MiniAssert tx's + /// + /// # Returns + /// - Raw signed assert transactions + pub async fn internal_create_assert_commitment_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/InternalCreateAssertCommitmentTxs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineOperator", + "InternalCreateAssertCommitmentTxs", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn internal_finalized_payout( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/InternalFinalizedPayout", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineOperator", + "InternalFinalizedPayout", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn internal_end_round( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + 
"/clementine.ClementineOperator/InternalEndRound", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineOperator", "InternalEndRound"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn vergen( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineOperator/Vergen", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineOperator", "Vergen")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated client implementations. +pub mod clementine_verifier_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct ClementineVerifierClient { + inner: tonic::client::Grpc, + } + impl ClementineVerifierClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ClementineVerifierClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ClementineVerifierClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + ClementineVerifierClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Returns verifiers' metadata. Needs to be called once per setup. + /// + /// Used by aggregator inside setup to let all verifiers know all other verifier pks + pub async fn get_params( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/GetParams", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineVerifier", "GetParams")); + self.inner.unary(req, path, codec).await + } + /// Saves an operator. + /// + /// Used by aggregator inside setup to let all verifiers know all other operator pks + pub async fn set_operator( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/SetOperator", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineVerifier", "SetOperator")); + self.inner.client_streaming(req, path, codec).await + } + /// Sets the operator's winternitz keys and challenge ACK hashes and saves them + /// into the db. 
+ /// + /// Used by aggregator inside new_deposit to let all verifiers know all other operators' deposit information + pub async fn set_operator_keys( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/SetOperatorKeys", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineVerifier", "SetOperatorKeys"), + ); + self.inner.unary(req, path, codec).await + } + /// Generates nonces for a deposit. + /// + /// # Returns + /// + /// Nonce metadata followed by nonces. + /// + /// Used by aggregator inside new_deposit + pub async fn nonce_gen( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/NonceGen", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineVerifier", "NonceGen")); + self.inner.server_streaming(req, path, codec).await + } + /// Signs deposit with given aggNonces and verifier's secNonce using + /// nonce_id. 
+ /// + /// Used by aggregator inside new_deposit + pub async fn deposit_sign( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::VerifierDepositSignParams, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/DepositSign", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineVerifier", "DepositSign")); + self.inner.streaming(req, path, codec).await + } + /// Signs the optimistic payout tx with given aggNonce and withdrawal info. + pub async fn optimistic_payout_sign( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/OptimisticPayoutSign", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineVerifier", + "OptimisticPayoutSign", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Verifies every signature and signs move_tx. 
+ /// + /// Used by aggregator inside new_deposit + pub async fn deposit_finalize( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::VerifierDepositFinalizeParams, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/DepositFinalize", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineVerifier", "DepositFinalize"), + ); + self.inner.client_streaming(req, path, codec).await + } + /// Debug a transaction by retrieving its current state and history + pub async fn debug_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/DebugTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineVerifier", "DebugTx")); + self.inner.unary(req, path, codec).await + } + /// Restarts the background tasks for the verifier. 
+ pub async fn restart_background_tasks( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/RestartBackgroundTasks", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineVerifier", + "RestartBackgroundTasks", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Checks if the kickoff tx is malicious and if so, try to send all necessary txs to punish the operator + pub async fn internal_handle_kickoff( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/InternalHandleKickoff", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineVerifier", + "InternalHandleKickoff", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Returns the current status of tasks running on the verifier and their last synced heights. 
+ pub async fn get_current_status( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/GetCurrentStatus", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineVerifier", "GetCurrentStatus"), + ); + self.inner.unary(req, path, codec).await + } + /// 1. Signs all tx's it can according to given transaction type (use it with AllNeededForDeposit to get almost all tx's) + /// 2. Creates the transactions denoted by the deposit and operator_idx, round_idx, and kickoff_idx. + /// 3. It will create the transaction and sign it with the operator's private key and/or saved nofn signatures. + /// + /// # Parameters + /// - deposit_params: User's deposit information + /// - transaction_type: Requested Transaction type + /// - kickoff_id: Operator's kickoff ID + /// + /// # Returns + /// - Raw signed transactions that the entity can sign (no asserts and watchtower challenge) + pub async fn internal_create_signed_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/InternalCreateSignedTxs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineVerifier", + "InternalCreateSignedTxs", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Signs the verifiers own watchtower challenge tx in the 
corresponding + /// kickoff and returns the signed raw tx + pub async fn internal_create_watchtower_challenge( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/InternalCreateWatchtowerChallenge", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineVerifier", + "InternalCreateWatchtowerChallenge", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn vergen( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineVerifier/Vergen", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineVerifier", "Vergen")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated client implementations. +pub mod clementine_aggregator_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct ClementineAggregatorClient { + inner: tonic::client::Grpc, + } + impl ClementineAggregatorClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ClementineAggregatorClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ClementineAggregatorClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + ClementineAggregatorClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn get_nofn_aggregated_xonly_pk( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/GetNofnAggregatedXonlyPk", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineAggregator", + "GetNofnAggregatedXonlyPk", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Sets up the system of verifiers, watchtowers and operators by: + /// + /// 1. Collects verifier keys from each verifier + /// 2. Distributes these verifier keys to all verifiers + /// 3. Collects all operator configs from each operator + /// 4. 
Distributes these operator configs to all verifiers + /// + /// Used by the clementine-backend service + pub async fn setup( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/Setup", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineAggregator", "Setup")); + self.inner.unary(req, path, codec).await + } + /// This will call, DepositNonceGen for every verifier, + /// then it will aggregate one by one and then send it to DepositSign, + /// then it will aggregate the partial sigs and send it to DepositFinalize, + /// this will also call the operator to get their signatures and send it to + /// DepositFinalize then it will collect the partial sigs and create the move + /// tx. + /// + /// Used by the clementine-backend service to initiate a deposit + pub async fn new_deposit( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/NewDeposit", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("clementine.ClementineAggregator", "NewDeposit"), + ); + self.inner.unary(req, path, codec).await + } + /// Call's withdraw on all operators + /// Used by the clementine-backend service to initiate a withdrawal + /// If the operator's xonly public keys list is empty, the withdrawal will be sent to all operators. 
+ /// If not, only the operators in the list will be sent the withdrawal request. + pub async fn withdraw( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/Withdraw", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineAggregator", "Withdraw")); + self.inner.unary(req, path, codec).await + } + /// Perform an optimistic payout to reimburse a peg-out from Citrea + pub async fn optimistic_payout( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/OptimisticPayout", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineAggregator", + "OptimisticPayout", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Send a pre-signed tx to the network + pub async fn internal_send_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/InternalSendTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + 
GrpcMethod::new("clementine.ClementineAggregator", "InternalSendTx"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn send_move_to_vault_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/SendMoveToVaultTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineAggregator", + "SendMoveToVaultTx", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Returns the current status of tasks running on the operators/verifiers. + /// If restart_tasks is true, it will restart the tasks on the entities if they are stopped. + pub async fn get_entity_statuses( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/GetEntityStatuses", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineAggregator", + "GetEntityStatuses", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Creates an emergency stop tx that won't be broadcasted. + /// Tx will have around 3 sats/vbyte fee. + /// Set add_anchor to true to add an anchor output for cpfp.. 
+ pub async fn internal_get_emergency_stop_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/InternalGetEmergencyStopTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "clementine.ClementineAggregator", + "InternalGetEmergencyStopTx", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn vergen( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/clementine.ClementineAggregator/Vergen", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("clementine.ClementineAggregator", "Vergen")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod clementine_operator_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with ClementineOperatorServer. 
+ #[async_trait] + pub trait ClementineOperator: std::marker::Send + std::marker::Sync + 'static { + /// Returns the operator's xonly public key + /// + /// Used by aggregator inside setup + async fn get_x_only_public_key( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the GetParams method. + type GetParamsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Returns an operator's parameters. It will be called once, by the + /// aggregator, to set all the public keys. + /// + /// # Returns + /// + /// Returns an [`OperatorParams`], which includes operator's configuration and + /// Watchtower parameters. + /// + /// Used by aggregator inside setup + async fn get_params( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns an operator's deposit keys. + /// Deposit keys include Assert BitVM winternitz keys, and challenge ACK hashes. + /// + /// Used by aggregator inside new_deposit + async fn get_deposit_keys( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns the current status of tasks running on the operator and their last synced heights. + async fn get_current_status( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Server streaming response type for the DepositSign method. + type DepositSignStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Signs everything that includes Operator's burn connector. 
+ /// + /// # Parameters + /// + /// - User's deposit information + /// - Nonce metadata + /// + /// # Returns + /// + /// - Operator burn Schnorr signature + async fn deposit_sign( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Restarts the background tasks for the operator. + async fn restart_background_tasks( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Prepares a withdrawal if it's profitable and the withdrawal is correct and registered in Citrea bridge contract. + /// If withdrawal is accepted, the payout tx will be added to the TxSender and success is returned, otherwise an error is returned. + /// If automation is disabled, the withdrawal will not be accepted and an error will be returned. + /// Note: This is intended for operator's own use, so it doesn't include a signature from aggregator. + async fn internal_withdraw( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// First, if verification address in operator's config is set, the signature in rpc is checked to see if it was signed by the verification address. + /// Then prepares a withdrawal if it's profitable and the withdrawal is correct and registered in Citrea bridge contract. + /// If withdrawal is accepted, the payout tx will be added to the TxSender and success is returned, otherwise an error is returned. + /// If automation is disabled, the withdrawal will not be accepted and an error will be returned. + async fn withdraw( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// For a given deposit outpoint, determines the next step in the kickoff process the operator is in, + /// and returns the raw signed txs that the operator needs to send next, for enabling reimbursement process + /// without automation. 
+ /// + /// # Parameters + /// - deposit_outpoint: Deposit outpoint to create the kickoff for + /// + /// # Returns + /// - Raw signed txs that the operator needs to send next + async fn get_reimbursement_txs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Signs all tx's it can according to given transaction type (use it with AllNeededForDeposit to get almost all tx's) + /// Creates the transactions denoted by the deposit and operator_idx, round_idx, and kickoff_idx. + /// It will create the transaction and sign it with the operator's private key and/or saved nofn signatures. + /// + /// # Parameters + /// - deposit_params: User's deposit information + /// - transaction_type: Requested Transaction type + /// - kickoff_id: Operator's kickoff ID + /// + /// # Returns + /// - Raw signed transactions that the entity can sign (no asserts and watchtower challenge) + /// + /// Only used in tests + async fn internal_create_signed_txs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Creates all assert transactions (AssertBegin, MiniAsserts, AssertEnd), signs them, and returns the raw txs + /// in the same order. 
+ /// # Parameters + /// - deposit_params: User's deposit information + /// - kickoff_id: Operator's kickoff ID + /// - commit_data: Commitment data for each MiniAssert tx's + /// + /// # Returns + /// - Raw signed assert transactions + async fn internal_create_assert_commitment_txs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn internal_finalized_payout( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn internal_end_round( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn vergen( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + /// An operator is responsible for paying withdrawals. It has an unique ID and + /// chain of UTXOs named `round_txs`. An operator also runs a verifier. These are + /// connected to the same database and both have access to watchtowers' + /// winternitz pubkeys. + #[derive(Debug)] + pub struct ClementineOperatorServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl ClementineOperatorServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for ClementineOperatorServer + where + T: ClementineOperator, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/clementine.ClementineOperator/GetXOnlyPublicKey" => { + #[allow(non_camel_case_types)] + struct GetXOnlyPublicKeySvc(pub Arc); + impl tonic::server::UnaryService + for GetXOnlyPublicKeySvc { + type Response = super::XOnlyPublicKeyRpc; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_x_only_public_key( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let 
max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetXOnlyPublicKeySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/GetParams" => { + #[allow(non_camel_case_types)] + struct GetParamsSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::ServerStreamingService + for GetParamsSvc { + type Response = super::OperatorParams; + type ResponseStream = T::GetParamsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_params(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetParamsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/GetDepositKeys" => { + #[allow(non_camel_case_types)] 
+ struct GetDepositKeysSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::UnaryService + for GetDepositKeysSvc { + type Response = super::OperatorKeys; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_deposit_keys(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetDepositKeysSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/GetCurrentStatus" => { + #[allow(non_camel_case_types)] + struct GetCurrentStatusSvc(pub Arc); + impl tonic::server::UnaryService + for GetCurrentStatusSvc { + type Response = super::EntityStatus; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_current_status( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = 
self.inner.clone(); + let fut = async move { + let method = GetCurrentStatusSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/DepositSign" => { + #[allow(non_camel_case_types)] + struct DepositSignSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::ServerStreamingService + for DepositSignSvc { + type Response = super::SchnorrSig; + type ResponseStream = T::DepositSignStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::deposit_sign(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DepositSignSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/RestartBackgroundTasks" => { + #[allow(non_camel_case_types)] + struct RestartBackgroundTasksSvc(pub Arc); + impl tonic::server::UnaryService + for RestartBackgroundTasksSvc { + type 
Response = super::Empty; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::restart_background_tasks( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = RestartBackgroundTasksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/InternalWithdraw" => { + #[allow(non_camel_case_types)] + struct InternalWithdrawSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::UnaryService + for InternalWithdrawSvc { + type Response = super::RawSignedTx; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_withdraw( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalWithdrawSvc(inner); + let 
codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/Withdraw" => { + #[allow(non_camel_case_types)] + struct WithdrawSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::UnaryService + for WithdrawSvc { + type Response = super::RawSignedTx; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::withdraw(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = WithdrawSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/GetReimbursementTxs" => { + #[allow(non_camel_case_types)] + struct GetReimbursementTxsSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::UnaryService + for GetReimbursementTxsSvc { + type Response = super::SignedTxsWithType; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) 
-> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_reimbursement_txs( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetReimbursementTxsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/InternalCreateSignedTxs" => { + #[allow(non_camel_case_types)] + struct InternalCreateSignedTxsSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::UnaryService + for InternalCreateSignedTxsSvc { + type Response = super::SignedTxsWithType; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_create_signed_txs( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalCreateSignedTxsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + 
.apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/InternalCreateAssertCommitmentTxs" => { + #[allow(non_camel_case_types)] + struct InternalCreateAssertCommitmentTxsSvc( + pub Arc, + ); + impl< + T: ClementineOperator, + > tonic::server::UnaryService + for InternalCreateAssertCommitmentTxsSvc { + type Response = super::SignedTxsWithType; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_create_assert_commitment_txs( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalCreateAssertCommitmentTxsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/InternalFinalizedPayout" => { + #[allow(non_camel_case_types)] + struct InternalFinalizedPayoutSvc(pub Arc); + impl< + T: ClementineOperator, + > tonic::server::UnaryService + for InternalFinalizedPayoutSvc { + type Response = super::Txid; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + 
>; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_finalized_payout( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalFinalizedPayoutSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/InternalEndRound" => { + #[allow(non_camel_case_types)] + struct InternalEndRoundSvc(pub Arc); + impl tonic::server::UnaryService + for InternalEndRoundSvc { + type Response = super::Empty; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_end_round( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalEndRoundSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + 
accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineOperator/Vergen" => { + #[allow(non_camel_case_types)] + struct VergenSvc(pub Arc); + impl tonic::server::UnaryService + for VergenSvc { + type Response = super::VergenResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::vergen(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = VergenSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for ClementineOperatorServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + 
send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "clementine.ClementineOperator"; + impl tonic::server::NamedService for ClementineOperatorServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated server implementations. +pub mod clementine_verifier_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with ClementineVerifierServer. + #[async_trait] + pub trait ClementineVerifier: std::marker::Send + std::marker::Sync + 'static { + /// Returns verifiers' metadata. Needs to be called once per setup. + /// + /// Used by aggregator inside setup to let all verifiers know all other verifier pks + async fn get_params( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Saves an operator. + /// + /// Used by aggregator inside setup to let all verifiers know all other operator pks + async fn set_operator( + &self, + request: tonic::Request>, + ) -> std::result::Result, tonic::Status>; + /// Sets the operator's winternitz keys and challenge ACK hashes and saves them + /// into the db. + /// + /// Used by aggregator inside new_deposit to let all verifiers know all other operators' deposit information + async fn set_operator_keys( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Server streaming response type for the NonceGen method. + type NonceGenStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Generates nonces for a deposit. + /// + /// # Returns + /// + /// Nonce metadata followed by nonces. 
+ /// + /// Used by aggregator inside new_deposit + async fn nonce_gen( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Server streaming response type for the DepositSign method. + type DepositSignStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Signs deposit with given aggNonces and verifier's secNonce using + /// nonce_id. + /// + /// Used by aggregator inside new_deposit + async fn deposit_sign( + &self, + request: tonic::Request>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Signs the optimistic payout tx with given aggNonce and withdrawal info. + async fn optimistic_payout_sign( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Verifies every signature and signs move_tx. + /// + /// Used by aggregator inside new_deposit + async fn deposit_finalize( + &self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Debug a transaction by retrieving its current state and history + async fn debug_tx( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Restarts the background tasks for the verifier. + async fn restart_background_tasks( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Checks if the kickoff tx is malicious and if so, try to send all necessary txs to punish the operator + async fn internal_handle_kickoff( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns the current status of tasks running on the verifier and their last synced heights. + async fn get_current_status( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// 1. Signs all tx's it can according to given transaction type (use it with AllNeededForDeposit to get almost all tx's) + /// 2. 
Creates the transactions denoted by the deposit and operator_idx, round_idx, and kickoff_idx. + /// 3. It will create the transaction and sign it with the operator's private key and/or saved nofn signatures. + /// + /// # Parameters + /// - deposit_params: User's deposit information + /// - transaction_type: Requested Transaction type + /// - kickoff_id: Operator's kickoff ID + /// + /// # Returns + /// - Raw signed transactions that the entity can sign (no asserts and watchtower challenge) + async fn internal_create_signed_txs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Signs the verifiers own watchtower challenge tx in the corresponding + /// kickoff and returns the signed raw tx + async fn internal_create_watchtower_challenge( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn vergen( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + #[derive(Debug)] + pub struct ClementineVerifierServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl ClementineVerifierServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for ClementineVerifierServer + where + T: ClementineVerifier, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/clementine.ClementineVerifier/GetParams" => { + #[allow(non_camel_case_types)] + struct GetParamsSvc(pub Arc); + impl tonic::server::UnaryService + for GetParamsSvc { + type Response = super::VerifierParams; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_params(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + 
let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetParamsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/SetOperator" => { + #[allow(non_camel_case_types)] + struct SetOperatorSvc(pub Arc); + impl< + T: ClementineVerifier, + > tonic::server::ClientStreamingService + for SetOperatorSvc { + type Response = super::Empty; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::set_operator(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SetOperatorSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.client_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/SetOperatorKeys" => { + #[allow(non_camel_case_types)] + struct SetOperatorKeysSvc(pub Arc); + impl< + T: ClementineVerifier, + > 
tonic::server::UnaryService + for SetOperatorKeysSvc { + type Response = super::Empty; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::set_operator_keys( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SetOperatorKeysSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/NonceGen" => { + #[allow(non_camel_case_types)] + struct NonceGenSvc(pub Arc); + impl< + T: ClementineVerifier, + > tonic::server::ServerStreamingService + for NonceGenSvc { + type Response = super::NonceGenResponse; + type ResponseStream = T::NonceGenStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::nonce_gen(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = 
async move { + let method = NonceGenSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/DepositSign" => { + #[allow(non_camel_case_types)] + struct DepositSignSvc(pub Arc); + impl< + T: ClementineVerifier, + > tonic::server::StreamingService + for DepositSignSvc { + type Response = super::PartialSig; + type ResponseStream = T::DepositSignStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::deposit_sign(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DepositSignSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/OptimisticPayoutSign" => { + #[allow(non_camel_case_types)] + struct OptimisticPayoutSignSvc(pub Arc); + impl< + T: ClementineVerifier, + > tonic::server::UnaryService + for OptimisticPayoutSignSvc { + 
type Response = super::PartialSig; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::optimistic_payout_sign( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = OptimisticPayoutSignSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/DepositFinalize" => { + #[allow(non_camel_case_types)] + struct DepositFinalizeSvc(pub Arc); + impl< + T: ClementineVerifier, + > tonic::server::ClientStreamingService< + super::VerifierDepositFinalizeParams, + > for DepositFinalizeSvc { + type Response = super::VerifierDepositFinalizeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::deposit_finalize(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = 
self.inner.clone(); + let fut = async move { + let method = DepositFinalizeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.client_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/DebugTx" => { + #[allow(non_camel_case_types)] + struct DebugTxSvc(pub Arc); + impl< + T: ClementineVerifier, + > tonic::server::UnaryService + for DebugTxSvc { + type Response = super::TxDebugInfo; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::debug_tx(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DebugTxSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/RestartBackgroundTasks" => { + #[allow(non_camel_case_types)] + struct RestartBackgroundTasksSvc(pub Arc); + impl tonic::server::UnaryService + for RestartBackgroundTasksSvc { + type Response = super::Empty; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::restart_background_tasks( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = RestartBackgroundTasksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/InternalHandleKickoff" => { + #[allow(non_camel_case_types)] + struct InternalHandleKickoffSvc(pub Arc); + impl tonic::server::UnaryService + for InternalHandleKickoffSvc { + type Response = super::Empty; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_handle_kickoff( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalHandleKickoffSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/GetCurrentStatus" => { + #[allow(non_camel_case_types)] + struct GetCurrentStatusSvc(pub Arc); + impl tonic::server::UnaryService + for GetCurrentStatusSvc { + type Response = super::EntityStatus; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_current_status( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetCurrentStatusSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/InternalCreateSignedTxs" => { + #[allow(non_camel_case_types)] + struct InternalCreateSignedTxsSvc(pub Arc); + impl< + T: ClementineVerifier, + > tonic::server::UnaryService + for InternalCreateSignedTxsSvc { + type Response = super::SignedTxsWithType; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let 
inner = Arc::clone(&self.0); + let fut = async move { + ::internal_create_signed_txs( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalCreateSignedTxsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/InternalCreateWatchtowerChallenge" => { + #[allow(non_camel_case_types)] + struct InternalCreateWatchtowerChallengeSvc( + pub Arc, + ); + impl< + T: ClementineVerifier, + > tonic::server::UnaryService + for InternalCreateWatchtowerChallengeSvc { + type Response = super::RawTxWithRbfInfo; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_create_watchtower_challenge( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalCreateWatchtowerChallengeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineVerifier/Vergen" => { + #[allow(non_camel_case_types)] + struct VergenSvc(pub Arc); + impl tonic::server::UnaryService + for VergenSvc { + type Response = super::VergenResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::vergen(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = VergenSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for ClementineVerifierServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + 
accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "clementine.ClementineVerifier"; + impl tonic::server::NamedService for ClementineVerifierServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated server implementations. +pub mod clementine_aggregator_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with ClementineAggregatorServer. + #[async_trait] + pub trait ClementineAggregator: std::marker::Send + std::marker::Sync + 'static { + async fn get_nofn_aggregated_xonly_pk( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Sets up the system of verifiers, watchtowers and operators by: + /// + /// 1. Collects verifier keys from each verifier + /// 2. Distributes these verifier keys to all verifiers + /// 3. Collects all operator configs from each operator + /// 4. Distributes these operator configs to all verifiers + /// + /// Used by the clementine-backend service + async fn setup( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// This will call, DepositNonceGen for every verifier, + /// then it will aggregate one by one and then send it to DepositSign, + /// then it will aggregate the partial sigs and send it to DepositFinalize, + /// this will also call the operator to get their signatures and send it to + /// DepositFinalize then it will collect the partial sigs and create the move + /// tx. 
+ /// + /// Used by the clementine-backend service to initiate a deposit + async fn new_deposit( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Call's withdraw on all operators + /// Used by the clementine-backend service to initiate a withdrawal + /// If the operator's xonly public keys list is empty, the withdrawal will be sent to all operators. + /// If not, only the operators in the list will be sent the withdrawal request. + async fn withdraw( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Perform an optimistic payout to reimburse a peg-out from Citrea + async fn optimistic_payout( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Send a pre-signed tx to the network + async fn internal_send_tx( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn send_move_to_vault_tx( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns the current status of tasks running on the operators/verifiers. + /// If restart_tasks is true, it will restart the tasks on the entities if they are stopped. + async fn get_entity_statuses( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Creates an emergency stop tx that won't be broadcasted. + /// Tx will have around 3 sats/vbyte fee. + /// Set add_anchor to true to add an anchor output for cpfp.. 
+ async fn internal_get_emergency_stop_tx( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn vergen( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + #[derive(Debug)] + pub struct ClementineAggregatorServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl ClementineAggregatorServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> + for ClementineAggregatorServer + where + T: ClementineAggregator, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/clementine.ClementineAggregator/GetNofnAggregatedXonlyPk" => { + #[allow(non_camel_case_types)] + struct GetNofnAggregatedXonlyPkSvc( + pub Arc, + ); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService + for GetNofnAggregatedXonlyPkSvc { + type Response = super::NofnResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_nofn_aggregated_xonly_pk( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNofnAggregatedXonlyPkSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) 
+ } + "/clementine.ClementineAggregator/Setup" => { + #[allow(non_camel_case_types)] + struct SetupSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService for SetupSvc { + type Response = super::VerifierPublicKeys; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::setup(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SetupSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/NewDeposit" => { + #[allow(non_camel_case_types)] + struct NewDepositSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService for NewDepositSvc { + type Response = super::RawSignedTx; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::new_deposit(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = 
self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = NewDepositSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/Withdraw" => { + #[allow(non_camel_case_types)] + struct WithdrawSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService + for WithdrawSvc { + type Response = super::AggregatorWithdrawResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::withdraw(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = WithdrawSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/OptimisticPayout" => { + #[allow(non_camel_case_types)] + struct OptimisticPayoutSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService + for OptimisticPayoutSvc { + type Response = 
super::RawSignedTx; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::optimistic_payout( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = OptimisticPayoutSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/InternalSendTx" => { + #[allow(non_camel_case_types)] + struct InternalSendTxSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService + for InternalSendTxSvc { + type Response = super::Empty; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_send_tx( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalSendTxSvc(inner); + let codec = 
tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/SendMoveToVaultTx" => { + #[allow(non_camel_case_types)] + struct SendMoveToVaultTxSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService + for SendMoveToVaultTxSvc { + type Response = super::Txid; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::send_move_to_vault_tx( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SendMoveToVaultTxSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/GetEntityStatuses" => { + #[allow(non_camel_case_types)] + struct GetEntityStatusesSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService + for GetEntityStatusesSvc { + type Response = super::EntityStatuses; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn 
call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_entity_statuses( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetEntityStatusesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/InternalGetEmergencyStopTx" => { + #[allow(non_camel_case_types)] + struct InternalGetEmergencyStopTxSvc( + pub Arc, + ); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService + for InternalGetEmergencyStopTxSvc { + type Response = super::GetEmergencyStopTxResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::internal_get_emergency_stop_tx( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = InternalGetEmergencyStopTxSvc(inner); + let codec = 
tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/clementine.ClementineAggregator/Vergen" => { + #[allow(non_camel_case_types)] + struct VergenSvc(pub Arc); + impl< + T: ClementineAggregator, + > tonic::server::UnaryService for VergenSvc { + type Response = super::VergenResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::vergen(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = VergenSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for ClementineAggregatorServer { + 
fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "clementine.ClementineAggregator"; + impl tonic::server::NamedService for ClementineAggregatorServer { + const NAME: &'static str = SERVICE_NAME; + } +} diff --git a/core/src/rpc/ecdsa_verification_sig.rs b/core/src/rpc/ecdsa_verification_sig.rs new file mode 100644 index 000000000..e15d9b3c1 --- /dev/null +++ b/core/src/rpc/ecdsa_verification_sig.rs @@ -0,0 +1,132 @@ +//! # ECDSA Verification Signature +//! +//! This module contains the ECDSA verification signature for the Clementine protocol. +//! It is for additional verification that the request for optimistic payout and withdrawal is coming from the aggregator, which +//! is the owner of the address in operator/verifiers config. +//! +//! The address who signed the signature is retrieved by calculating the EIP-712 hash of the withdrawal params. +//! The address is then compared to the address in the config. +//! + +use alloy::primitives::PrimitiveSignature; +use alloy::sol_types::Eip712Domain; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::schnorr::Signature; +use bitcoin::OutPoint; +use bitcoin::{Amount, ScriptBuf}; +use eyre::{Context, Result}; + +use crate::errors::BridgeError; + +alloy_sol_types::sol! 
{ + #[derive(Debug)] + struct OptimisticPayoutMessage { + uint32 withdrawal_id; + bytes input_signature; + bytes32 input_outpoint_txid; + uint32 input_outpoint_vout; + bytes output_script_pubkey; + uint64 output_amount; + } + + #[derive(Debug)] + struct OperatorWithdrawalMessage { + uint32 withdrawal_id; + bytes input_signature; + bytes32 input_outpoint_txid; + uint32 input_outpoint_vout; + bytes output_script_pubkey; + uint64 output_amount; + } +} + +pub static CLEMENTINE_EIP712_DOMAIN: Eip712Domain = alloy_sol_types::eip712_domain! { + name: "ClementineVerification", + version: "1", +}; + +pub trait WithdrawalMessage { + fn new( + deposit_id: u32, + input_signature: Signature, + input_outpoint: OutPoint, + output_script_pubkey: ScriptBuf, + output_amount: Amount, + ) -> Self; +} + +impl WithdrawalMessage for OptimisticPayoutMessage { + fn new( + deposit_id: u32, + input_signature: Signature, + input_outpoint: OutPoint, + output_script_pubkey: ScriptBuf, + output_amount: Amount, + ) -> Self { + OptimisticPayoutMessage { + withdrawal_id: deposit_id, + input_signature: input_signature.serialize().to_vec().into(), + input_outpoint_txid: input_outpoint.txid.to_byte_array().into(), + input_outpoint_vout: input_outpoint.vout, + output_script_pubkey: output_script_pubkey.as_bytes().to_vec().into(), + output_amount: output_amount.to_sat(), + } + } +} + +impl WithdrawalMessage for OperatorWithdrawalMessage { + fn new( + deposit_id: u32, + input_signature: Signature, + input_outpoint: OutPoint, + output_script_pubkey: ScriptBuf, + output_amount: Amount, + ) -> Self { + OperatorWithdrawalMessage { + withdrawal_id: deposit_id, + input_signature: input_signature.serialize().to_vec().into(), + input_outpoint_txid: input_outpoint.txid.to_byte_array().into(), + input_outpoint_vout: input_outpoint.vout, + output_script_pubkey: output_script_pubkey.as_bytes().to_vec().into(), + output_amount: output_amount.to_sat(), + } + } +} + +/// Recover the address from the signature +/// 
EIP712 hash is calculated from optimistic payout params +/// Signature is the signature of the eip712 hash +/// +/// Parameters: +/// - deposit_id: The id of the deposit +/// - input_signature: The signature of the withdrawal input +/// - input_outpoint: The outpoint of the withdrawal input +/// - output_script_pubkey: The script pubkey of the withdrawal output +/// - output_amount: The amount of the withdrawal output +/// - signature: The signature of the eip712 hash of the withdrawal params +/// +/// Returns: +/// - The address recovered from the signature +pub fn recover_address_from_ecdsa_signature( + deposit_id: u32, + input_signature: Signature, + input_outpoint: OutPoint, + output_script_pubkey: ScriptBuf, + output_amount: Amount, + signature: PrimitiveSignature, +) -> Result { + let params = M::new( + deposit_id, + input_signature, + input_outpoint, + output_script_pubkey, + output_amount, + ); + + let eip712_hash = params.eip712_signing_hash(&CLEMENTINE_EIP712_DOMAIN); + + let address = signature + .recover_address_from_prehash(&eip712_hash) + .wrap_err("Invalid signature")?; + Ok(address) +} diff --git a/core/src/rpc/error.rs b/core/src/rpc/error.rs new file mode 100644 index 000000000..66f70882b --- /dev/null +++ b/core/src/rpc/error.rs @@ -0,0 +1,26 @@ +use std::fmt::Display; +use tokio::sync::mpsc::error::SendError; +use tonic::Status; + +pub(crate) fn _expected_msg_got_error(msg: Status) -> Status { + Status::invalid_argument(format!("Expected message, got error: {msg}")) +} + +pub(crate) fn expected_msg_got_none(msg: &str) -> impl (Fn() -> Status) + '_ { + move || Status::invalid_argument(format!("Expected {msg} but received None")) +} + +pub(crate) fn input_ended_prematurely() -> Status { + Status::invalid_argument("Input stream ended prematurely") +} + +pub(crate) fn output_stream_ended_prematurely(e: SendError) -> Status { + Status::internal(format!("Output stream ended prematurely: {}", e)) +} + +pub(crate) fn invalid_argument<'a, T: 
std::error::Error + Send + Sync + 'static + Display>( + field: &'a str, + msg: &'a str, +) -> impl 'a + Fn(T) -> Status { + move |e| Status::invalid_argument(format!("Failed to parse {field}: {msg}\n{e}")) +} diff --git a/core/src/rpc/interceptors.rs b/core/src/rpc/interceptors.rs new file mode 100644 index 000000000..3daa8712c --- /dev/null +++ b/core/src/rpc/interceptors.rs @@ -0,0 +1,69 @@ +use tonic::{service::Interceptor, transport::CertificateDer, Request, Status}; + +#[derive(Debug, Clone)] +pub enum Interceptors { + OnlyAggregatorAndSelf { + aggregator_cert: CertificateDer<'static>, + our_cert: CertificateDer<'static>, + }, + Noop, +} + +fn is_internal(req: &Request<()>) -> bool { + // This normally doesn't exist but we add it in the AddMethodMiddleware + let Some(path) = req.metadata().get("grpc-method") else { + // No grpc method? this should not happen + tracing::error!("Missing grpc-method header in request"); + return false; + }; + + path.as_bytes().starts_with(b"Internal") +} + +impl Interceptor for Interceptors { + #[allow(clippy::result_large_err)] + fn call(&mut self, req: Request<()>) -> Result, Status> { + match self { + Interceptors::OnlyAggregatorAndSelf { + our_cert, + aggregator_cert, + } => only_aggregator_and_self(req, our_cert, aggregator_cert), + Interceptors::Noop => Ok(req), + } + } +} + +#[allow(clippy::result_large_err)] +fn only_aggregator_and_self( + req: Request<()>, + our_cert: &CertificateDer<'static>, + aggregator_cert: &CertificateDer<'static>, +) -> Result, Status> { + let Some(peer_certs) = req.peer_certs() else { + if cfg!(test) { + // Test mode, we don't need to verify peer certificates + return Ok(req); + } else { + // If we're not in test mode, we need to check peer certificates + return Err(Status::unauthenticated( + "Failed to verify peer certificate, is TLS enabled?", + )); + } + }; + + if is_internal(&req) { + if peer_certs.contains(our_cert) { + Ok(req) + } else { + Err(Status::unauthenticated( + "Unauthorized call 
to internal method (not self)", + )) + } + } else if peer_certs.contains(aggregator_cert) || peer_certs.contains(our_cert) { + Ok(req) + } else { + Err(Status::unauthenticated( + "Unauthorized call to method (not aggregator or self)", + )) + } +} diff --git a/core/src/rpc/mod.rs b/core/src/rpc/mod.rs new file mode 100644 index 000000000..50896e98a --- /dev/null +++ b/core/src/rpc/mod.rs @@ -0,0 +1,203 @@ +use crate::{ + config::BridgeConfig, + errors::BridgeError, + rpc::clementine::{ + clementine_operator_client::ClementineOperatorClient, + clementine_verifier_client::ClementineVerifierClient, + }, +}; +use clementine::*; +use eyre::Context; +use hyper_util::rt::TokioIo; +use std::{path::PathBuf, time::Duration}; +use tagged_signature::SignatureId; +use tonic::transport::{Certificate, Channel, ClientTlsConfig, Identity, Uri}; + +#[cfg(test)] +use crate::test::common::ensure_test_certificates; + +#[allow(clippy::all)] +#[rustfmt::skip] +pub mod clementine; + +pub mod aggregator; +pub mod ecdsa_verification_sig; +mod error; +pub mod interceptors; +pub mod operator; +pub mod parser; +pub mod verifier; + +pub use parser::ParserError; + +impl From for SignatureId { + fn from(value: NormalSignatureKind) -> Self { + SignatureId::NormalSignature(NormalSignatureId { + signature_kind: value as i32, + }) + } +} + +impl From<(NumberedSignatureKind, i32)> for SignatureId { + fn from(value: (NumberedSignatureKind, i32)) -> Self { + SignatureId::NumberedSignature(NumberedSignatureId { + signature_kind: value.0 as i32, + idx: value.1, + }) + } +} + +/// Returns gRPC clients. +/// +/// # Parameters +/// +/// - `endpoints`: URIs for clients (can be http/https URLs or unix:// paths) +/// - `connect`: Function that will be used to initiate gRPC connection +/// - `config`: Configuration containing TLS certificate paths +/// +/// # Returns +/// +/// - `CLIENT`: [`tonic`] gRPC client. 
+pub async fn get_clients( + endpoints: Vec, + connect: F, + config: &crate::config::BridgeConfig, + use_client_cert: bool, +) -> Result, BridgeError> +where + F: Fn(Channel) -> CLIENT, +{ + // Ensure certificates exist in test mode + #[cfg(test)] + { + ensure_test_certificates().map_err(|e| { + BridgeError::ConfigError(format!("Failed to ensure test certificates: {}", e)) + })?; + } + + // Get certificate paths from config or use defaults + let client_ca_cert = tokio::fs::read(&config.ca_cert_path) + .await + .wrap_err(format!( + "Failed to read CA certificate from {}", + config.ca_cert_path.display() + ))?; + + let client_ca = Certificate::from_pem(client_ca_cert); + + // Get certificate paths from config or use defaults + let client_cert_path = &config.client_cert_path.clone(); + let client_key_path = &config.client_key_path.clone(); + + // Load client certificate and key + let client_cert = tokio::fs::read(&client_cert_path).await.map_err(|e| { + BridgeError::ConfigError(format!( + "Failed to read client certificate from {}: {}", + client_cert_path.display(), + e + )) + })?; + + let client_key = tokio::fs::read(&client_key_path).await.map_err(|e| { + BridgeError::ConfigError(format!( + "Failed to read client key from {}: {}", + client_key_path.display(), + e + )) + })?; + + futures::future::try_join_all( + endpoints + .into_iter() + .map(|endpoint| { + let client_cert = client_cert.clone(); + let client_key = client_key.clone(); + let client_ca = client_ca.clone(); + + let tls_config = if use_client_cert { + let client_identity = Identity::from_pem(client_cert, client_key); + ClientTlsConfig::new() + .identity(client_identity) + .ca_certificate(client_ca) + } else { + ClientTlsConfig::new().ca_certificate(client_ca) + }; + + let connect = &connect; + + async move { + let channel = if endpoint.starts_with("unix://") { + #[cfg(unix)] + { + // Handle Unix socket (only available on Unix platforms) + let path = endpoint.trim_start_matches("unix://").to_string(); + 
Channel::from_static("lttp://[::]:50051") + .connect_with_connector(tower::service_fn(move |_| { + let path = PathBuf::from(path.clone()); + async move { + let unix_stream = + tokio::net::UnixStream::connect(path).await?; + Ok::<_, std::io::Error>(TokioIo::new(unix_stream)) + } + })) + .await + .wrap_err_with(|| { + format!("Failed to connect to Unix socket {}", endpoint) + })? + } + + #[cfg(not(unix))] + { + // Windows doesn't support Unix sockets + return Err(BridgeError::ConfigError(format!( + "Unix sockets ({}), are not supported on this platform", + endpoint + ))); + } + } else { + // Handle TCP/HTTP connection + let uri = Uri::try_from(endpoint.clone()).map_err(|e| { + BridgeError::ConfigError(format!( + "Endpoint {} is malformed: {}", + endpoint, e + )) + })?; + + Channel::builder(uri) + .timeout(Duration::from_secs(config.grpc.timeout_secs)) + .concurrency_limit(config.grpc.req_concurrency_limit) + .keep_alive_timeout(Duration::from_secs(config.grpc.tcp_keepalive_secs)) + .tls_config(tls_config) + .wrap_err("Failed to configure TLS")? 
+ .connect_lazy() + }; + + Ok(connect(channel)) + } + }) + .collect::>(), + ) + .await +} + +pub fn operator_client_builder( + config: &BridgeConfig, +) -> impl Fn(Channel) -> ClementineOperatorClient { + let max_msg_size = config.grpc.max_message_size; + move |channel| { + ClementineOperatorClient::new(channel) + .max_decoding_message_size(max_msg_size) + .max_encoding_message_size(max_msg_size) + } +} + +pub fn verifier_client_builder( + config: &BridgeConfig, +) -> impl Fn(Channel) -> ClementineVerifierClient { + let max_msg_size = config.grpc.max_message_size; + move |channel| { + ClementineVerifierClient::new(channel) + .max_decoding_message_size(max_msg_size) + .max_encoding_message_size(max_msg_size) + } +} diff --git a/core/src/rpc/operator.rs b/core/src/rpc/operator.rs new file mode 100644 index 000000000..b42f3d454 --- /dev/null +++ b/core/src/rpc/operator.rs @@ -0,0 +1,420 @@ +use super::clementine::clementine_operator_server::ClementineOperator; +use super::clementine::{ + self, ChallengeAckDigest, DepositParams, DepositSignSession, Empty, FinalizedPayoutParams, + OperatorKeys, OperatorParams, SchnorrSig, SignedTxWithType, SignedTxsWithType, + TransactionRequest, VergenResponse, WithdrawParams, XOnlyPublicKeyRpc, +}; +use super::error::*; +use crate::bitvm_client::ClementineBitVMPublicKeys; +use crate::builder::transaction::sign::{create_and_sign_txs, TransactionRequestData}; +use crate::builder::transaction::ContractContext; +use crate::citrea::CitreaClientT; +use crate::constants::DEFAULT_CHANNEL_SIZE; +use crate::deposit::DepositData; +use crate::errors::BridgeError; +use crate::errors::ResultExt; +use crate::operator::OperatorServer; +use crate::rpc::clementine::{RawSignedTx, WithdrawParamsWithSig}; +use crate::rpc::ecdsa_verification_sig::{ + recover_address_from_ecdsa_signature, OperatorWithdrawalMessage, +}; +use crate::rpc::parser; +use crate::utils::{get_vergen_response, monitor_standalone_task}; +use alloy::primitives::PrimitiveSignature; +use 
bitcoin::hashes::Hash; +use bitcoin::{BlockHash, OutPoint}; +use bitvm::chunk::api::{NUM_HASH, NUM_PUBS, NUM_U256}; +use futures::TryFutureExt; +use std::str::FromStr; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{async_trait, Request, Response, Status}; + +#[async_trait] +impl ClementineOperator for OperatorServer +where + C: CitreaClientT, +{ + type DepositSignStream = ReceiverStream>; + type GetParamsStream = ReceiverStream>; + + async fn vergen(&self, _request: Request) -> Result, Status> { + Ok(Response::new(get_vergen_response())) + } + + async fn restart_background_tasks( + &self, + _request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let result = tokio::time::timeout( + std::time::Duration::from_secs(60), + self.start_background_tasks(), + ) + .await; + match result { + Ok(Ok(_)) => Ok(tonic::Response::new(super::Empty {})), + Ok(Err(e)) => Err(e.into()), + Err(_) => Err(tonic::Status::deadline_exceeded( + "Timed out while restarting background tasks. 
Recommended to restart the operator manually.", + )), + } + } + + #[tracing::instrument(skip_all, err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn get_params( + &self, + _request: Request, + ) -> Result, Status> { + let operator = self.operator.clone(); + let (tx, rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + let out_stream: Self::GetParamsStream = ReceiverStream::new(rx); + + let (mut wpk_receiver, mut signature_receiver) = operator.get_params().await?; + + let handle = tokio::spawn(async move { + let operator_config: OperatorParams = operator.clone().into(); + tx.send(Ok(operator_config)) + .await + .map_err(output_stream_ended_prematurely)?; + + while let Some(winternitz_public_key) = wpk_receiver.recv().await { + let operator_winternitz_pubkey: OperatorParams = winternitz_public_key.into(); + tx.send(Ok(operator_winternitz_pubkey)) + .await + .map_err(output_stream_ended_prematurely)?; + } + + while let Some(operator_sig) = signature_receiver.recv().await { + let unspent_kickoff_sig: OperatorParams = operator_sig.into(); + tx.send(Ok(unspent_kickoff_sig)) + .await + .map_err(output_stream_ended_prematurely)?; + } + + Ok::<(), Status>(()) + }); + monitor_standalone_task(handle, "Operator get_params"); + + Ok(Response::new(out_stream)) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn deposit_sign( + &self, + request: Request, + ) -> Result, Status> { + let (tx, rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE); + + let deposit_sign_session = request.into_inner(); + let deposit_params: DepositParams = deposit_sign_session.try_into()?; + let deposit_data: DepositData = deposit_params.try_into()?; + + let expected_sigs = self + .operator + .config + .get_num_required_operator_sigs(&deposit_data); + + let mut deposit_signatures_rx = self.operator.deposit_sign(deposit_data).await?; + + tokio::spawn(async move { + let mut sent_sigs = 0; + while let Some(sig) = 
deposit_signatures_rx.recv().await { + let operator_burn_sig = SchnorrSig { + schnorr_sig: sig.serialize().to_vec(), + }; + + if tx + .send(Ok(operator_burn_sig)) + .inspect_ok(|_| { + sent_sigs += 1; + tracing::debug!( + "Sent signature {}/{} in deposit_sign()", + sent_sigs, + expected_sigs + ); + }) + .await + .is_err() + { + break; + } + } + }); + + Ok(Response::new(ReceiverStream::new(rx))) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn internal_withdraw( + &self, + request: Request, + ) -> Result, Status> { + let (withdrawal_id, input_signature, input_outpoint, output_script_pubkey, output_amount) = + parser::operator::parse_withdrawal_sig_params(request.into_inner())?; + + let payout_tx = self + .operator + .withdraw( + withdrawal_id, + input_signature, + input_outpoint, + output_script_pubkey, + output_amount, + ) + .await?; + + Ok(Response::new(RawSignedTx::from(&payout_tx))) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn withdraw( + &self, + request: Request, + ) -> Result, Status> { + let params = request.into_inner(); + let withdraw_params = params.withdrawal.ok_or(Status::invalid_argument( + "Withdrawal params not found for withdrawal", + ))?; + let (withdrawal_id, input_signature, input_outpoint, output_script_pubkey, output_amount) = + parser::operator::parse_withdrawal_sig_params(withdraw_params)?; + + // if verification address is set in config, check if verification signature is valid + if let Some(address_in_config) = self.operator.config.aggregator_verification_address { + let verification_signature = params + .verification_signature + .map(|sig| { + PrimitiveSignature::from_str(&sig).map_err(|e| { + Status::invalid_argument(format!("Invalid verification signature: {}", e)) + }) + }) + .transpose()?; + // check if verification signature is provided by aggregator + if let 
Some(verification_signature) = verification_signature { + let address_from_sig = + recover_address_from_ecdsa_signature::( + withdrawal_id, + input_signature, + input_outpoint, + output_script_pubkey.clone(), + output_amount, + verification_signature, + )?; + + // check if verification signature is signed by the address in config + if address_from_sig != address_in_config { + return Err(BridgeError::InvalidECDSAVerificationSignature).map_to_status(); + } + } else { + // if verification signature is not provided, but verification address is set in config, return error + return Err(BridgeError::ECDSAVerificationSignatureMissing).map_to_status(); + } + } + + let payout_tx = self + .operator + .withdraw( + withdrawal_id, + input_signature, + input_outpoint, + output_script_pubkey, + output_amount, + ) + .await?; + + Ok(Response::new(RawSignedTx::from(&payout_tx))) + } + + #[tracing::instrument(skip(self, request), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn internal_create_assert_commitment_txs( + &self, + request: Request, + ) -> std::result::Result, tonic::Status> { + let tx_req = request.into_inner(); + let tx_req_data: TransactionRequestData = tx_req.try_into()?; + + let raw_txs = self + .operator + .create_assert_commitment_txs( + tx_req_data, + ClementineBitVMPublicKeys::get_assert_commit_data( + ( + [[0u8; 32]; NUM_PUBS], + [[0u8; 32]; NUM_U256], + [[0u8; 16]; NUM_HASH], + ), + &[0u8; 20], + ), + None, + ) + .await?; + + Ok(Response::new(SignedTxsWithType { + signed_txs: raw_txs + .into_iter() + .map(|(tx_type, signed_tx)| SignedTxWithType { + transaction_type: Some(tx_type.into()), + raw_tx: bitcoin::consensus::serialize(&signed_tx), + }) + .collect(), + })) + } + + #[tracing::instrument(skip(self, request), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn get_deposit_keys( + &self, + request: Request, + ) -> Result, Status> { + let start = std::time::Instant::now(); + let deposit_params 
= request.into_inner(); + let deposit_data: DepositData = deposit_params.try_into()?; + + let winternitz_keys = self + .operator + .generate_assert_winternitz_pubkeys(deposit_data.get_deposit_outpoint())?; + let hashes = self + .operator + .generate_challenge_ack_preimages_and_hashes(&deposit_data)?; + tracing::info!("Generated deposit keys in {:?}", start.elapsed()); + + Ok(Response::new(OperatorKeys { + winternitz_pubkeys: winternitz_keys + .into_iter() + .map(|pubkey| pubkey.into()) + .collect(), + challenge_ack_digests: hashes + .into_iter() + .map(|hash| ChallengeAckDigest { hash: hash.into() }) + .collect(), + })) + } + + async fn internal_create_signed_txs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let transaction_request = request.into_inner(); + let transaction_data: TransactionRequestData = transaction_request.try_into()?; + let (_, deposit_data) = self + .operator + .db + .get_deposit_data(None, transaction_data.deposit_outpoint) + .await? 
+ .ok_or(Status::invalid_argument("Deposit not found in database"))?; + let context = ContractContext::new_context_for_kickoff( + transaction_data.kickoff_data, + deposit_data, + self.operator.config.protocol_paramset(), + ); + let raw_txs = create_and_sign_txs( + self.operator.db.clone(), + &self.operator.signer, + self.operator.config.clone(), + context, + Some([0u8; 20]), // dummy blockhash + None, + ) + .await?; + + Ok(Response::new(SignedTxsWithType { + signed_txs: raw_txs + .into_iter() + .map(|(tx_type, signed_tx)| SignedTxWithType { + transaction_type: Some(tx_type.into()), + raw_tx: bitcoin::consensus::serialize(&signed_tx), + }) + .collect(), + })) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn internal_finalized_payout( + &self, + request: Request, + ) -> Result, Status> { + if !cfg!(test) { + return Err(Status::permission_denied( + "This method is only available in tests", + )); + } + + let payout_blockhash: [u8; 32] = request + .get_ref() + .payout_blockhash + .clone() + .try_into() + .expect("Failed to convert payout blockhash to [u8; 32]"); + let deposit_outpoint = request + .get_ref() + .deposit_outpoint + .clone() + .expect("Failed to get deposit outpoint"); + let deposit_outpoint: OutPoint = deposit_outpoint + .try_into() + .expect("Failed to convert deposit outpoint to OutPoint"); + + let mut dbtx = self.operator.db.begin_transaction().await?; + let kickoff_txid = self + .operator + .handle_finalized_payout( + &mut dbtx, + deposit_outpoint, + BlockHash::from_byte_array(payout_blockhash), + ) + .await?; + dbtx.commit().await.expect("Failed to commit transaction"); + + Ok(Response::new(kickoff_txid.into())) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn internal_end_round( + &self, + _request: Request, + ) -> Result, Status> { + #[cfg(feature = "automation")] + { + let mut dbtx = 
self.operator.db.begin_transaction().await?; + + self.operator.end_round(&mut dbtx).await?; + + dbtx.commit().await.expect("Failed to commit transaction"); + Ok(Response::new(Empty {})) + } + + #[cfg(not(feature = "automation"))] + Err(Status::unimplemented( + "Automation is not enabled. Operator does not manage its rounds", + )) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn get_x_only_public_key( + &self, + _request: Request, + ) -> Result, Status> { + let xonly_pk = self.operator.signer.xonly_public_key.serialize(); + Ok(Response::new(XOnlyPublicKeyRpc { + xonly_public_key: xonly_pk.to_vec(), + })) + } + + async fn get_current_status( + &self, + _request: Request, + ) -> Result, Status> { + let status = self.get_current_status().await?; + Ok(Response::new(status)) + } + + async fn get_reimbursement_txs( + &self, + request: Request, + ) -> Result, Status> { + let deposit_outpoint: OutPoint = request.into_inner().try_into()?; + let txs = self + .operator + .get_reimbursement_txs(deposit_outpoint) + .await?; + Ok(Response::new(txs.into())) + } +} diff --git a/core/src/rpc/parser/mod.rs b/core/src/rpc/parser/mod.rs new file mode 100644 index 000000000..526c543c5 --- /dev/null +++ b/core/src/rpc/parser/mod.rs @@ -0,0 +1,684 @@ +use super::clementine::{ + self, DepositParams, FeeType, Outpoint, RawSignedTx, RbfSigningInfoRpc, SchnorrSig, + TransactionRequest, WinternitzPubkey, +}; +use super::error; +use crate::builder::transaction::sign::TransactionRequestData; +use crate::builder::transaction::TransactionType; +use crate::constants::{MAX_BYTES_PER_WINTERNITZ_KEY, MAX_WINTERNITZ_DIGITS_PER_KEY}; +use crate::deposit::{ + Actors, BaseDepositData, DepositData, DepositInfo, DepositType, ReplacementDepositData, + SecurityCouncil, +}; +use crate::errors::BridgeError; +use crate::operator::RoundIndex; +use crate::rpc::clementine::{SignedTxWithType, SignedTxsWithType}; +use 
crate::utils::{FeePayingType, RbfSigningInfo}; +use bitcoin::hashes::{sha256d, FromSliceError, Hash}; +use bitcoin::secp256k1::schnorr::Signature; +use bitcoin::{OutPoint, TapNodeHash, Transaction, Txid, XOnlyPublicKey}; +use bitvm::signatures::winternitz; +use eyre::Context; +use std::fmt::{Debug, Display}; +use std::num::TryFromIntError; +use tonic::Status; + +pub mod operator; +pub mod verifier; + +#[derive(Debug, Clone, thiserror::Error)] +pub enum ParserError { + // RPC errors + #[error("RPC function field {0} is required")] + RPCRequiredParam(&'static str), + #[error("RPC function parameter {0} is malformed")] + RPCParamMalformed(String), + #[error("RPC function parameter {0} is oversized: {1}")] + RPCParamOversized(String, usize), +} + +impl From for tonic::Status { + fn from(value: ParserError) -> Self { + match value { + ParserError::RPCRequiredParam(field) => { + Status::invalid_argument(format!("RPC function field {} is required.", field)) + } + ParserError::RPCParamMalformed(field) => { + Status::invalid_argument(format!("RPC function parameter {} is malformed.", field)) + } + ParserError::RPCParamOversized(field, size) => Status::invalid_argument(format!( + "RPC function parameter {} is oversized: {}", + field, size + )), + } + } +} + +#[allow(dead_code)] +#[allow(clippy::result_large_err)] +/// Converts an integer type in to another integer type. This is needed because +/// tonic defaults to wrong integer types for some parameters. +pub fn convert_int_to_another( + field_name: &str, + value: SOURCE, + try_from: fn(SOURCE) -> Result, +) -> Result +where + SOURCE: Copy + Debug + Display, +{ + try_from(value) + .map_err(|e| error::invalid_argument(field_name, "Given number is out of bounds")(e)) +} + +/// Fetches the next message from a stream which is unwrapped and encapsulated +/// by a [`Result`]. 
+/// +/// # Parameters +/// +/// - stream: [`tonic::Streaming`] typed input stream +/// - field: Input field ident (struct member) to look in the next message +/// +/// # Returns +/// +/// A [`Result`] containing the next message. Will return an [`Err`] variant if +/// stream has exhausted. +#[macro_export] +macro_rules! fetch_next_message_from_stream { + ($stream:expr, $field:ident) => { + $crate::fetch_next_optional_message_from_stream!($stream, $field).ok_or( + $crate::rpc::error::expected_msg_got_none(stringify!($field))(), + ) + }; +} + +/// Fetches next message from a stream. +/// +/// # Parameters +/// +/// - stream: [`tonic::Streaming`] typed input stream +/// - field: Input field ident (struct member) to look in the next message +/// +/// # Returns +/// +/// An [`Option`] containing the next message. Will return a [`None`] variant if +/// stream has exhausted. +#[macro_export] +macro_rules! fetch_next_optional_message_from_stream { + ($stream:expr, $field:ident) => { + $stream + .message() + .await? + .ok_or($crate::rpc::error::input_ended_prematurely())? 
+ .$field + }; +} + +impl From for RbfSigningInfoRpc { + fn from(value: RbfSigningInfo) -> Self { + RbfSigningInfoRpc { + merkle_root: value + .tweak_merkle_root + .map_or(vec![], |root| root.to_byte_array().to_vec()), + vout: value.vout, + } + } +} + +impl TryFrom for RbfSigningInfo { + type Error = BridgeError; + + fn try_from(value: RbfSigningInfoRpc) -> Result { + Ok(RbfSigningInfo { + tweak_merkle_root: if value.merkle_root.is_empty() { + None + } else { + Some( + TapNodeHash::from_slice(&value.merkle_root).wrap_err(eyre::eyre!( + "Failed to convert merkle root bytes from rpc to TapNodeHash" + ))?, + ) + }, + vout: value.vout, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }) + } +} + +impl TryFrom for OutPoint { + type Error = BridgeError; + + fn try_from(value: Outpoint) -> Result { + let hash = match Hash::from_slice( + &value + .txid + .ok_or(eyre::eyre!("Can't convert empty txid"))? + .txid, + ) { + Ok(h) => h, + Err(e) => return Err(BridgeError::FromSliceError(e)), + }; + + Ok(OutPoint { + txid: Txid::from_raw_hash(hash), + vout: value.vout, + }) + } +} +impl From for Outpoint { + fn from(value: OutPoint) -> Self { + Outpoint { + txid: Some(value.txid.into()), + vout: value.vout, + } + } +} + +impl TryFrom for winternitz::PublicKey { + type Error = BridgeError; + + fn try_from(value: WinternitzPubkey) -> Result { + let inner = value.digit_pubkey; + + // Add reasonable size limit per key + if inner.len() > MAX_WINTERNITZ_DIGITS_PER_KEY { + return Err(BridgeError::Parser(ParserError::RPCParamOversized( + "digit_pubkey".to_string(), + inner.len(), + ))); + } + + // Add total memory limit check + let total_bytes = inner.len() * 20; + if total_bytes > MAX_BYTES_PER_WINTERNITZ_KEY { + return Err(BridgeError::Parser(ParserError::RPCParamOversized( + "digit_pubkey".to_string(), + inner.len(), + ))); + } + + inner + .into_iter() + .enumerate() + .map(|(i, inner_vec)| { + inner_vec + .try_into() + .map_err(|e: Vec<_>| 
eyre::eyre!("Incorrect length {:?}, expected 20", e.len())) + .wrap_err_with(|| { + ParserError::RPCParamMalformed(format!("digit_pubkey.[{}]", i)) + }) + }) + .collect::, eyre::Report>>() + .map_err(Into::into) + } +} + +impl From for FeeType { + fn from(value: FeePayingType) -> Self { + match value { + FeePayingType::CPFP => FeeType::Cpfp, + FeePayingType::RBF => FeeType::Rbf, + FeePayingType::NoFunding => FeeType::NoFunding, + } + } +} + +impl TryFrom for FeePayingType { + type Error = Status; + + fn try_from(value: FeeType) -> Result { + match value { + FeeType::Cpfp => Ok(FeePayingType::CPFP), + FeeType::Rbf => Ok(FeePayingType::RBF), + FeeType::NoFunding => Ok(FeePayingType::NoFunding), + _ => Err(Status::invalid_argument("Invalid FeeType variant")), + } + } +} + +impl TryFrom for Signature { + type Error = BridgeError; + + fn try_from(value: SchnorrSig) -> Result { + Signature::from_slice(&value.schnorr_sig) + .wrap_err("Failed to parse schnorr signature") + .wrap_err_with(|| ParserError::RPCParamMalformed("schnorr_sig".to_string())) + .map_err(Into::into) + } +} +impl From for WinternitzPubkey { + fn from(value: winternitz::PublicKey) -> Self { + { + let digit_pubkey = value.into_iter().map(|inner| inner.to_vec()).collect(); + + WinternitzPubkey { digit_pubkey } + } + } +} + +impl From for clementine::Deposit { + fn from(value: DepositInfo) -> Self { + clementine::Deposit { + deposit_outpoint: Some(value.deposit_outpoint.into()), + deposit_data: Some(value.deposit_type.into()), + } + } +} + +impl TryFrom for DepositInfo { + type Error = Status; + + fn try_from(value: clementine::Deposit) -> Result { + let deposit_outpoint: OutPoint = value + .deposit_outpoint + .ok_or_else(|| Status::invalid_argument("No deposit outpoint received"))? + .try_into()?; + + let deposit_type = value + .deposit_data + .ok_or_else(|| Status::invalid_argument("No deposit data received"))? 
+ .try_into()?; + + Ok(DepositInfo { + deposit_outpoint, + deposit_type, + }) + } +} + +impl From for DepositParams { + fn from(value: DepositData) -> Self { + let actors: clementine::Actors = value.actors.into(); + let security_council: clementine::SecurityCouncil = value.security_council.into(); + let deposit: clementine::Deposit = value.deposit.into(); + + DepositParams { + deposit: Some(deposit), + actors: Some(actors), + security_council: Some(security_council), + } + } +} + +impl TryFrom for DepositData { + type Error = Status; + + fn try_from(value: DepositParams) -> Result { + let deposit: DepositInfo = value + .deposit + .ok_or(Status::invalid_argument("No deposit received"))? + .try_into()?; + let actors: Actors = value + .actors + .ok_or(Status::invalid_argument("No actors received"))? + .try_into()?; + + let security_council: SecurityCouncil = value + .security_council + .ok_or(Status::invalid_argument("No security council received"))? + .try_into()?; + + Ok(DepositData { + nofn_xonly_pk: None, + deposit, + actors, + security_council, + }) + } +} + +impl TryFrom for DepositType { + type Error = Status; + + fn try_from(value: clementine::deposit::DepositData) -> Result { + match value { + clementine::deposit::DepositData::BaseDeposit(data) => { + Ok(DepositType::BaseDeposit(BaseDepositData { + evm_address: data.evm_address.try_into().map_err(|e| { + Status::invalid_argument(format!( + "Failed to convert evm_address to EVMAddress: {}", + e + )) + })?, + recovery_taproot_address: data + .recovery_taproot_address + .parse::>() + .map_err(|e| Status::internal(e.to_string()))?, + })) + } + clementine::deposit::DepositData::ReplacementDeposit(data) => { + Ok(DepositType::ReplacementDeposit(ReplacementDepositData { + old_move_txid: data + .old_move_txid + .ok_or(Status::invalid_argument("No move_txid received"))? 
+ .try_into().map_err(|e| { + Status::invalid_argument(format!( + "Failed to convert replacement deposit move_txid to bitcoin::Txid: {}", + e + )) + })?, + })) + } + } + } +} + +impl From for clementine::deposit::DepositData { + fn from(value: DepositType) -> Self { + match value { + DepositType::BaseDeposit(data) => { + clementine::deposit::DepositData::BaseDeposit(clementine::BaseDeposit { + evm_address: data.evm_address.0.to_vec(), + recovery_taproot_address: data + .recovery_taproot_address + .assume_checked() + .to_string(), + }) + } + DepositType::ReplacementDeposit(data) => { + clementine::deposit::DepositData::ReplacementDeposit( + clementine::ReplacementDeposit { + old_move_txid: Some(data.old_move_txid.into()), + }, + ) + } + } + } +} + +impl TryFrom for Vec { + type Error = Status; + + fn try_from(value: clementine::XOnlyPublicKeys) -> Result { + value + .xonly_public_keys + .iter() + .map(|pk| { + XOnlyPublicKey::from_slice(pk).map_err(|e| { + Status::invalid_argument(format!("Failed to parse xonly public key: {}", e)) + }) + }) + .collect::, _>>() + } +} + +impl From> for clementine::XOnlyPublicKeys { + fn from(value: Vec) -> Self { + clementine::XOnlyPublicKeys { + xonly_public_keys: value.iter().map(|pk| pk.serialize().to_vec()).collect(), + } + } +} + +impl TryFrom for Actors { + type Error = Status; + + fn try_from(value: clementine::Actors) -> Result { + let verifiers = value + .verifiers + .ok_or(Status::invalid_argument("No verifiers received"))? + .try_into()?; + let watchtowers = value + .watchtowers + .ok_or(Status::invalid_argument("No watchtowers received"))? + .try_into()?; + let operators = value + .operators + .ok_or(Status::invalid_argument("No operators received"))? 
+ .try_into()?; + + Ok(Actors { + verifiers, + watchtowers, + operators, + }) + } +} + +impl From for clementine::Actors { + fn from(value: Actors) -> Self { + clementine::Actors { + verifiers: Some(value.verifiers.into()), + watchtowers: Some(value.watchtowers.into()), + operators: Some(value.operators.into()), + } + } +} + +impl From for clementine::SecurityCouncil { + fn from(value: SecurityCouncil) -> Self { + clementine::SecurityCouncil { + pks: value + .pks + .into_iter() + .map(|pk| pk.serialize().to_vec()) + .collect(), + threshold: value.threshold, + } + } +} + +impl TryFrom for SecurityCouncil { + type Error = Status; + + fn try_from(value: clementine::SecurityCouncil) -> Result { + let pks = value + .pks + .into_iter() + .map(|pk| { + XOnlyPublicKey::from_slice(&pk).map_err(|e| { + Status::invalid_argument(format!("Failed to parse xonly public key: {}", e)) + }) + }) + .collect::, _>>()?; + + Ok(SecurityCouncil { + pks, + threshold: value.threshold, + }) + } +} + +impl TryFrom for bitcoin::Transaction { + type Error = Status; + + fn try_from(value: RawSignedTx) -> Result { + bitcoin::consensus::encode::deserialize(&value.raw_tx) + .map_err(|e| Status::invalid_argument(format!("Failed to parse raw signed tx: {}", e))) + } +} + +impl From<&bitcoin::Transaction> for RawSignedTx { + fn from(value: &bitcoin::Transaction) -> Self { + RawSignedTx { + raw_tx: bitcoin::consensus::encode::serialize(value), + } + } +} + +impl From for clementine::Txid { + fn from(value: Txid) -> Self { + clementine::Txid { + txid: value.to_byte_array().to_vec(), + } + } +} +impl TryFrom for Txid { + type Error = FromSliceError; + + fn try_from(value: clementine::Txid) -> Result { + Ok(Txid::from_raw_hash(sha256d::Hash::from_slice(&value.txid)?)) + } +} + +#[allow(clippy::result_large_err)] +impl TryFrom for TransactionRequestData { + type Error = Status; + + fn try_from(request: TransactionRequest) -> Result { + let deposit_outpoint: OutPoint = request + .deposit_outpoint + 
.ok_or(Status::invalid_argument("No deposit params received"))? + .try_into()?; + + let kickoff_id = request + .kickoff_id + .ok_or(Status::invalid_argument("No kickoff params received"))?; + + Ok(TransactionRequestData { + deposit_outpoint, + kickoff_data: kickoff_id.try_into()?, + }) + } +} + +impl From for TransactionRequest { + fn from(value: TransactionRequestData) -> Self { + TransactionRequest { + deposit_outpoint: Some(value.deposit_outpoint.into()), + kickoff_id: Some(value.kickoff_data.into()), + } + } +} + +impl TryFrom for crate::deposit::KickoffData { + type Error = Status; + + fn try_from(value: clementine::KickoffId) -> Result { + let operator_xonly_pk = + XOnlyPublicKey::from_slice(&value.operator_xonly_pk).map_err(|e| { + Status::invalid_argument(format!("Failed to parse operator_xonly_pk: {}", e)) + })?; + + Ok(crate::deposit::KickoffData { + operator_xonly_pk, + round_idx: RoundIndex::from_index(value.round_idx as usize), + kickoff_idx: value.kickoff_idx, + }) + } +} + +impl From for clementine::KickoffId { + fn from(value: crate::deposit::KickoffData) -> Self { + clementine::KickoffId { + operator_xonly_pk: value.operator_xonly_pk.serialize().to_vec(), + round_idx: value.round_idx.to_index() as u32, + kickoff_idx: value.kickoff_idx, + } + } +} + +impl From> for SignedTxsWithType { + fn from(value: Vec<(TransactionType, Transaction)>) -> Self { + SignedTxsWithType { + signed_txs: value + .into_iter() + .map(|(tx_type, signed_tx)| SignedTxWithType { + transaction_type: Some(tx_type.into()), + raw_tx: bitcoin::consensus::serialize(&signed_tx), + }) + .collect(), + } + } +} + +impl TryFrom for (TransactionType, Transaction) { + type Error = Status; + + fn try_from(value: SignedTxWithType) -> Result { + Ok(( + value + .transaction_type + .ok_or(Status::invalid_argument("No transaction type received"))? 
+ .try_into() + .map_err(|e| { + Status::invalid_argument(format!("Failed to parse transaction type: {}", e)) + })?, + bitcoin::consensus::encode::deserialize(&value.raw_tx).map_err(|e| { + Status::invalid_argument(format!("Failed to parse raw signed tx: {}", e)) + })?, + )) + } +} + +impl TryFrom for Vec<(TransactionType, Transaction)> { + type Error = Status; + + fn try_from(value: clementine::SignedTxsWithType) -> Result { + value + .signed_txs + .into_iter() + .map(|signed_tx| signed_tx.try_into()) + .collect::, _>>() + } +} + +#[cfg(test)] +mod tests { + use crate::rpc::clementine::{self, Outpoint, WinternitzPubkey}; + use bitcoin::{hashes::Hash, OutPoint, Txid}; + use bitvm::signatures::winternitz; + + #[test] + fn from_bitcoin_outpoint_to_proto_outpoint() { + let og_outpoint = OutPoint { + txid: Txid::from_raw_hash(Hash::from_slice(&[0x1F; 32]).unwrap()), + vout: 0x45, + }; + + let proto_outpoint: Outpoint = og_outpoint.into(); + let bitcoin_outpoint: OutPoint = proto_outpoint.try_into().unwrap(); + assert_eq!(og_outpoint, bitcoin_outpoint); + + let proto_outpoint = Outpoint { + txid: Some(clementine::Txid { + txid: vec![0x1F; 32], + }), + vout: 0x45, + }; + let bitcoin_outpoint: OutPoint = proto_outpoint.try_into().unwrap(); + assert_eq!(og_outpoint, bitcoin_outpoint); + } + + #[test] + fn from_proto_outpoint_to_bitcoin_outpoint() { + let og_outpoint = Outpoint { + txid: Some(clementine::Txid { + txid: vec![0x1F; 32], + }), + vout: 0x45, + }; + + let bitcoin_outpoint: OutPoint = og_outpoint.clone().try_into().unwrap(); + let proto_outpoint: Outpoint = bitcoin_outpoint.into(); + assert_eq!(og_outpoint, proto_outpoint); + + let bitcoin_outpoint = OutPoint { + txid: Txid::from_raw_hash(Hash::from_slice(&[0x1F; 32]).unwrap()), + vout: 0x45, + }; + let proto_outpoint: Outpoint = bitcoin_outpoint.into(); + assert_eq!(og_outpoint, proto_outpoint); + } + + #[test] + fn from_proto_winternitz_public_key_to_bitvm() { + let og_wpk = vec![[0x45u8; 20]]; + + let rpc_wpk: 
WinternitzPubkey = og_wpk.clone().into(); + let rpc_converted_wpk: winternitz::PublicKey = + rpc_wpk.try_into().expect("encoded wpk has to be valid"); + assert_eq!(og_wpk, rpc_converted_wpk); + } + + #[test] + fn from_txid_to_proto_txid() { + let og_txid = Txid::from_raw_hash(Hash::from_slice(&[0x1F; 32]).unwrap()); + + let rpc_txid: clementine::Txid = og_txid.into(); + let rpc_converted_txid: Txid = rpc_txid.try_into().unwrap(); + assert_eq!(og_txid, rpc_converted_txid); + } +} diff --git a/core/src/rpc/parser/operator.rs b/core/src/rpc/parser/operator.rs new file mode 100644 index 000000000..42807419e --- /dev/null +++ b/core/src/rpc/parser/operator.rs @@ -0,0 +1,181 @@ +use crate::{ + citrea::CitreaClientT, + errors::BridgeError, + fetch_next_message_from_stream, + operator::Operator, + rpc::{ + clementine::{ + operator_params, DepositParams, DepositSignSession, OperatorConfig, OperatorParams, + Outpoint, SchnorrSig, WithdrawParams, XOnlyPublicKeyRpc, + }, + error::{self, expected_msg_got_none}, + }, +}; +use bitcoin::{ + address::NetworkUnchecked, secp256k1::schnorr::Signature, Address, Amount, OutPoint, ScriptBuf, + XOnlyPublicKey, +}; +use bitvm::signatures::winternitz; +use eyre::Context; +use std::str::FromStr; +use tonic::Status; + +impl From> for OperatorParams +where + C: CitreaClientT, +{ + fn from(operator: Operator) -> Self { + let operator_config = OperatorConfig { + collateral_funding_outpoint: Some(Outpoint { + txid: Some(operator.collateral_funding_outpoint.txid.into()), + vout: operator.collateral_funding_outpoint.vout, + }), + xonly_pk: operator.signer.xonly_public_key.to_string(), + wallet_reimburse_address: operator.reimburse_addr.to_string(), + }; + + OperatorParams { + response: Some(operator_params::Response::OperatorDetails(operator_config)), + } + } +} + +impl From for OperatorParams { + fn from(winternitz_pubkey: winternitz::PublicKey) -> Self { + OperatorParams { + response: Some(operator_params::Response::WinternitzPubkeys( + 
winternitz_pubkey.into(), + )), + } + } +} + +impl From for OperatorParams { + fn from(sig: Signature) -> Self { + OperatorParams { + response: Some(operator_params::Response::UnspentKickoffSig(SchnorrSig { + schnorr_sig: sig.serialize().to_vec(), + })), + } + } +} + +impl TryFrom for DepositParams { + type Error = Status; + + fn try_from(deposit_sign_session: DepositSignSession) -> Result { + match deposit_sign_session.deposit_params { + Some(deposit_params) => Ok(deposit_params), + None => Err(expected_msg_got_none("Deposit Params")()), + } + } +} + +impl From for XOnlyPublicKeyRpc { + fn from(xonly_public_key: XOnlyPublicKey) -> Self { + XOnlyPublicKeyRpc { + xonly_public_key: xonly_public_key.serialize().to_vec(), + } + } +} + +impl TryFrom for XOnlyPublicKey { + type Error = BridgeError; + + fn try_from(xonly_public_key_rpc: XOnlyPublicKeyRpc) -> Result { + Ok( + XOnlyPublicKey::from_slice(&xonly_public_key_rpc.xonly_public_key) + .wrap_err("Failed to parse XOnlyPublicKey")?, + ) + } +} + +/// Parses operator configuration from a given stream. 
+/// +/// # Returns +/// +/// A tuple, containing: +/// +/// - Operator index +/// - Collateral Funding txid +/// - Operator's X-only public key +/// - Wallet reimburse address +pub async fn parse_details( + stream: &mut tonic::Streaming, +) -> Result<(OutPoint, XOnlyPublicKey, Address), Status> { + let operator_param = fetch_next_message_from_stream!(stream, response)?; + + let operator_config = + if let operator_params::Response::OperatorDetails(operator_config) = operator_param { + operator_config + } else { + return Err(expected_msg_got_none("OperatorDetails")()); + }; + + let operator_xonly_pk = XOnlyPublicKey::from_str(&operator_config.xonly_pk) + .map_err(|_| Status::invalid_argument("Invalid operator xonly public key".to_string()))?; + + let collateral_funding_outpoint = operator_config + .collateral_funding_outpoint + .ok_or(Status::invalid_argument( + "Collateral funding outpoint not provided".to_string(), + ))? + .try_into()?; + + let wallet_reimburse_address = Address::from_str(&operator_config.wallet_reimburse_address) + .map_err(|e| { + Status::invalid_argument(format!("Failed to parse wallet reimburse address: {:?}", e)) + })?; + + Ok(( + collateral_funding_outpoint, + operator_xonly_pk, + wallet_reimburse_address, + )) +} + +pub async fn parse_winternitz_public_keys( + stream: &mut tonic::Streaming, +) -> Result { + let operator_param = fetch_next_message_from_stream!(stream, response)?; + + if let operator_params::Response::WinternitzPubkeys(wpk) = operator_param { + Ok(wpk.try_into()?) + } else { + Err(expected_msg_got_none("WinternitzPubkeys")()) + } +} + +pub async fn parse_schnorr_sig( + stream: &mut tonic::Streaming, +) -> Result { + let operator_param = fetch_next_message_from_stream!(stream, response)?; + + if let operator_params::Response::UnspentKickoffSig(wpk) = operator_param { + Ok(wpk.try_into()?) 
+ } else { + Err(expected_msg_got_none("UnspentKickoffSig")()) + } +} + +pub fn parse_withdrawal_sig_params( + params: WithdrawParams, +) -> Result<(u32, Signature, OutPoint, ScriptBuf, Amount), Status> { + let input_signature = Signature::from_slice(¶ms.input_signature) + .map_err(|e| error::invalid_argument("user_sig", "Can't convert input to Signature")(e))?; + + let input_outpoint: OutPoint = params + .input_outpoint + .ok_or_else(error::input_ended_prematurely)? + .try_into()?; + + let users_intent_script_pubkey = ScriptBuf::from_bytes(params.output_script_pubkey); + + Ok(( + params.withdrawal_id, + input_signature, + input_outpoint, + users_intent_script_pubkey, + Amount::from_sat(params.output_amount), + )) +} diff --git a/core/src/rpc/parser/verifier.rs b/core/src/rpc/parser/verifier.rs new file mode 100644 index 000000000..a44218329 --- /dev/null +++ b/core/src/rpc/parser/verifier.rs @@ -0,0 +1,263 @@ +use super::ParserError; +use crate::citrea::CitreaClientT; +use crate::deposit::DepositData; +use crate::errors::BridgeError; +use crate::fetch_next_optional_message_from_stream; +use crate::rpc::clementine::{ + nonce_gen_response, verifier_deposit_sign_params, DepositSignSession, NonceGenFirstResponse, + OperatorKeys, OperatorKeysWithDeposit, PartialSig, VerifierDepositSignParams, VerifierParams, +}; +use crate::verifier::Verifier; +use crate::{ + fetch_next_message_from_stream, + rpc::{ + clementine::{ + self, verifier_deposit_finalize_params, NonceGenResponse, + VerifierDepositFinalizeParams, VerifierPublicKeys, + }, + error::{self, invalid_argument}, + }, +}; +use bitcoin::secp256k1::schnorr; +use bitcoin::secp256k1::schnorr::Signature; +use bitcoin::secp256k1::PublicKey; +use bitcoin::XOnlyPublicKey; +use eyre::Context; +use secp256k1::musig::{AggregatedNonce, PartialSignature, PublicNonce}; +use tonic::Status; + +impl TryFrom<&Verifier> for VerifierParams +where + C: CitreaClientT, +{ + type Error = Status; + + fn try_from(verifier: &Verifier) -> 
Result { + Ok(VerifierParams { + public_key: verifier.signer.public_key.serialize().to_vec(), + }) + } +} + +impl TryFrom for Vec { + type Error = BridgeError; + + fn try_from(value: VerifierPublicKeys) -> Result { + let inner = value.verifier_public_keys; + + Ok(inner + .iter() + .map(|inner_vec| { + PublicKey::from_slice(inner_vec).wrap_err_with(|| { + ParserError::RPCParamMalformed("verifier_public_keys".to_string()) + }) + }) + .collect::, eyre::Report>>()?) + } +} +impl From> for VerifierPublicKeys { + fn from(value: Vec) -> Self { + let verifier_public_keys: Vec> = value + .into_iter() + .map(|inner| inner.serialize().to_vec()) + .collect(); + + VerifierPublicKeys { + verifier_public_keys, + } + } +} + +impl From for VerifierDepositSignParams { + fn from(value: DepositSignSession) -> Self { + VerifierDepositSignParams { + params: Some(verifier_deposit_sign_params::Params::DepositSignFirstParam( + value, + )), + } + } +} + +impl From for VerifierDepositFinalizeParams { + fn from(value: DepositSignSession) -> Self { + VerifierDepositFinalizeParams { + params: Some(verifier_deposit_finalize_params::Params::DepositSignFirstParam(value)), + } + } +} + +impl From<&Signature> for VerifierDepositFinalizeParams { + fn from(value: &Signature) -> Self { + VerifierDepositFinalizeParams { + params: Some(verifier_deposit_finalize_params::Params::SchnorrSig( + value.serialize().to_vec(), + )), + } + } +} + +impl From for NonceGenResponse { + fn from(value: NonceGenFirstResponse) -> Self { + NonceGenResponse { + response: Some(nonce_gen_response::Response::FirstResponse(value)), + } + } +} + +impl From<&PublicNonce> for NonceGenResponse { + fn from(value: &PublicNonce) -> Self { + NonceGenResponse { + response: Some(nonce_gen_response::Response::PubNonce( + value.serialize().to_vec(), + )), + } + } +} + +impl From for PartialSig { + fn from(value: PartialSignature) -> Self { + PartialSig { + partial_sig: value.serialize().to_vec(), + } + } +} + 
+#[allow(clippy::result_large_err)] +pub fn parse_deposit_sign_session( + deposit_sign_session: clementine::DepositSignSession, + verifier_pk: &PublicKey, +) -> Result<(DepositData, u128), Status> { + let deposit_params = deposit_sign_session + .deposit_params + .ok_or(Status::invalid_argument("No deposit params received"))?; + + let deposit_data: DepositData = deposit_params.try_into()?; + + let verifier_idx = deposit_data + .get_verifier_index(verifier_pk) + .map_err(|e| Status::invalid_argument(e.to_string()))?; + + let session_id = deposit_sign_session + .nonce_gen_first_responses + .get(verifier_idx) + .ok_or(Status::invalid_argument(format!( + "Verifier with index {verifier_idx} and public key of {verifier_pk} doesn't exists in nonce_gen_first_responses!" + )))? + .id.parse() + .map_err(|e| Status::invalid_argument(format!("Invalid nonce session id: {}", e)))?; + + Ok((deposit_data, session_id)) +} + +#[allow(clippy::result_large_err)] +pub fn parse_partial_sigs(partial_sigs: Vec>) -> Result, Status> { + partial_sigs + .iter() + .enumerate() + .map(|(idx, sig)| { + PartialSignature::from_byte_array( + &sig.as_slice() + .try_into() + .map_err(|_| Status::invalid_argument("PartialSignature must be 32 bytes"))?, + ) + .map_err(|e| { + error::invalid_argument( + "partial_sig", + format!("Verifier {idx} returned an invalid partial signature").as_str(), + )(e) + }) + }) + .collect::, _>>() +} + +#[allow(clippy::result_large_err)] +pub fn parse_op_keys_with_deposit( + data: OperatorKeysWithDeposit, +) -> Result<(DepositData, OperatorKeys, XOnlyPublicKey), Status> { + let deposit_params = data + .deposit_params + .ok_or(Status::invalid_argument("deposit_params is empty"))?; + + let deposit_data: DepositData = deposit_params.try_into()?; + + let op_keys = data + .operator_keys + .ok_or(Status::invalid_argument("OperatorDepositKeys is empty"))?; + + let operator_xonly_pk = XOnlyPublicKey::from_slice(&data.operator_xonly_pk).map_err( + 
invalid_argument("operator_xonly_pk", "Invalid xonly public key"), + )?; + + Ok((deposit_data, op_keys, operator_xonly_pk)) +} + +pub async fn parse_next_deposit_finalize_param_schnorr_sig( + stream: &mut tonic::Streaming, +) -> Result, Status> { + let sig = match fetch_next_optional_message_from_stream!(stream, params) { + Some(sig) => sig, + None => return Ok(None), + }; + + let final_sig = match sig { + verifier_deposit_finalize_params::Params::SchnorrSig(final_sig) => { + schnorr::Signature::from_slice(&final_sig) + .map_err(invalid_argument("FinalSig", "Invalid signature length"))? + } + _ => return Err(Status::internal("Expected FinalSig 1")), + }; + + Ok(Some(final_sig)) +} + +pub async fn parse_deposit_finalize_param_move_tx_agg_nonce( + stream: &mut tonic::Streaming, +) -> Result { + let sig = fetch_next_message_from_stream!(stream, params)?; + + match sig { + verifier_deposit_finalize_params::Params::MoveTxAggNonce(aggnonce) => { + let arr: [u8; 66] = aggnonce + .as_slice() + .try_into() + .map_err(|_| Status::invalid_argument("AggregatedNonce must be 66 bytes"))?; + + Ok(AggregatedNonce::from_byte_array(&arr) + .map_err(invalid_argument("AggregatedNonce", "failed to parse"))?) + } + _ => Err(Status::internal("Expected FinalSig 2")), + } +} + +pub async fn parse_deposit_finalize_param_emergency_stop_agg_nonce( + stream: &mut tonic::Streaming, +) -> Result { + let sig = fetch_next_message_from_stream!(stream, params)?; + + match sig { + verifier_deposit_finalize_params::Params::EmergencyStopAggNonce(aggnonce) => { + Ok(AggregatedNonce::from_byte_array( + &aggnonce + .as_slice() + .try_into() + .map_err(|_| Status::invalid_argument("AggregatedNonce must be 66 bytes"))?, + ) + .map_err(invalid_argument("AggregatedNonce", "failed to parse"))?) 
+ } + _ => Err(Status::internal("Expected FinalSig 2")), + } +} + +pub async fn parse_nonce_gen_first_response( + stream: &mut tonic::Streaming, +) -> Result { + let nonce_gen_response = fetch_next_message_from_stream!(stream, response)?; + + if let clementine::nonce_gen_response::Response::FirstResponse(nonce_gen_first_response) = + nonce_gen_response + { + Ok(nonce_gen_first_response) + } else { + Err(Status::invalid_argument("Expected first_response")) + } +} diff --git a/core/src/rpc/verifier.rs b/core/src/rpc/verifier.rs new file mode 100644 index 000000000..f093e5f0e --- /dev/null +++ b/core/src/rpc/verifier.rs @@ -0,0 +1,602 @@ +use std::str::FromStr; + +use super::clementine::{ + self, clementine_verifier_server::ClementineVerifier, Empty, NonceGenRequest, NonceGenResponse, + OperatorParams, OptimisticPayoutParams, PartialSig, RawTxWithRbfInfo, VergenResponse, + VerifierDepositFinalizeParams, VerifierDepositSignParams, VerifierParams, +}; +use super::error; +use super::parser::ParserError; +use crate::builder::transaction::sign::{create_and_sign_txs, TransactionRequestData}; +use crate::builder::transaction::ContractContext; +use crate::citrea::CitreaClientT; +use crate::constants::RESTART_BACKGROUND_TASKS_TIMEOUT; +use crate::rpc::clementine::VerifierDepositFinalizeResponse; +use crate::utils::{get_vergen_response, monitor_standalone_task, timed_request}; +use crate::verifier::VerifierServer; +use crate::{constants, fetch_next_optional_message_from_stream}; +use crate::{ + fetch_next_message_from_stream, + rpc::parser::{self}, +}; +use alloy::primitives::PrimitiveSignature; +use bitcoin::Witness; +use clementine::verifier_deposit_finalize_params::Params; +use secp256k1::musig::AggregatedNonce; +use tokio::sync::mpsc::{self, error::SendError}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{async_trait, Request, Response, Status, Streaming}; + +#[async_trait] +impl ClementineVerifier for VerifierServer +where + C: CitreaClientT, +{ + async fn 
vergen(&self, _request: Request) -> Result, Status> { + Ok(Response::new(get_vergen_response())) + } + + async fn restart_background_tasks( + &self, + _request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + // because start_background_tasks uses a RwLock, we set a timeout to be safe + timed_request( + RESTART_BACKGROUND_TASKS_TIMEOUT, + "Restarting background tasks", + self.start_background_tasks(), + ) + .await?; + Ok(Response::new(Empty {})) + } + + async fn optimistic_payout_sign( + &self, + request: Request, + ) -> Result, Status> { + let params = request.into_inner(); + let agg_nonce = AggregatedNonce::from_byte_array( + params + .agg_nonce + .as_slice() + .try_into() + .map_err(|_| Status::invalid_argument("agg_nonce must be exactly 66 bytes"))?, + ) + .map_err(|e| Status::invalid_argument(format!("Invalid musigagg nonce: {}", e)))?; + let nonce_session_id = params + .nonce_gen + .ok_or(Status::invalid_argument( + "Nonce params not found for optimistic payout", + ))? 
+ .id + .parse::() + .map_err(|e| Status::invalid_argument(format!("Invalid nonce session id: {}", e)))?; + + let opt_withdraw_params = params.opt_withdrawal.ok_or(Status::invalid_argument( + "Withdrawal params not found for optimistic payout", + ))?; + let verification_signature_str = opt_withdraw_params.verification_signature.clone(); + let withdrawal_params = opt_withdraw_params + .withdrawal + .ok_or(Status::invalid_argument( + "Withdrawal params not found for optimistic payout", + ))?; + let (withdrawal_id, input_signature, input_outpoint, output_script_pubkey, output_amount) = + parser::operator::parse_withdrawal_sig_params(withdrawal_params)?; + + let verification_signature = verification_signature_str + .map(|sig| { + PrimitiveSignature::from_str(&sig).map_err(|e| { + Status::invalid_argument(format!("Invalid verification signature: {}", e)) + }) + }) + .transpose()?; + + let partial_sig = self + .verifier + .sign_optimistic_payout( + nonce_session_id, + agg_nonce, + withdrawal_id, + input_signature, + input_outpoint, + output_script_pubkey, + output_amount, + verification_signature, + ) + .await?; + Ok(Response::new(partial_sig.into())) + } + + async fn internal_create_watchtower_challenge( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let transaction_request = request.into_inner(); + let transaction_data: TransactionRequestData = transaction_request.try_into()?; + + let (_tx_type, signed_tx, rbf_info) = self + .verifier + .create_watchtower_challenge( + transaction_data, + &{ + let challenge_bytes = self + .verifier + .config + .protocol_paramset() + .watchtower_challenge_bytes; + let mut challenge = vec![0u8; challenge_bytes]; + for (step, i) in (0..challenge_bytes).step_by(32).enumerate() { + if i < challenge_bytes { + challenge[i] = step as u8; + } + } + challenge + }, // dummy challenge with 1u8, 2u8 every 32 bytes + None, + ) + .await?; + + Ok(Response::new(RawTxWithRbfInfo { + raw_tx: 
bitcoin::consensus::serialize(&signed_tx), + rbf_info: Some(rbf_info.into()), + })) + } + type NonceGenStream = ReceiverStream>; + type DepositSignStream = ReceiverStream>; + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn get_params(&self, _: Request) -> Result, Status> { + let params: VerifierParams = (&self.verifier).try_into()?; + + Ok(Response::new(params)) + } + + #[tracing::instrument(skip_all, err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn set_operator( + &self, + req: Request>, + ) -> Result, Status> { + let mut in_stream = req.into_inner(); + + let (collateral_funding_outpoint, operator_xonly_pk, wallet_reimburse_address) = + parser::operator::parse_details(&mut in_stream).await?; + + // check if address is valid + let wallet_reimburse_address_checked = wallet_reimburse_address + .clone() + .require_network(self.verifier.config.protocol_paramset().network) + .map_err(|e| { + Status::invalid_argument(format!( + "Invalid operator reimbursement address: {:?} for bitcoin network {:?} for operator {:?}. 
ParseError: {}", + wallet_reimburse_address, + self.verifier.config.protocol_paramset().network, + operator_xonly_pk, + e + )) + })?; + + let mut operator_kickoff_winternitz_public_keys = Vec::new(); + // we need num_round_txs + 1 because the last round includes reimburse generators of previous round + for _ in 0..self.verifier.config.get_num_kickoff_winternitz_pks() { + operator_kickoff_winternitz_public_keys + .push(parser::operator::parse_winternitz_public_keys(&mut in_stream).await?); + } + + let mut unspent_kickoff_sigs = + Vec::with_capacity(self.verifier.config.get_num_unspent_kickoff_sigs()); + for _ in 0..self.verifier.config.get_num_unspent_kickoff_sigs() { + unspent_kickoff_sigs.push(parser::operator::parse_schnorr_sig(&mut in_stream).await?); + } + + if in_stream.message().await?.is_some() { + return Err(Status::invalid_argument( + "Expected end of stream, got more messages in set_operator", + )); + } + + self.verifier + .set_operator( + collateral_funding_outpoint, + operator_xonly_pk, + wallet_reimburse_address_checked, + operator_kickoff_winternitz_public_keys, + unspent_kickoff_sigs, + ) + .await?; + + Ok(Response::new(Empty {})) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn nonce_gen( + &self, + req: Request, + ) -> Result, Status> { + let num_nonces = req.into_inner().num_nonces; + + let (session_id, pub_nonces) = self.verifier.nonce_gen(num_nonces).await?; + + let (tx, rx) = mpsc::channel(pub_nonces.len() + 1); + + let handle = tokio::spawn(async move { + let nonce_gen_first_response = clementine::NonceGenFirstResponse { + id: session_id.to_string(), + num_nonces, + }; + let session_id: NonceGenResponse = nonce_gen_first_response.into(); + tx.send(Ok(session_id)).await?; + + for pub_nonce in &pub_nonces { + let pub_nonce: NonceGenResponse = pub_nonce.into(); + tx.send(Ok(pub_nonce)).await?; + } + + Ok::<(), SendError<_>>(()) + }); + monitor_standalone_task(handle, 
"Verifier nonce_gen"); + + Ok(Response::new(ReceiverStream::new(rx))) + } + + async fn deposit_sign( + &self, + req: Request>, + ) -> Result, Status> { + let mut in_stream = req.into_inner(); + let verifier = self.verifier.clone(); + + let (tx, rx) = mpsc::channel(constants::DEFAULT_CHANNEL_SIZE); + let out_stream: Self::DepositSignStream = ReceiverStream::new(rx); + + let (param_tx, mut param_rx) = mpsc::channel(1); + let (agg_nonce_tx, agg_nonce_rx) = mpsc::channel(constants::DEFAULT_CHANNEL_SIZE); + + // Send incoming data to deposit sign job. + let handle = tokio::spawn(async move { + let params = fetch_next_message_from_stream!(in_stream, params)?; + let (deposit_data, session_id) = match params { + clementine::verifier_deposit_sign_params::Params::DepositSignFirstParam( + deposit_sign_session, + ) => parser::verifier::parse_deposit_sign_session( + deposit_sign_session, + &verifier.signer.public_key, + )?, + _ => return Err(Status::invalid_argument("Expected DepositOutpoint")), + }; + param_tx + .send((deposit_data, session_id)) + .await + .map_err(error::output_stream_ended_prematurely)?; + + while let Some(result) = + fetch_next_optional_message_from_stream!(&mut in_stream, params) + { + let agg_nonce = match result { + clementine::verifier_deposit_sign_params::Params::AggNonce(agg_nonce) => { + AggregatedNonce::from_byte_array( + agg_nonce.as_slice().try_into().map_err(|_| { + ParserError::RPCParamMalformed("AggNonce".to_string()) + })?, + ) + .map_err(|_| ParserError::RPCParamMalformed("AggNonce".to_string()))? + } + _ => return Err(Status::invalid_argument("Expected AggNonce")), + }; + + agg_nonce_tx + .send(agg_nonce) + .await + .map_err(error::output_stream_ended_prematurely)?; + } + Ok(()) + }); + monitor_standalone_task(handle, "Verifier deposit data receiver"); + + // Start partial sig job and return partial sig responses. 
+ let handle = tokio::spawn(async move { + let (deposit_data, session_id) = param_rx + .recv() + .await + .ok_or(error::expected_msg_got_none("parameters")())?; + + let mut partial_sig_receiver = verifier + .deposit_sign(deposit_data.clone(), session_id, agg_nonce_rx) + .await?; + + let mut nonce_idx = 0; + let num_required_sigs = verifier.config.get_num_required_nofn_sigs(&deposit_data); + while let Some(partial_sig) = partial_sig_receiver.recv().await { + tx.send(Ok(PartialSig { + partial_sig: partial_sig.serialize().to_vec(), + })) + .await + .map_err(|e| { + Status::aborted(format!( + "Error sending partial sig, stream ended prematurely: {e}" + )) + })?; + + nonce_idx += 1; + tracing::trace!( + "Verifier {:?} signed and sent sighash {} of {} through rpc deposit_sign", + verifier.signer.public_key, + nonce_idx, + num_required_sigs + ); + if nonce_idx == num_required_sigs { + break; + } + } + + Ok::<(), Status>(()) + }); + monitor_standalone_task(handle, "Verifier deposit signature sender"); + + Ok(Response::new(out_stream)) + } + + /// Function to finalize the deposit. Verifier will check the validity of the both nofn signatures and + /// operator signatures. It will receive data from the stream in this order -> nofn sigs, movetx agg nonce, operator sigs. + /// If everything is correct, it will partially sign the move tx and send it to aggregator. 
+ #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn deposit_finalize( + &self, + req: Request>, + ) -> Result, Status> { + let mut in_stream = req.into_inner(); + tracing::trace!( + "In verifier {:?} deposit_finalize()", + self.verifier.signer.public_key + ); + + let (sig_tx, sig_rx) = mpsc::channel(constants::DEFAULT_CHANNEL_SIZE); + let (agg_nonce_tx, agg_nonce_rx) = mpsc::channel(1); + let (operator_sig_tx, operator_sig_rx) = mpsc::channel(constants::DEFAULT_CHANNEL_SIZE); + + let params = fetch_next_message_from_stream!(in_stream, params)?; + let (deposit_data, session_id) = match params { + Params::DepositSignFirstParam(deposit_sign_session) => { + parser::verifier::parse_deposit_sign_session( + deposit_sign_session, + &self.verifier.signer.public_key, + )? + } + _ => Err(Status::internal("Expected DepositOutpoint"))?, + }; + tracing::trace!( + "Verifier {:?} got DepositSignFirstParam in deposit_finalize()", + self.verifier.signer.public_key + ); + + // Start deposit finalize job. + let verifier = self.verifier.clone(); + let mut dep_data = deposit_data.clone(); + let deposit_finalize_handle = tokio::spawn(async move { + verifier + .deposit_finalize( + &mut dep_data, + session_id, + sig_rx, + agg_nonce_rx, + operator_sig_rx, + ) + .await + }); + + // Start parsing inputs and send them to deposit finalize job. + let verifier = self.verifier.clone(); + let sig_handle = tokio::spawn(async move { + let num_required_nofn_sigs = verifier.config.get_num_required_nofn_sigs(&deposit_data); + let mut nonce_idx = 0; + while let Some(sig) = + parser::verifier::parse_next_deposit_finalize_param_schnorr_sig(&mut in_stream) + .await? 
+ { + tracing::debug!( + "Received full nofn sig {} in deposit_finalize()", + nonce_idx + 1 + ); + sig_tx + .send(sig) + .await + .map_err(error::output_stream_ended_prematurely)?; + tracing::debug!( + "Sent full nofn sig {} to src/verifier in deposit_finalize()", + nonce_idx + 1 + ); + nonce_idx += 1; + if nonce_idx == num_required_nofn_sigs { + break; + } + } + if nonce_idx < num_required_nofn_sigs { + let err_msg = format!( + "Insufficient N-of-N signatures received: got {}, expected {}", + nonce_idx, num_required_nofn_sigs + ); + tracing::error!(err_msg); + return Err(Status::invalid_argument(err_msg)); + } + + let move_tx_agg_nonce = + parser::verifier::parse_deposit_finalize_param_move_tx_agg_nonce(&mut in_stream) + .await?; + agg_nonce_tx + .send(move_tx_agg_nonce) + .await + .map_err(error::output_stream_ended_prematurely)?; + + let emergency_stop_agg_nonce = + parser::verifier::parse_deposit_finalize_param_emergency_stop_agg_nonce( + &mut in_stream, + ) + .await?; + agg_nonce_tx + .send(emergency_stop_agg_nonce) + .await + .map_err(error::output_stream_ended_prematurely)?; + + let num_required_op_sigs = verifier + .config + .get_num_required_operator_sigs(&deposit_data); + let num_operators = deposit_data.get_num_operators(); + let num_required_total_op_sigs = num_required_op_sigs * num_operators; + let mut total_op_sig_count = 0; + for _ in 0..num_operators { + let mut op_sig_count = 0; + + while let Some(operator_sig) = + parser::verifier::parse_next_deposit_finalize_param_schnorr_sig(&mut in_stream) + .await? 
+ { + tracing::debug!( + "Received full operator sig {} in deposit_finalize()", + op_sig_count + 1 + ); + operator_sig_tx + .send(operator_sig) + .await + .map_err(error::output_stream_ended_prematurely)?; + tracing::debug!( + "Sent full operator sig {} to src/verifier in deposit_finalize()", + op_sig_count + 1 + ); + + op_sig_count += 1; + total_op_sig_count += 1; + if op_sig_count == num_required_op_sigs { + break; + } + } + } + + if total_op_sig_count < num_required_total_op_sigs { + let err_msg = format!( + "Insufficient operator signatures received: got {}, expected {}", + total_op_sig_count, num_required_total_op_sigs + ); + tracing::error!(err_msg); + return Err(Status::invalid_argument(err_msg)); + } + + Ok::<(), Status>(()) + }); + + sig_handle.await.map_err(|e| { + Status::internal(format!("Deposit sign thread failed to finish: {}", e).as_str()) + })??; + + let partial_sig = deposit_finalize_handle.await.map_err(|e| { + Status::internal(format!("Deposit finalize thread failed to finish: {}", e).as_str()) + })??; + + let response = VerifierDepositFinalizeResponse { + move_to_vault_partial_sig: partial_sig.0.serialize().to_vec(), + emergency_stop_partial_sig: partial_sig.1.serialize().to_vec(), + }; + + Ok(Response::new(response)) + } + + async fn set_operator_keys( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let data = request.into_inner(); + let (deposit_params, op_keys, operator_xonly_pk) = + parser::verifier::parse_op_keys_with_deposit(data)?; + self.verifier + .set_operator_keys(deposit_params, op_keys, operator_xonly_pk) + .await?; + Ok(Response::new(Empty {})) + } + + async fn internal_create_signed_txs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let transaction_request = request.into_inner(); + let transaction_data: TransactionRequestData = transaction_request.try_into()?; + let (_, deposit_data) = self + .verifier + .db + .get_deposit_data(None, 
transaction_data.deposit_outpoint) + .await? + .ok_or(Status::invalid_argument("Deposit not found in database"))?; + let context = ContractContext::new_context_for_kickoff( + transaction_data.kickoff_data, + deposit_data, + self.verifier.config.protocol_paramset(), + ); + let raw_txs = create_and_sign_txs( + self.verifier.db.clone(), + &self.verifier.signer, + self.verifier.config.clone(), + context, + None, // empty blockhash, will not sign this + None, + ) + .await?; + + Ok(Response::new(raw_txs.into())) + } + + async fn internal_handle_kickoff( + &self, + request: Request, + ) -> Result, Status> { + let txid = request.into_inner(); + let txid = bitcoin::Txid::try_from(txid).expect("Should be able to convert"); + let mut dbtx = self.verifier.db.begin_transaction().await?; + let kickoff_data = self + .verifier + .db + .get_deposit_data_with_kickoff_txid(None, txid) + .await?; + if let Some((deposit_data, kickoff_id)) = kickoff_data { + self.verifier + .handle_kickoff(&mut dbtx, Witness::new(), deposit_data, kickoff_id, false) + .await?; + } else { + return Err(Status::not_found("Kickoff txid not found")); + } + dbtx.commit().await.expect("Failed to commit transaction"); + Ok(Response::new(Empty {})) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE))] + async fn debug_tx( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + #[cfg(not(feature = "automation"))] + { + Err(tonic::Status::unimplemented( + "Automation is not enabled, TxSender is not running.", + )) + } + + // Get debug info from tx_sender + #[cfg(feature = "automation")] + { + let tx_id = request.into_inner().tx_id; + + match self.verifier.tx_sender.debug_tx(tx_id).await { + Ok(debug_info) => Ok(tonic::Response::new(debug_info)), + Err(e) => Err(tonic::Status::internal(format!( + "Failed to debug TX {}: {}", + tx_id, e + ))), + } + } + } + + async fn get_current_status( + &self, + _request: Request, + ) -> 
Result, Status> { + let status = self.get_current_status().await?; + Ok(Response::new(status)) + } +} diff --git a/core/src/servers.rs b/core/src/servers.rs new file mode 100644 index 000000000..ba392cbe5 --- /dev/null +++ b/core/src/servers.rs @@ -0,0 +1,436 @@ +//! # Servers +//! +//! Utilities for operator and verifier servers. +use crate::aggregator::AggregatorServer; +use crate::citrea::CitreaClientT; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::operator::OperatorServer; +use crate::rpc::clementine::clementine_aggregator_server::ClementineAggregatorServer; +use crate::rpc::clementine::clementine_operator_server::ClementineOperatorServer; +use crate::rpc::clementine::clementine_verifier_server::ClementineVerifierServer; +use crate::rpc::interceptors::Interceptors::{Noop, OnlyAggregatorAndSelf}; +use crate::utils::AddMethodMiddlewareLayer; +use crate::verifier::VerifierServer; +use crate::{config::BridgeConfig, errors}; +use errors::BridgeError; +use eyre::Context; +use rustls_pki_types::pem::PemObject; +use std::time::Duration; +use tokio::sync::oneshot; +use tonic::server::NamedService; +use tonic::service::interceptor::InterceptedService; +use tonic::transport::{Certificate, CertificateDer, Identity, ServerTlsConfig}; +use tower::buffer::BufferLayer; +use tower::limit::RateLimitLayer; + +#[cfg(test)] +use crate::test::common::ensure_test_certificates; + +pub type ServerFuture = dyn futures::Future>; + +/// Represents a network address that can be either TCP or Unix socket +#[derive(Debug, Clone)] +pub enum ServerAddr { + Tcp(std::net::SocketAddr), + #[cfg(unix)] + Unix(std::path::PathBuf), +} + +impl From for ServerAddr { + fn from(addr: std::net::SocketAddr) -> Self { + ServerAddr::Tcp(addr) + } +} + +#[cfg(unix)] +impl From for ServerAddr { + fn from(path: std::path::PathBuf) -> Self { + ServerAddr::Unix(path) + } +} + +/// Generic function to create a gRPC server with the given service +pub async fn create_grpc_server( + addr: 
ServerAddr, + service: S, + server_name: &str, + config: &BridgeConfig, +) -> Result<(ServerAddr, oneshot::Sender<()>), BridgeError> +where + S: tower::Service< + http::Request, + Response = http::Response, + Error = std::convert::Infallible, + > + Clone + + Send + + NamedService + + 'static, + S::Future: Send + 'static, +{ + // Create channels for server readiness and shutdown + let (ready_tx, ready_rx) = oneshot::channel(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + + // Ensure certificates exist in test mode + #[cfg(test)] + { + ensure_test_certificates().wrap_err("Failed to ensure test certificates")?; + } + + match addr { + ServerAddr::Tcp(socket_addr) => { + let cert = tokio::fs::read(&config.server_cert_path) + .await + .wrap_err(format!( + "Failed to read server certificate from {}", + config.server_cert_path.display() + ))?; + let key = tokio::fs::read(&config.server_key_path) + .await + .wrap_err(format!( + "Failed to read server key from {}", + config.server_key_path.display() + ))?; + + let server_identity = Identity::from_pem(cert, key); + + // Load CA certificate for client verification + let client_ca_cert = tokio::fs::read(&config.ca_cert_path) + .await + .wrap_err(format!( + "Failed to read CA certificate from {}", + config.ca_cert_path.display() + ))?; + + let client_ca = Certificate::from_pem(client_ca_cert); + + // Build TLS configuration + let tls_config = if config.client_verification { + ServerTlsConfig::new() + .identity(server_identity) + .client_ca_root(client_ca) + } else { + ServerTlsConfig::new().identity(server_identity) + }; + + let service = InterceptedService::new( + service, + if config.client_verification { + let client_cert = CertificateDer::from_pem_file(&config.client_cert_path) + .wrap_err(format!( + "Failed to read client certificate from {}", + config.client_cert_path.display() + ))? 
+ .to_owned(); + + let aggregator_cert = + CertificateDer::from_pem_file(&config.aggregator_cert_path) + .wrap_err(format!( + "Failed to read aggregator certificate from {}", + config.aggregator_cert_path.display() + ))? + .to_owned(); + + OnlyAggregatorAndSelf { + aggregator_cert, + our_cert: client_cert, + } + } else { + Noop + }, + ); + + tracing::info!( + "Starting {} gRPC server with TCP address: {}", + server_name, + socket_addr + ); + + let server_builder = tonic::transport::Server::builder() + .layer(AddMethodMiddlewareLayer) + .layer(BufferLayer::new(config.grpc.req_concurrency_limit)) + .layer(RateLimitLayer::new( + config.grpc.ratelimit_req_count as u64, + Duration::from_secs(config.grpc.ratelimit_req_interval_secs), + )) + .timeout(Duration::from_secs(config.grpc.timeout_secs)) + .tcp_keepalive(Some(Duration::from_secs(config.grpc.tcp_keepalive_secs))) + .concurrency_limit_per_connection(config.grpc.req_concurrency_limit) + .http2_adaptive_window(Some(true)) + .tls_config(tls_config) + .wrap_err("Failed to configure TLS")? 
+ .add_service(service); + + let server_name_str = server_name.to_string(); + + let handle = server_builder.serve_with_shutdown(socket_addr, async move { + let _ = ready_tx.send(()); + shutdown_rx.await.ok(); + tracing::info!("{} gRPC server shutting down", server_name_str); + }); + + let server_name_str = server_name.to_string(); + + tokio::spawn(async move { + if let Err(e) = handle.await { + tracing::error!("{} gRPC server error: {:?}", server_name_str, e); + } + }); + } + #[cfg(unix)] + ServerAddr::Unix(ref socket_path) => { + let server_builder = tonic::transport::Server::builder() + .layer(AddMethodMiddlewareLayer) + .layer(BufferLayer::new(config.grpc.req_concurrency_limit)) + .layer(RateLimitLayer::new( + config.grpc.ratelimit_req_count as u64, + Duration::from_secs(config.grpc.ratelimit_req_interval_secs), + )) + .timeout(Duration::from_secs(config.grpc.timeout_secs)) + .concurrency_limit_per_connection(config.grpc.req_concurrency_limit) + .add_service(service); + tracing::info!( + "Starting {} gRPC server with Unix socket: {:?}", + server_name, + socket_path + ); + + // Remove socket file if it already exists + if socket_path.exists() { + std::fs::remove_file(socket_path) + .wrap_err("Failed to remove existing gRPC unix socket file")?; + } + + // Create Unix socket listener + let uds = tokio::net::UnixListener::bind(socket_path) + .wrap_err("Failed to bind to Unix socket")?; + let incoming = tokio_stream::wrappers::UnixListenerStream::new(uds); + + let server_name_str = server_name.to_string(); + + let handle = server_builder.serve_with_incoming_shutdown(incoming, async move { + let _ = ready_tx.send(()); + shutdown_rx.await.ok(); + tracing::info!("{} gRPC server shutting down", server_name_str); + }); + + let server_name_str = server_name.to_string(); + + tokio::spawn(async move { + if let Err(e) = handle.await { + tracing::error!("{} gRPC server error: {:?}", server_name_str, e); + } + }); + } + } + + // Wait for server to be ready + let _ = 
ready_rx.await; + tracing::info!("{} gRPC server started", server_name); + + Ok((addr, shutdown_tx)) +} + +pub async fn create_verifier_grpc_server( + config: BridgeConfig, +) -> Result<(std::net::SocketAddr, oneshot::Sender<()>), BridgeError> { + let _rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .wrap_err("Failed to connect to Bitcoin RPC")?; + + let addr: std::net::SocketAddr = format!("{}:{}", config.host, config.port) + .parse() + .wrap_err("Failed to parse address")?; + let verifier = VerifierServer::::new(config.clone()).await?; + verifier.start_background_tasks().await?; + + let svc = ClementineVerifierServer::new(verifier) + .max_encoding_message_size(config.grpc.max_message_size) + .max_decoding_message_size(config.grpc.max_message_size); + + let (server_addr, shutdown_tx) = + create_grpc_server(addr.into(), svc, "Verifier", &config).await?; + + match server_addr { + ServerAddr::Tcp(socket_addr) => Ok((socket_addr, shutdown_tx)), + _ => Err(BridgeError::ConfigError("Expected TCP address".into())), + } +} + +pub async fn create_operator_grpc_server( + config: BridgeConfig, +) -> Result<(std::net::SocketAddr, oneshot::Sender<()>), BridgeError> { + tracing::info!( + "config host and port are: {} and {}", + config.host, + config.port + ); + let addr: std::net::SocketAddr = format!("{}:{}", config.host, config.port) + .parse() + .wrap_err("Failed to parse address")?; + + tracing::info!("Creating operator server"); + let operator = OperatorServer::::new(config.clone()).await?; + operator.start_background_tasks().await?; + + tracing::info!("Creating ClementineOperatorServer"); + let svc = ClementineOperatorServer::new(operator) + .max_encoding_message_size(config.grpc.max_message_size) + .max_decoding_message_size(config.grpc.max_message_size); + let (server_addr, shutdown_tx) = + create_grpc_server(addr.into(), svc, "Operator", 
&config).await?; + tracing::info!("Operator gRPC server created"); + + match server_addr { + ServerAddr::Tcp(socket_addr) => Ok((socket_addr, shutdown_tx)), + _ => Err(BridgeError::ConfigError("Expected TCP address".into())), + } +} + +pub async fn create_aggregator_grpc_server( + mut config: BridgeConfig, +) -> Result<(std::net::SocketAddr, oneshot::Sender<()>), BridgeError> { + let addr: std::net::SocketAddr = format!("{}:{}", config.host, config.port) + .parse() + .wrap_err("Failed to parse address")?; + let aggregator_server = AggregatorServer::new(config.clone()).await?; + aggregator_server.start_background_tasks().await?; + + let svc = ClementineAggregatorServer::new(aggregator_server) + .max_encoding_message_size(config.grpc.max_message_size) + .max_decoding_message_size(config.grpc.max_message_size); + + if config.client_verification { + tracing::warn!( + "Client verification is enabled, even though Aggregator gRPC server should have client certificate verification DISABLED. Overriding to false...", + + ); + + config.client_verification = false; + } + + let (server_addr, shutdown_tx) = + create_grpc_server(addr.into(), svc, "Aggregator", &config).await?; + + match server_addr { + ServerAddr::Tcp(socket_addr) => Ok((socket_addr, shutdown_tx)), + _ => Err(BridgeError::ConfigError("Expected TCP address".into())), + } +} + +// Functions for creating servers with Unix sockets (useful for tests) +#[cfg(unix)] +pub async fn create_verifier_unix_server( + config: BridgeConfig, + socket_path: std::path::PathBuf, +) -> Result<(std::path::PathBuf, oneshot::Sender<()>), BridgeError> { + let _rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .wrap_err("Failed to connect to Bitcoin RPC")?; + + let verifier = VerifierServer::::new(config.clone()).await?; + verifier.start_background_tasks().await?; + + let svc = ClementineVerifierServer::new(verifier) + 
.max_encoding_message_size(config.grpc.max_message_size) + .max_decoding_message_size(config.grpc.max_message_size); + + let (server_addr, shutdown_tx) = + create_grpc_server(socket_path.into(), svc, "Verifier", &config).await?; + + match server_addr { + ServerAddr::Unix(path) => Ok((path, shutdown_tx)), + _ => Err(BridgeError::ConfigError("Expected Unix socket path".into())), + } +} + +#[cfg(not(unix))] +pub async fn create_verifier_unix_server( + _config: BridgeConfig, + _socket_path: std::path::PathBuf, +) -> Result<(std::path::PathBuf, oneshot::Sender<()>), BridgeError> { + Err(BridgeError::ConfigError( + "Unix sockets are not supported on this platform".into(), + )) +} + +#[cfg(unix)] +pub async fn create_operator_unix_server( + config: BridgeConfig, + socket_path: std::path::PathBuf, +) -> Result<(std::path::PathBuf, oneshot::Sender<()>), BridgeError> { + let _rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .wrap_err("Failed to connect to Bitcoin RPC")?; + + let operator = OperatorServer::::new(config.clone()).await?; + operator.start_background_tasks().await?; + + let svc = ClementineOperatorServer::new(operator) + .max_encoding_message_size(config.grpc.max_message_size) + .max_decoding_message_size(config.grpc.max_message_size); + + let (server_addr, shutdown_tx) = + create_grpc_server(socket_path.into(), svc, "Operator", &config).await?; + + match server_addr { + ServerAddr::Unix(path) => Ok((path, shutdown_tx)), + _ => Err(BridgeError::ConfigError("Expected Unix socket path".into())), + } +} + +#[cfg(not(unix))] +pub async fn create_operator_unix_server( + _config: BridgeConfig, + _socket_path: std::path::PathBuf, +) -> Result<(std::path::PathBuf, oneshot::Sender<()>), BridgeError> { + Err(BridgeError::ConfigError( + "Unix sockets are not supported on this platform".into(), + )) +} + +#[cfg(unix)] +pub async fn 
create_aggregator_unix_server( + config: BridgeConfig, + socket_path: std::path::PathBuf, +) -> Result<(std::path::PathBuf, oneshot::Sender<()>), BridgeError> { + let aggregator_server = AggregatorServer::new(config.clone()).await?; + aggregator_server.start_background_tasks().await?; + + let svc = ClementineAggregatorServer::new(aggregator_server) + .max_encoding_message_size(config.grpc.max_message_size) + .max_decoding_message_size(config.grpc.max_message_size); + + let (server_addr, shutdown_tx) = + create_grpc_server(socket_path.into(), svc, "Aggregator", &config).await?; + + match server_addr { + ServerAddr::Unix(path) => Ok((path, shutdown_tx)), + _ => Err(BridgeError::ConfigError("Expected Unix socket path".into())), + } +} + +#[cfg(not(unix))] +pub async fn create_aggregator_unix_server( + _config: BridgeConfig, + _socket_path: std::path::PathBuf, +) -> Result<(std::path::PathBuf, oneshot::Sender<()>), BridgeError> { + Err(BridgeError::ConfigError( + "Unix sockets are not supported on this platform".into(), + )) +} diff --git a/core/src/states/context.rs b/core/src/states/context.rs new file mode 100644 index 000000000..b01487774 --- /dev/null +++ b/core/src/states/context.rs @@ -0,0 +1,201 @@ +use crate::config::protocol::ProtocolParamset; +use crate::database::DatabaseTransaction; +use crate::deposit::{DepositData, KickoffData}; +use crate::operator::RoundIndex; +use crate::utils::NamedEntity; + +use bitcoin::BlockHash; +use bitcoin::Transaction; +use bitcoin::Txid; +use bitcoin::Witness; +use bitcoin::XOnlyPublicKey; +use statig::awaitable::InitializedStateMachine; +use tonic::async_trait; + +use std::collections::HashMap; +use std::sync::Arc; + +use crate::database::Database; + +use crate::builder::transaction::TxHandler; + +use std::collections::BTreeMap; + +use crate::builder::transaction::ContractContext; + +use crate::builder::transaction::TransactionType; + +use crate::errors::BridgeError; + +use std::collections::HashSet; + +use 
super::block_cache; +use super::kickoff; +use super::round; + +#[derive(Debug, Clone)] +/// Duties are notifications that are sent to the owner (verifier or operator) of the state machine to notify them on changes to the current +/// contract state that require action. +/// Note that for all kickoff state duties, they are only sent if withdrawal process is still going on, meaning the burn connector and +/// kickoff finalizer is still on-chain/unspent. +pub enum Duty { + /// -- Round state duties -- + /// This duty is sent after a new ready to reimburse tx is sent by the corresponding operator. + /// used_kickoffs is a set of kickoff indexes that have been used in the previous round. + /// If there are unspent kickoffs, the owner can send a unspent kickoff connector tx. + NewReadyToReimburse { + round_idx: RoundIndex, + operator_xonly_pk: XOnlyPublicKey, + used_kickoffs: HashSet, + }, + /// This duty is sent after a kickoff utxo is spent by the operator. + /// It includes the txid in which the utxo was spent, so that the owner can verify if this is an actual kickoff sent by operator. + /// Witness is also sent as if tx is an actual kickoff, the witness includes payout blockhash. + CheckIfKickoff { + txid: Txid, + block_height: u32, + witness: Witness, + challenged_before: bool, + }, + /// -- Kickoff state duties -- + /// This duty is only sent if a kickoff was challenged. + /// This duty is sent after some time (paramset.time_to_send_watchtower_challenge number of blocks) passes after a kickoff was sent to chain. + /// It denotes to the owner that it is time to send a watchtower challenge to the corresponding kickoff. + WatchtowerChallenge { + kickoff_data: KickoffData, + deposit_data: DepositData, + }, + /// This duty is only sent if a kickoff was challenged. + /// This duty is sent only after latest blockhash is committed. 
Latest blockhash is committed after all watchtower challenges are sent + /// or timed out so that it is certain no new watchtower challenges can be sent. + /// The duty denotes that it is time to start sending operator asserts to the corresponding kickoff. + /// It includes all the watchtower challenges and the payout blockhash so that they can be used in the proof. + SendOperatorAsserts { + kickoff_data: KickoffData, + deposit_data: DepositData, + watchtower_challenges: HashMap, + payout_blockhash: Witness, + latest_blockhash: Witness, + }, + /// This duty is only sent if a kickoff was challenged. + /// This duty is sent after all asserts and latest blockhash commit are finalized on chain, and all watchtower challenge + /// utxos are spent. + /// It denotes to the owner that it is time to send a disprove to the corresponding kickoff. + /// It includes the operator asserts, operator acks and the payout blockhash so that they can be used in the disprove tx if the proof + /// is invalid. + VerifierDisprove { + kickoff_data: KickoffData, + deposit_data: DepositData, + operator_asserts: HashMap, + operator_acks: HashMap, + payout_blockhash: Witness, + latest_blockhash: Witness, + }, + /// This duty is only sent if a kickoff was challenged. + /// This duty is sent after every watchtower challenge is either sent or timed out. + /// It denotes to the owner that it is time to send a latest blockhash to the corresponding kickoff to be used in the proof. 
+ SendLatestBlockhash { + kickoff_data: KickoffData, + deposit_data: DepositData, + latest_blockhash: BlockHash, + }, +} + +/// Result of handling a duty +#[derive(Debug, Clone)] +pub enum DutyResult { + /// Duty was handled, no return value is necessary + Handled, + /// Result of checking if a kickoff contains if a challenge was sent because the kickoff was determined as malicious + CheckIfKickoff { challenged: bool }, +} + +/// Owner trait with async handling and tx handler creation +#[async_trait] +pub trait Owner: Clone + NamedEntity { + /// Handle a protocol-related duty + async fn handle_duty(&self, duty: Duty) -> Result; + + /// Create the transactions for an instance of the L1 contract + async fn create_txhandlers( + &self, + tx_type: TransactionType, + contract_context: ContractContext, + ) -> Result, BridgeError>; + + /// Handle a new finalized block + async fn handle_finalized_block( + &self, + dbtx: DatabaseTransaction<'_, '_>, + block_id: u32, + block_height: u32, + block_cache: Arc, + _light_client_proof_wait_interval_secs: Option, + ) -> Result<(), BridgeError>; +} + +/// Context for the state machine +/// Every state can access the context +#[derive(Debug, Clone)] +pub struct StateContext { + pub db: Database, + pub owner: Arc, + pub cache: Arc, + pub new_round_machines: Vec>>, + pub new_kickoff_machines: Vec>>, + pub errors: Vec>, + pub paramset: &'static ProtocolParamset, + pub owner_type: String, +} + +impl StateContext { + pub fn new( + db: Database, + owner: Arc, + cache: Arc, + paramset: &'static ProtocolParamset, + ) -> Self { + // Get the owner type string from the owner instance + let owner_type = T::ENTITY_NAME.to_string(); + + Self { + db, + owner, + cache, + new_round_machines: Vec::new(), + new_kickoff_machines: Vec::new(), + errors: Vec::new(), + paramset, + owner_type, + } + } + + pub async fn dispatch_duty(&self, duty: Duty) -> Result { + self.owner.handle_duty(duty).await + } + + /// Run an async closure and capture any errors in 
execution. + /// + /// It will store the error report in the context's `errors` field. The + /// errors are later collected by the state manager and reported. This + /// ensures that all errors are collected and reported in a single place. + /// In general, it's expected that the closure attaches context about the + /// state machine to the error report. You may check + /// `KickoffStateMachine::wrap_err` and `RoundStateMachine::wrap_err` + /// for an example implementation of an error wrapper utility function. + /// + /// # Parameters + /// * `fnc`: An async closure that takes a mutable reference to the state context and returns a result. + /// + /// # Returns + /// * `()` + pub async fn capture_error( + &mut self, + fnc: impl AsyncFnOnce(&mut Self) -> Result<(), eyre::Report>, + ) { + let result = fnc(self).await; + if let Err(e) = result { + self.errors.push(e.into()); + } + } +} diff --git a/core/src/states/event.rs b/core/src/states/event.rs new file mode 100644 index 000000000..46419e7ca --- /dev/null +++ b/core/src/states/event.rs @@ -0,0 +1,192 @@ +use std::sync::Arc; + +use bitcoin::Witness; +use pgmq::PGMQueueExt; +use statig::awaitable::IntoStateMachineExt; + +use crate::{ + database::{Database, DatabaseTransaction}, + deposit::{DepositData, KickoffData, OperatorData}, + errors::BridgeError, +}; + +use super::{ + block_cache, kickoff::KickoffStateMachine, round::RoundStateMachine, Owner, StateManager, +}; + +/// System events are events that are sent by other parts of clementine to the state machine +/// They are used to update the state machine +/// They are sent by the state manager to the state machine +#[derive(Debug, serde::Serialize, Clone, serde::Deserialize)] +#[allow(clippy::large_enum_variant)] +pub enum SystemEvent { + /// An event for a new finalized block + /// So that state manager can update the states of all current state machines + NewFinalizedBlock { + block_id: u32, + block: bitcoin::Block, + height: u32, + }, + /// An event for when a 
new operator is set in clementine + /// So that the state machine can create a new round state machine to track the operator + NewOperator { operator_data: OperatorData }, + /// An event for when a new kickoff is set in clementine + /// So that the state machine can create a new kickoff state machine to track the kickoff status + NewKickoff { + kickoff_data: KickoffData, + kickoff_height: u32, + deposit_data: DepositData, + payout_blockhash: Witness, + }, +} + +impl StateManager { + /// Appends a message to the state manager's message queue to create a new round state machine + pub async fn dispatch_new_round_machine( + db: Database, + tx: DatabaseTransaction<'_, '_>, + operator_data: OperatorData, + ) -> Result<(), eyre::Report> { + let queue_name = StateManager::::queue_name(); + let queue = PGMQueueExt::new_with_pool(db.get_pool()).await; + + let message = SystemEvent::NewOperator { operator_data }; + queue + .send_with_cxn(&queue_name, &message, &mut *(*tx)) + .await + .map_err(|e| eyre::eyre!("Error sending NewOperator event: {:?}", e))?; + Ok(()) + } + + /// Appends a message to the state manager's message queue to create a new kickoff state machine + pub async fn dispatch_new_kickoff_machine( + db: Database, + tx: DatabaseTransaction<'_, '_>, + kickoff_data: KickoffData, + kickoff_height: u32, + deposit_data: DepositData, + payout_blockhash: Witness, + ) -> Result<(), eyre::Report> { + let queue_name = StateManager::::queue_name(); + let queue = PGMQueueExt::new_with_pool(db.get_pool()).await; + + let message = SystemEvent::NewKickoff { + kickoff_data, + kickoff_height, + deposit_data, + payout_blockhash, + }; + queue + .send_with_cxn(&queue_name, &message, &mut *(*tx)) + .await + .map_err(|e| eyre::eyre!("Error sending NewKickoff event: {:?}", e))?; + Ok(()) + } + + /// Updates the block cache with the new block + /// Sets the block cache in the context + pub fn update_block_cache(&mut self, block: &bitcoin::Block, block_height: u32) { + let mut cache: 
block_cache::BlockCache = Default::default(); + cache.update_with_block(block, block_height); + self.context.cache = Arc::new(cache); + } + + /// Handles the system events + pub async fn handle_event( + &mut self, + event: SystemEvent, + dbtx: DatabaseTransaction<'_, '_>, + ) -> Result<(), BridgeError> { + match event { + // Received when a block is finalized in Bitcoin + SystemEvent::NewFinalizedBlock { + block_id, + block, + height, + } => { + if self.next_height_to_process != height { + tracing::warn!("Finalized block arrived to state manager out of order. Ignoring block. This can happen for some blocks during restarts. Otherwise it might be due to an error. Expected: {}, Got: {}", self.next_height_to_process, height); + return Ok(()); + } + + self.update_block_cache(&block, height); + + // Handle the finalized block on the owner (verifier or operator) + self.owner + .handle_finalized_block( + dbtx, + block_id, + height, + self.context.cache.clone(), + None, + ) + .await?; + // Process the block on all state machines + self.process_block_parallel(height).await?; + } + // Received when a new operator is set in clementine + SystemEvent::NewOperator { operator_data } => { + // Check if operator's state machine already exists. + // This can happen if aggregator calls set_operator for the same operator multiple times. + // In this case, we don't want to create a new state machine. 
+ for operator_machine in self.round_machines.iter() { + if operator_machine.operator_data.xonly_pk == operator_data.xonly_pk { + return Ok(()); + } + } + + let operator_machine = RoundStateMachine::new(operator_data) + .uninitialized_state_machine() + .init_with_context(&mut self.context) + .await; + self.process_and_add_new_states_from_height( + vec![operator_machine], + vec![], + self.paramset.start_height, + ) + .await?; + } + // Received when a new kickoff is detected + SystemEvent::NewKickoff { + kickoff_data, + kickoff_height, + deposit_data, + payout_blockhash, + } => { + // Check if the kickoff machine already exists. If so do not add a new one. + // This can happen if during block processing an error happens, reverting the state machines + // but a new kickoff state was already dispatched during block processing. + for kickoff_machine in self.kickoff_machines.iter() { + if kickoff_machine.kickoff_data == kickoff_data + && kickoff_machine.deposit_data == deposit_data + && kickoff_machine.payout_blockhash == payout_blockhash + && kickoff_machine.kickoff_height == kickoff_height + { + return Ok(()); + } + } + + let kickoff_machine = KickoffStateMachine::new( + kickoff_data, + kickoff_height, + deposit_data, + payout_blockhash, + ) + .uninitialized_state_machine() + .init_with_context(&mut self.context) + .await; + self.process_and_add_new_states_from_height( + vec![], + vec![kickoff_machine], + kickoff_height, + ) + .await?; + } + } + // Save the state machines to the database with the current block height + // So that in case of a node restart the state machines can be restored + self.save_state_to_db(self.next_height_to_process, Some(dbtx)) + .await?; + Ok(()) + } +} diff --git a/core/src/states/kickoff.rs b/core/src/states/kickoff.rs new file mode 100644 index 000000000..cc5f00705 --- /dev/null +++ b/core/src/states/kickoff.rs @@ -0,0 +1,761 @@ +use std::collections::{HashMap, HashSet}; + +use bitcoin::{OutPoint, Transaction, Witness}; +use eyre::Context; 
+use serde_with::serde_as; +use statig::prelude::*; + +use crate::{ + bitvm_client::ClementineBitVMPublicKeys, + builder::transaction::{ + input::UtxoVout, remove_txhandler_from_map, ContractContext, TransactionType, + }, + deposit::{DepositData, KickoffData}, + errors::BridgeError, +}; + +use super::{ + block_cache::BlockCache, + context::{Duty, StateContext}, + matcher::{BlockMatcher, Matcher}, + Owner, StateMachineError, +}; + +/// Events that can be dispatched to the kickoff state machine +/// These event either trigger state transitions or trigger actions of the owner +#[derive( + Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, serde::Serialize, serde::Deserialize, +)] +pub enum KickoffEvent { + /// Event that is dispatched when the kickoff is challenged + /// This will change the state to "Challenged" + Challenged, + /// Event that is dispatched when a watchtower challenge is detected in Bitcoin + WatchtowerChallengeSent { + watchtower_idx: usize, + challenge_outpoint: OutPoint, + }, + /// Event that is dispatched when an operator BitVM assert is detected in Bitcoin + OperatorAssertSent { + assert_idx: usize, + assert_outpoint: OutPoint, + }, + /// Event that is dispatched when a watchtower challenge timeout is detected in Bitcoin + WatchtowerChallengeTimeoutSent { watchtower_idx: usize }, + /// Event that is dispatched when an operator challenge ack is detected in Bitcoin + /// Operator challenge acks are sent by operators to acknowledge watchtower challenges + OperatorChallengeAckSent { + watchtower_idx: usize, + challenge_ack_outpoint: OutPoint, + }, + /// Event that is dispatched when the latest blockhash is detected in Bitcoin + LatestBlockHashSent { latest_blockhash_outpoint: OutPoint }, + /// Event that is dispatched when the kickoff finalizer is spent in Bitcoin + /// Irrespective of whether the kickoff is malicious or not, the kickoff process is finished when the kickoff finalizer is spent. 
+ KickoffFinalizerSpent, + /// Event that is dispatched when the burn connector is spent in Bitcoin + BurnConnectorSpent, + /// Event that is used to indicate that it is time for the owner to send latest blockhash tx. + /// Matcher for this event is created after all watchtower challenge utxos are spent. + /// Latest blockhash is sent some blocks after all watchtower challenge utxos are spent, so that the total work until the block committed + /// in latest blockhash is definitely higher than the highest work in valid watchtower challenges. + TimeToSendLatestBlockhash, + /// Event that is used to indicate that it is time for the owner to send watchtower challenge tx. + /// Watchtower challenges are sent after some blocks pass since the kickoff tx, so that the total work in the watchtower challenge is as high as possible. + TimeToSendWatchtowerChallenge, + /// Special event that is used to indicate that the state machine has been saved to the database and the dirty flag should be reset to false + SavedToDb, +} + +/// State machine for tracking a single kickoff process in the protocol. +/// +/// # Purpose +/// The `KickoffStateMachine` manages the lifecycle of a single kickoff process, which is created after a kickoff transaction is detected on Bitcoin. It tracks the transactions related to the kickoff and the resulting data. +/// +/// # States +/// - `kickoff_started`: The initial state after a kickoff is detected. Waits for further events such as challenges, but still tracks any committed data on Bitcoin (like latest blockhash, operator asserts, watchtower challenges, etc) +/// - `challenged`: Entered if the kickoff is challenged. Watchtower challenges are only sent if the kickoff is challenged. +/// - `closed`: Terminal state indicating the kickoff process has ended, either by kickoff finalizer utxo or burn connector utxo being spent. +/// +/// # Events +/// - `Challenged`: The kickoff is challenged, transitioning to the `challenged` state. 
+/// - `WatchtowerChallengeSent`: A watchtower challenge is detected on Bitcoin, stores the watchtower challenge transaction, and stores the watchtower utxo as spent. +/// - `OperatorAssertSent`: An operator BitVM assert is detected, stores the witness of the assert utxo. +/// - `WatchtowerChallengeTimeoutSent`: A watchtower challenge timeout is detected, stores watchtower utxo as spent. +/// - `OperatorChallengeAckSent`: An operator challenge acknowledgment is detected, stores the witness of the challenge ack utxo, which holds the revealed preimage that can be used to disprove if the operator maliciously doesn't include the watchtower challenge in the BitVM proof. After sending this transaction, the operator is forced to use the corresponding watchtower challenge in its BitVM proof, otherwise it can be disproven. +/// - `LatestBlockHashSent`: The latest blockhash is committed on Bitcoin, stores the witness of the latest blockhash utxo, which holds the blockhash that should be used by the operator in its BitVM proof. +/// - `KickoffFinalizerSpent`: The kickoff finalizer is spent, ending the kickoff process, transitions to the `closed` state. +/// - `BurnConnectorSpent`: The burn connector is spent, ending the kickoff process, transitions to the `closed` state. +/// - `TimeToSendWatchtowerChallenge`: Time to send a watchtower challenge (used in challenged state), this event notifies the owner to create and send a watchtower challenge tx. Verifiers wait after a kickoff to send a watchtower challenge so that the total work in the watchtower challenge is as high as possible. +/// - `SavedToDb`: Indicates the state machine has been persisted and resets the dirty flag. +/// +/// # Behavior +/// - The state machine maintains a set of matchers to detect relevant Bitcoin transactions and trigger corresponding events. +/// - It tracks the progress of the kickoff, including challenges, operator actions, and finalization. 
+/// - When terminal events occur (e.g., finalizer or burn connector spent), the state machine transitions to `closed` and clears all matchers. +/// - The state machine interacts with the owner to perform protocol duties (e.g., sending challenges, asserts, or disproves) as required by the protocol logic. +/// +/// This design ensures that all protocol-critical events related to a kickoff are tracked and handled in a robust, stateful manner, supporting both normal and adversarial scenarios. +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct KickoffStateMachine { + /// Maps matchers to the resulting kickoff events. + #[serde_as(as = "Vec<(_, _)>")] + pub(crate) matchers: HashMap, + /// Indicates if the state machine has unsaved changes that need to be persisted on db. + /// dirty flag is set if any matcher matches the current block. + /// the flag is set to true in on_transition and on_dispatch + /// the flag is set to false after the state machine is saved to db and the event SavedToDb is dispatched + pub(crate) dirty: bool, + /// The kickoff data associated with the kickoff being tracked. + pub(crate) kickoff_data: KickoffData, + /// The deposit data that the kickoff tries to withdraw from. + pub(crate) deposit_data: DepositData, + /// The block height at which the kickoff transaction was mined. + pub(crate) kickoff_height: u32, + /// The witness for the kickoff transactions input which is a winternitz signature that commits the payout blockhash. + pub(crate) payout_blockhash: Witness, + /// Set of indices of watchtower UTXOs that have already been spent. + spent_watchtower_utxos: HashSet, + /// The witness taken from the transaction spending the latest blockhash utxo. + latest_blockhash: Witness, + /// Saves watchtower challenges with the watchtower index as the key. + /// Watchtower challenges are encoded as the output of the watchtower challenge tx. 
+ /// (taproot addresses parsed as 32 bytes + OP_RETURN data), in total 144 bytes. + watchtower_challenges: HashMap, + /// Saves operator asserts with the index of the assert utxo as the key. + /// Operator asserts are witnesses that spend the assert utxo's and contain the winternitz signature of the BitVM assertion. + operator_asserts: HashMap, + /// Saves operator challenge acks with the index of the challenge ack utxo as the key. + /// Operator challenge acks are witnesses that spend the challenge ack utxo's. + /// The witness contains the revealed preimage that can be used to disprove if the operator + /// maliciously doesn't include the watchtower challenge in the BitVM proof. + operator_challenge_acks: HashMap, + /// Marker for the generic owner type (phantom data for type safety). + /// This is used to ensure that the state machine is generic over the owner type. + phantom: std::marker::PhantomData, +} + +impl BlockMatcher for KickoffStateMachine { + type StateEvent = KickoffEvent; + + fn match_block(&self, block: &BlockCache) -> Vec { + self.matchers + .iter() + .filter_map(|(matcher, kickoff_event)| { + matcher.matches(block).map(|ord| (ord, kickoff_event)) + }) + .min() + .map(|(_, kickoff_event)| kickoff_event) + .into_iter() + .cloned() + .collect() + } +} + +impl KickoffStateMachine { + pub fn new( + kickoff_data: KickoffData, + kickoff_height: u32, + deposit_data: DepositData, + payout_blockhash: Witness, + ) -> Self { + Self { + kickoff_data, + kickoff_height, + deposit_data, + payout_blockhash, + latest_blockhash: Witness::default(), + matchers: HashMap::new(), + dirty: true, + phantom: std::marker::PhantomData, + watchtower_challenges: HashMap::new(), + operator_asserts: HashMap::new(), + spent_watchtower_utxos: HashSet::new(), + operator_challenge_acks: HashMap::new(), + } + } +} + +#[state_machine( + initial = "State::kickoff_started()", + on_dispatch = "Self::on_dispatch", + on_transition = "Self::on_transition", + state(derive(Debug, Clone, 
PartialEq, Eq, serde::Serialize, serde::Deserialize)) +)] +impl KickoffStateMachine { + #[action] + pub(crate) fn on_transition(&mut self, state_a: &State, state_b: &State) { + tracing::trace!(?self.kickoff_data, ?self.deposit_data, "Transitioning from {:?} to {:?}", state_a, state_b); + self.dirty = true; + } + + pub fn kickoff_meta(&self, method: &'static str) -> StateMachineError { + eyre::eyre!( + "Error in kickoff state machine for kickoff {:?} in {}", + self.kickoff_data, + method + ) + .into() + } + + #[action] + pub(crate) fn on_dispatch( + &mut self, + _state: StateOrSuperstate<'_, '_, Self>, + evt: &KickoffEvent, + ) { + if matches!(evt, KickoffEvent::SavedToDb) { + self.dirty = false; + } else { + tracing::trace!(?self.kickoff_data, "Dispatching event {:?}", evt); + self.dirty = true; + + // Remove the matcher corresponding to the event. + if let Some((matcher, _)) = self.matchers.iter().find(|(_, ev)| ev == &evt) { + let matcher = matcher.clone(); + self.matchers.remove(&matcher); + } + } + } + + /// Checks if the latest blockhash is ready to be committed on Bitcoin. + /// The check is done by checking if all watchtower challenge utxos are spent. + /// If the check is successful, a new matcher is created to send latest blockhash tx after finality depth blocks pass from current block height. 
+ async fn create_matcher_for_latest_blockhash_if_ready( + &mut self, + context: &mut StateContext, + ) { + context + .capture_error(async |context| { + { + // if all watchtower challenge utxos are spent, its safe to send latest blockhash commit tx + if self.spent_watchtower_utxos.len() == self.deposit_data.get_num_watchtowers() + { + // create a matcher to send latest blockhash tx after finality depth blocks pass from current block height + self.matchers.insert( + Matcher::BlockHeight( + context.cache.block_height + context.paramset.finality_depth, + ), + KickoffEvent::TimeToSendLatestBlockhash, + ); + } + Ok::<(), BridgeError>(()) + } + .wrap_err(self.kickoff_meta("on check_if_time_to_commit_latest_blockhash")) + }) + .await; + } + + /// Checks if the disprove is ready to be sent on Bitcoin + /// The check is done by checking if all operator asserts are received, + /// latest blockhash is committed and all watchtower challenge utxos are spent + /// If the check is successful, the disprove is sent on Bitcoin + async fn disprove_if_ready(&mut self, context: &mut StateContext) { + if self.operator_asserts.len() == ClementineBitVMPublicKeys::number_of_assert_txs() + && self.latest_blockhash != Witness::default() + && self.spent_watchtower_utxos.len() == self.deposit_data.get_num_watchtowers() + { + self.send_disprove(context).await; + } + } + + /// Checks if the operator asserts are ready to be sent on Bitcoin + /// The check is done by checking if all watchtower challenge utxos are spent and latest blockhash is committed + /// If the check is successful, the operator asserts are sent on Bitcoin + async fn send_operator_asserts_if_ready(&mut self, context: &mut StateContext) { + context + .capture_error(async |context| { + { + // if all watchtower challenge utxos are spent and latest blockhash is committed, its safe to send asserts + if self.spent_watchtower_utxos.len() == self.deposit_data.get_num_verifiers() + && self.latest_blockhash != Witness::default() + { + 
context + .owner + .handle_duty(Duty::SendOperatorAsserts { + kickoff_data: self.kickoff_data, + deposit_data: self.deposit_data.clone(), + watchtower_challenges: self.watchtower_challenges.clone(), + payout_blockhash: self.payout_blockhash.clone(), + latest_blockhash: self.latest_blockhash.clone(), + }) + .await?; + } + Ok::<(), BridgeError>(()) + } + .wrap_err(self.kickoff_meta("on send_operator_asserts")) + }) + .await; + } + + async fn send_watchtower_challenge(&mut self, context: &mut StateContext) { + context + .capture_error(async |context| { + { + context + .owner + .handle_duty(Duty::WatchtowerChallenge { + kickoff_data: self.kickoff_data, + deposit_data: self.deposit_data.clone(), + }) + .await?; + Ok::<(), BridgeError>(()) + } + .wrap_err(self.kickoff_meta("on send_watchtower_challenge")) + }) + .await; + } + + async fn send_disprove(&mut self, context: &mut StateContext) { + context + .capture_error(async |context| { + { + context + .owner + .handle_duty(Duty::VerifierDisprove { + kickoff_data: self.kickoff_data, + deposit_data: self.deposit_data.clone(), + operator_asserts: self.operator_asserts.clone(), + operator_acks: self.operator_challenge_acks.clone(), + payout_blockhash: self.payout_blockhash.clone(), + latest_blockhash: self.latest_blockhash.clone(), + }) + .await?; + Ok::<(), BridgeError>(()) + } + .wrap_err(self.kickoff_meta("on send_disprove")) + }) + .await; + } + + async fn send_latest_blockhash(&mut self, context: &mut StateContext) { + context + .capture_error(async |context| { + { + context + .owner + .handle_duty(Duty::SendLatestBlockhash { + kickoff_data: self.kickoff_data, + deposit_data: self.deposit_data.clone(), + latest_blockhash: context + .cache + .block + .as_ref() + .ok_or(eyre::eyre!("Block object not found in block cache"))? 
+ .header + .block_hash(), + }) + .await?; + Ok::<(), BridgeError>(()) + } + .wrap_err(self.kickoff_meta("on send_latest_blockhash")) + }) + .await; + } + + async fn unhandled_event(&mut self, context: &mut StateContext, event: &KickoffEvent) { + context + .capture_error(async |_context| { + let event_str = format!("{:?}", event); + Err(StateMachineError::UnhandledEvent(event_str)) + .wrap_err(self.kickoff_meta("kickoff unhandled event")) + }) + .await; + } + + /// If the kickoff is challenged, the state machine will add corresponding matchers for + /// sending watchtower challenges after some amount of blocks passes since the kickoff was included in Bitcoin. + /// Sending watchtower challenges only happen if the kickoff is challenged. + /// As sending latest blockhash commit and asserts depend on watchtower challenges/timeouts being sent, + /// they will also not be sent if the kickoff is not challenged and kickoff finalizer is spent with ChallengeTimeout, + /// which changes the state to "Closed". + #[action] + pub(crate) async fn on_challenged_entry(&mut self, context: &mut StateContext) { + context + .capture_error(async |context| { + { + // create times to send necessary challenge asserts + self.matchers.insert( + Matcher::BlockHeight( + self.kickoff_height + + context.paramset.time_to_send_watchtower_challenge as u32, + ), + KickoffEvent::TimeToSendWatchtowerChallenge, + ); + Ok::<(), BridgeError>(()) + } + .wrap_err(self.kickoff_meta("on_kickoff_started_entry")) + }) + .await; + } + + /// State that is entered when the kickoff is challenged + /// It only includes special handling for the TimeToSendWatchtowerChallenge event + /// All other events are handled in the kickoff superstate + #[state(superstate = "kickoff", entry_action = "on_challenged_entry")] + pub(crate) async fn challenged( + &mut self, + event: &KickoffEvent, + context: &mut StateContext, + ) -> Response { + match event { + KickoffEvent::WatchtowerChallengeSent { .. 
} + | KickoffEvent::OperatorAssertSent { .. } + | KickoffEvent::OperatorChallengeAckSent { .. } + | KickoffEvent::KickoffFinalizerSpent + | KickoffEvent::BurnConnectorSpent + | KickoffEvent::WatchtowerChallengeTimeoutSent { .. } + | KickoffEvent::LatestBlockHashSent { .. } + | KickoffEvent::TimeToSendLatestBlockhash + | KickoffEvent::SavedToDb => Super, + KickoffEvent::TimeToSendWatchtowerChallenge => { + self.send_watchtower_challenge(context).await; + Handled + } + _ => { + self.unhandled_event(context, event).await; + Handled + } + } + } + + #[superstate] + async fn kickoff( + &mut self, + event: &KickoffEvent, + context: &mut StateContext, + ) -> Response { + tracing::trace!("Received event in kickoff superstate: {:?}", event); + match event { + // When a watchtower challenge is detected in Bitcoin, + // save the full challenge transaction and check if the latest blockhash can be committed + // and if the disprove is ready to be sent + KickoffEvent::WatchtowerChallengeSent { + watchtower_idx, + challenge_outpoint, + } => { + self.spent_watchtower_utxos.insert(*watchtower_idx); + let tx = context + .cache + .get_tx_of_utxo(challenge_outpoint) + .expect("Challenge outpoint that got matched should be in block"); + // save challenge witness + self.watchtower_challenges + .insert(*watchtower_idx, tx.clone()); + self.create_matcher_for_latest_blockhash_if_ready(context) + .await; + self.disprove_if_ready(context).await; + Handled + } + // When an operator assert is detected in Bitcoin, + // save the assert witness (which is the BitVM winternitz commit) + // and check if the disprove is ready to be sent + KickoffEvent::OperatorAssertSent { + assert_idx, + assert_outpoint, + } => { + let witness = context + .cache + .get_witness_of_utxo(assert_outpoint) + .expect("Assert outpoint that got matched should be in block"); + // save assert witness + self.operator_asserts.insert(*assert_idx, witness); + self.disprove_if_ready(context).await; + Handled + } + // When an 
operator challenge ack is detected in Bitcoin, + // save the ack witness as the witness includes the revealed preimage that + // can be used to disprove if the operator maliciously doesn't include the + // watchtower challenge in the BitVM proof + KickoffEvent::OperatorChallengeAckSent { + watchtower_idx, + challenge_ack_outpoint, + } => { + let witness = context + .cache + .get_witness_of_utxo(challenge_ack_outpoint) + .expect("Challenge ack outpoint that got matched should be in block"); + // save challenge ack witness + self.operator_challenge_acks + .insert(*watchtower_idx, witness); + Handled + } + // When the kickoff finalizer is spent in Bitcoin, + // the kickoff process is finished and the state machine will transition to the "Closed" state + KickoffEvent::KickoffFinalizerSpent => Transition(State::closed()), + // When the burn connector of the operator is spent in Bitcoin, it means the operator cannot continue with any more kickoffs + // (unless burn connector is spent by ready to reimburse tx), so the state machine will transition to the "Closed" state + KickoffEvent::BurnConnectorSpent => { + tracing::error!( + "Burn connector spent before kickoff was finalized for kickoff {:?}", + self.kickoff_data + ); + Transition(State::closed()) + } + // When a watchtower challenge timeout is detected in Bitcoin, + // set the watchtower utxo as spent and check if the latest blockhash can be committed + KickoffEvent::WatchtowerChallengeTimeoutSent { watchtower_idx } => { + self.spent_watchtower_utxos.insert(*watchtower_idx); + self.create_matcher_for_latest_blockhash_if_ready(context) + .await; + Handled + } + // When the latest blockhash is detected in Bitcoin, + // save the witness which includes the blockhash and check if the operator asserts and + // disprove tx are ready to be sent + KickoffEvent::LatestBlockHashSent { + latest_blockhash_outpoint, + } => { + let witness = context + .cache + .get_witness_of_utxo(latest_blockhash_outpoint) + .expect("Latest 
blockhash outpoint that got matched should be in block"); + // save latest blockhash witness + self.latest_blockhash = witness; + // can start sending asserts as latest blockhash is committed and finalized + self.send_operator_asserts_if_ready(context).await; + self.disprove_if_ready(context).await; + Handled + } + KickoffEvent::TimeToSendLatestBlockhash => { + // tell owner to send latest blockhash tx + self.send_latest_blockhash(context).await; + Handled + } + KickoffEvent::SavedToDb => Handled, + _ => { + self.unhandled_event(context, event).await; + Handled + } + } + } + + /// State that is entered when the kickoff is started + /// It will transition to the "Challenged" state if the kickoff is challenged + #[state(superstate = "kickoff", entry_action = "on_kickoff_started_entry")] + pub(crate) async fn kickoff_started( + &mut self, + event: &KickoffEvent, + context: &mut StateContext, + ) -> Response { + match event { + KickoffEvent::Challenged => { + tracing::warn!("Warning: Operator challenged: {:?}", self.kickoff_data); + Transition(State::challenged()) + } + KickoffEvent::WatchtowerChallengeSent { .. } + | KickoffEvent::OperatorAssertSent { .. } + | KickoffEvent::OperatorChallengeAckSent { .. } + | KickoffEvent::KickoffFinalizerSpent + | KickoffEvent::BurnConnectorSpent + | KickoffEvent::WatchtowerChallengeTimeoutSent { .. } + | KickoffEvent::LatestBlockHashSent { .. } + | KickoffEvent::TimeToSendLatestBlockhash + | KickoffEvent::SavedToDb => Super, + _ => { + self.unhandled_event(context, event).await; + Handled + } + } + } + + /// Adds the default matchers that will be used if the state is "challenged" or "kickoff_started". + /// These matchers are used to detect when various transactions in the contract are mined on Bitcoin. 
+ async fn add_default_kickoff_matchers( + &mut self, + context: &mut StateContext, + ) -> Result<(), BridgeError> { + // First create all transactions for the current deposit + let contract_context = ContractContext::new_context_for_kickoff( + self.kickoff_data, + self.deposit_data.clone(), + context.paramset, + ); + let mut txhandlers = context + .owner + .create_txhandlers(TransactionType::AllNeededForDeposit, contract_context) + .await?; + let kickoff_txhandler = + remove_txhandler_from_map(&mut txhandlers, TransactionType::Kickoff)?; + + // add operator asserts + let kickoff_txid = *kickoff_txhandler.get_txid(); + let num_asserts = crate::bitvm_client::ClementineBitVMPublicKeys::number_of_assert_txs(); + for assert_idx in 0..num_asserts { + let mini_assert_vout = UtxoVout::Assert(assert_idx).get_vout(); + let assert_timeout_txhandler = remove_txhandler_from_map( + &mut txhandlers, + TransactionType::AssertTimeout(assert_idx), + )?; + let assert_timeout_txid = assert_timeout_txhandler.get_txid(); + // Assert transactions can have any txid (there is no enforcement on how the assert utxo is spent, just that + // spending assert utxo reveals the BitVM winternitz commit in the utxo's witness) + // But assert timeouts are nofn signed transactions with a fixed txid, so we can detect assert transactions + // by checking if the assert utxo is spent but not by the assert timeout tx + self.matchers.insert( + Matcher::SpentUtxoButNotTxid( + OutPoint { + txid: kickoff_txid, + vout: mini_assert_vout, + }, + vec![*assert_timeout_txid], + ), + KickoffEvent::OperatorAssertSent { + assert_outpoint: OutPoint { + txid: kickoff_txid, + vout: mini_assert_vout, + }, + assert_idx, + }, + ); + } + // add latest blockhash tx sent matcher + let latest_blockhash_timeout_txhandler = + remove_txhandler_from_map(&mut txhandlers, TransactionType::LatestBlockhashTimeout)?; + let latest_blockhash_timeout_txid = latest_blockhash_timeout_txhandler.get_txid(); + let latest_blockhash_outpoint = 
OutPoint { + txid: kickoff_txid, + vout: UtxoVout::LatestBlockhash.get_vout(), + }; + // Same logic as before with assert transaction detection, if latest blockhash utxo is not spent by latest blockhash timeout tx, + // it means the latest blockhash is committed on Bitcoin + self.matchers.insert( + Matcher::SpentUtxoButNotTxid( + latest_blockhash_outpoint, + vec![*latest_blockhash_timeout_txid], + ), + KickoffEvent::LatestBlockHashSent { + latest_blockhash_outpoint, + }, + ); + // add watchtower challenges and challenge acks matchers + for watchtower_idx in 0..self.deposit_data.get_num_watchtowers() { + let watchtower_challenge_vout = + UtxoVout::WatchtowerChallenge(watchtower_idx).get_vout(); + let watchtower_timeout_txhandler = remove_txhandler_from_map( + &mut txhandlers, + TransactionType::WatchtowerChallengeTimeout(watchtower_idx), + )?; + let watchtower_timeout_txid = watchtower_timeout_txhandler.get_txid(); + // matcher in case watchtower challenge timeout is sent + self.matchers.insert( + Matcher::SentTx(*watchtower_timeout_txid), + KickoffEvent::WatchtowerChallengeTimeoutSent { watchtower_idx }, + ); + // matcher in case watchtower challenge is sent (watchtower challenge utxo is spent but not by watchtower challenge timeout tx) + self.matchers.insert( + Matcher::SpentUtxoButNotTxid( + OutPoint { + txid: kickoff_txid, + vout: watchtower_challenge_vout, + }, + vec![*watchtower_timeout_txid], + ), + KickoffEvent::WatchtowerChallengeSent { + watchtower_idx, + challenge_outpoint: OutPoint { + txid: kickoff_txid, + vout: watchtower_challenge_vout, + }, + }, + ); + // add operator challenge ack matcher + let operator_challenge_ack_vout = + UtxoVout::WatchtowerChallengeAck(watchtower_idx).get_vout(); + let operator_challenge_nack_txhandler = remove_txhandler_from_map( + &mut txhandlers, + TransactionType::OperatorChallengeNack(watchtower_idx), + )?; + let operator_challenge_nack_txid = operator_challenge_nack_txhandler.get_txid(); + // operator challenge ack utxo 
is spent but not by operator challenge nack tx or watchtower challenge timeout tx + self.matchers.insert( + Matcher::SpentUtxoButNotTxid( + OutPoint { + txid: kickoff_txid, + vout: operator_challenge_ack_vout, + }, + vec![*operator_challenge_nack_txid, *watchtower_timeout_txid], + ), + KickoffEvent::OperatorChallengeAckSent { + watchtower_idx, + challenge_ack_outpoint: OutPoint { + txid: kickoff_txid, + vout: operator_challenge_ack_vout, + }, + }, + ); + } + + // add burn connector tx spent matcher + // Burn connector can also be spent in ready to reimburse tx, but before spending burn connector that way, + // the kickoff finalizer needs to be spent first, otherwise pre-signed "Kickoff not finalized" tx can be sent by + // any verifier, slashing the operator. + // If the kickoff finalizer is spent first, the state will be in "Closed" state and all matchers will be deleted. + let round_txhandler = remove_txhandler_from_map(&mut txhandlers, TransactionType::Round)?; + let round_txid = *round_txhandler.get_txid(); + self.matchers.insert( + Matcher::SpentUtxo(OutPoint { + txid: round_txid, + vout: UtxoVout::CollateralInRound.get_vout(), + }), + KickoffEvent::BurnConnectorSpent, + ); + // add kickoff finalizer utxo spent matcher + self.matchers.insert( + Matcher::SpentUtxo(OutPoint { + txid: kickoff_txid, + vout: UtxoVout::KickoffFinalizer.get_vout(), + }), + KickoffEvent::KickoffFinalizerSpent, + ); + // add challenge detector matcher, if challenge utxo is not spent by challenge timeout tx, it means the kickoff is challenged + let challenge_timeout_txhandler = + remove_txhandler_from_map(&mut txhandlers, TransactionType::ChallengeTimeout)?; + let challenge_timeout_txid = challenge_timeout_txhandler.get_txid(); + self.matchers.insert( + Matcher::SpentUtxoButNotTxid( + OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Challenge.get_vout(), + }, + vec![*challenge_timeout_txid], + ), + KickoffEvent::Challenged, + ); + Ok(()) + } + + #[action] + pub(crate) async fn 
on_kickoff_started_entry(&mut self, context: &mut StateContext) { + context + .capture_error(async |context| { + { + // Add all watchtower challenges and operator asserts to matchers + self.add_default_kickoff_matchers(context).await?; + Ok::<(), BridgeError>(()) + } + .wrap_err(self.kickoff_meta("on_kickoff_started_entry")) + }) + .await; + } + + /// Clears all matchers when the state is "closed". + /// This means the state machine will not do any more actions anymore. + #[action] + #[allow(unused_variables)] + pub(crate) async fn on_closed_entry(&mut self, context: &mut StateContext) { + self.matchers.clear(); + } + + #[state(entry_action = "on_closed_entry")] + // Terminal state when the kickoff process ends + #[allow(unused_variables)] + pub(crate) async fn closed( + &mut self, + event: &KickoffEvent, + context: &mut StateContext, + ) -> Response { + Handled + } +} diff --git a/core/src/states/matcher.rs b/core/src/states/matcher.rs new file mode 100644 index 000000000..af9669aa0 --- /dev/null +++ b/core/src/states/matcher.rs @@ -0,0 +1,87 @@ +use bitcoin::{OutPoint, Txid}; +use std::cmp::Ordering; + +use super::block_cache::BlockCache; + +/// A trait that will return a single event when a block matches any of the matchers. +pub(crate) trait BlockMatcher { + type StateEvent; + + fn match_block(&self, block: &super::block_cache::BlockCache) -> Vec; +} + +// Matcher for state machines to define what they're interested in +#[derive( + Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, serde::Serialize, serde::Deserialize, +)] +pub enum Matcher { + SentTx(Txid), + SpentUtxo(OutPoint), + /// This matcher is used to determine that an outpoint was spent, but the txid of the tx that spent the outpoint + /// is not equal to any of the txids in the vector. + /// For many transactions in clementine, there are many utxos that can be spent in two ways: + /// 1. A nofn-presigned timeout transaction. 
These timeout transactions have fixed txid (because they are nofn signed) + /// and can be sent after the utxo is not spent by operator before the timelock. + /// 2. A transaction that spends the utxo to reveal/inscribe something in Bitcoin. These transactions are not nofn presigned and + /// can be spent by operators/verifiers in any way they want as long as the witness is valid so there are + /// no fixed txids for these transactions. (Transactions like Watchtower Challenge, Operator Assert, etc.) + /// + /// This matcher is used to detect the second case, and the Txid vector is used to check if utxo is instead spent by a timeout transaction. + /// This matcher is used for detection of transactions like Watchtower Challenge, Operator Assert, etc. + SpentUtxoButNotTxid(OutPoint, Vec), + BlockHeight(u32), +} + +/// An enum that represents the order of the matchers. +/// The reason for the order is to make sure if a transaction has lower index in the block, +/// any events resulting from that transaction are processed first. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum MatcherOrd { + /// Matcher ordering for matchers concerning a single tx + TxIndex(usize), + /// Matcher ordering for matchers concerning a block height + BlockHeight, +} + +impl PartialOrd for MatcherOrd { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for MatcherOrd { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (MatcherOrd::TxIndex(a), MatcherOrd::TxIndex(b)) => a.cmp(b), + (MatcherOrd::BlockHeight, MatcherOrd::BlockHeight) => Ordering::Equal, + (MatcherOrd::BlockHeight, _) => Ordering::Less, + (_, MatcherOrd::BlockHeight) => Ordering::Greater, + } + } +} + +impl Matcher { + /// Returns the order of the matcher if the block matches the matcher. 
+ pub fn matches(&self, block: &BlockCache) -> Option { + match self { + Matcher::SentTx(txid) if block.contains_txid(txid) => Some(MatcherOrd::TxIndex( + *block.txids.get(txid).expect("txid is in cache"), + )), + Matcher::SpentUtxo(outpoint) if (block.is_utxo_spent(outpoint)) => Some( + MatcherOrd::TxIndex(*block.spent_utxos.get(outpoint).expect("utxo is in cache")), + ), + Matcher::BlockHeight(height) if *height <= block.block_height => { + Some(MatcherOrd::BlockHeight) + } + Matcher::SpentUtxoButNotTxid(outpoint, txids) + if block.is_utxo_spent(outpoint) + && !txids.iter().any(|txid| block.contains_txid(txid)) => + { + Some(MatcherOrd::TxIndex( + *block.spent_utxos.get(outpoint).expect("utxo is in cache"), + )) + } + _ => None, + } + } +} diff --git a/core/src/states/mod.rs b/core/src/states/mod.rs new file mode 100644 index 000000000..a723129a6 --- /dev/null +++ b/core/src/states/mod.rs @@ -0,0 +1,590 @@ +//! State manager module +//! +//! This module contains the state manager, which is responsible for holding the state machines +//! of the system representing the current operator state for each operator, and kickoff state for each kickoff +//! (Each operator and kickoff process has its own state machine). +//! +//! The main responsibility of the state manager is to process each finalized block in Bitcoin and update the state machines. +//! The blocks are scanned for relevant Clementine tx's and internal state of the state machines is updated. This relevant data +//! is passed on verifiers/operators when its time to send bridge transactions (like operator asserts, watchtower challenges, etc). +//! +//! Operator state machine: Stores where in the collateral chain the operator is. (Which round or ready to reimburse tx) +//! Additionally during rounds it stores which kickoff utxos of the round are spent. +//! Kickoff state machine: +//! - Stores if the kickoff was challenged. +//! 
- Stores/tracks watchtower challenges, latest blockhash commit by operator, operator asserts, any relevant information needed +//! for proving and disproving the kickoff. +//! +//! For state machines we use the [statig](https://github.com/mdeloof/statig) crate. +//! + +pub use crate::builder::block_cache; +use crate::config::protocol::ProtocolParamset; +use crate::database::{Database, DatabaseTransaction}; +use crate::errors::BridgeError; +use eyre::Context; +use futures::future::{join, join_all}; +use kickoff::KickoffEvent; +use matcher::BlockMatcher; +use pgmq::PGMQueueExt; +use round::RoundEvent; +use statig::awaitable::{InitializedStateMachine, UninitializedStateMachine}; +use statig::prelude::*; +use std::cmp::max; +use std::future::Future; +use std::sync::Arc; +use thiserror::Error; + +pub mod context; +mod event; +pub mod kickoff; +mod matcher; +pub mod round; +pub mod task; + +pub use context::{Duty, Owner}; +pub use event::SystemEvent; + +#[derive(Debug, Error)] +pub enum StateMachineError { + #[error("State machine received event that it doesn't know how to handle: {0}")] + UnhandledEvent(String), + + #[error(transparent)] + Other(#[from] eyre::Report), +} +pub(crate) enum ContextProcessResult< + T: Owner, + M: IntoStateMachine, + Fut: Future, context::StateContext)> + Send, +> { + Unchanged(InitializedStateMachine), + Processing(Fut), +} + +/// Utility trait to make processing generic +pub(crate) trait ContextProcessor { + /// Processes the machine with the given state context (which contains the block cache) + /// If the machine is unchanged, it is returned as is. Otherwise, the machine is processed + /// and the result is returned as a future that processes the new events. 
+ fn process_with_ctx( + self, + block: &context::StateContext, + ) -> ContextProcessResult< + T, + M, + impl Future, context::StateContext)> + Send, + >; +} + +/// Generic implementation for all state machines +impl ContextProcessor for InitializedStateMachine +where + T: Owner, + for<'evt, 'ctx> M: IntoStateMachine = M::StateEvent, Context<'ctx> = context::StateContext> + + Send + + BlockMatcher + + Clone, + M::State: awaitable::State + 'static + Send, + for<'sub> M::Superstate<'sub>: awaitable::Superstate + Send, + for<'evt> M::Event<'evt>: Send + Sync, +{ + fn process_with_ctx( + mut self, + block: &context::StateContext, + ) -> ContextProcessResult)> + Send> + { + let events = self.match_block(&block.cache); + if events.is_empty() { + ContextProcessResult::Unchanged(self) + } else { + let mut ctx = block.clone(); + ContextProcessResult::Processing(async move { + for event in events { + self.handle_with_context(&event, &mut ctx).await; + } + (self, ctx) + }) + } + } +} + +/// State manager stores the state machines. +/// It is responsible for following: +/// - Persisting current state of the state machines to the database. +/// - Collecting new [`SystemEvent`]s from the message queue and passing them to the state machines, +/// thus updating the state machines. +#[derive(Debug, Clone)] +pub struct StateManager { + pub db: Database, + queue: PGMQueueExt, + owner: T, + round_machines: Vec>>, + kickoff_machines: Vec>>, + context: context::StateContext, + paramset: &'static ProtocolParamset, + next_height_to_process: u32, +} + +impl StateManager { + /// Returns message queue name for the state manager. 
+ pub fn queue_name() -> String { + format!("{}_state_mgr_events", T::ENTITY_NAME) + } + + pub async fn new( + db: Database, + owner: T, + paramset: &'static ProtocolParamset, + ) -> eyre::Result { + let queue = PGMQueueExt::new_with_pool(db.get_pool()).await; + + queue.create(&Self::queue_name()).await.wrap_err_with(|| { + format!("Error creating pqmq queue with name {}", Self::queue_name()) + })?; + + let mut mgr = Self { + context: context::StateContext::new( + db.clone(), + Arc::new(owner.clone()), + Default::default(), + paramset, + ), + db, + owner, + paramset, + round_machines: Vec::new(), + kickoff_machines: Vec::new(), + queue, + next_height_to_process: paramset.start_height, + }; + + mgr.load_from_db().await?; + Ok(mgr) + } + + /// Loads the state machines from the database. + /// This method should be called when initializing the StateManager. + /// + /// # Errors + /// Returns a `BridgeError` if the database operation fails + pub async fn load_from_db(&mut self) -> Result<(), BridgeError> { + // Get the owner type from the context + let owner_type = &self.context.owner_type; + + // First, check if we have any state saved + let status = self.db.get_next_height_to_process(None, owner_type).await?; + + // If no state is saved, return early + let Some(block_height) = status else { + tracing::info!("No state machines found in the database"); + return Ok(()); + }; + + tracing::info!("Loading state machines from block height {}", block_height); + + // Load kickoff machines + let kickoff_machines = self.db.load_kickoff_machines(None, owner_type).await?; + + // Load round machines + let round_machines = self.db.load_round_machines(None, owner_type).await?; + + // Process and recreate kickoff machines + for (state_json, kickoff_id, saved_block_height) in &kickoff_machines { + tracing::debug!( + "Loaded kickoff machine: state={}, block_height={}", + state_json, + saved_block_height + ); + + // Deserialize the machine state from JSON + let machine: Result>, _> = + 
serde_json::from_str(state_json); + + match machine { + Ok(uninitialized) => { + // Create a context for initialization + let mut ctx = context::StateContext::new( + self.db.clone(), + Arc::new(self.owner.clone()), + Default::default(), + self.paramset, + ); + + // Initialize the machine with the context + let initialized = uninitialized.init_with_context(&mut ctx).await; + self.kickoff_machines.push(initialized); + } + Err(e) => { + tracing::warn!( + "Failed to deserialize kickoff machine with ID {}: {}", + kickoff_id, + e + ); + } + } + } + + // Process and recreate round machines + for (state_json, operator_xonly_pk, saved_block_height) in &round_machines { + tracing::debug!( + "Loaded round machine: state={}, block_height={}", + state_json, + saved_block_height + ); + + // Deserialize the machine state from JSON + let machine: Result>, _> = + serde_json::from_str(state_json); + + match machine { + Ok(uninitialized) => { + // Create a context for initialization + let mut ctx = context::StateContext::new( + self.db.clone(), + Arc::new(self.owner.clone()), + Default::default(), + self.paramset, + ); + + // Initialize the machine with the context + let initialized = uninitialized.init_with_context(&mut ctx).await; + self.round_machines.push(initialized); + } + Err(e) => { + tracing::error!( + "Failed to deserialize round machine with operator index {:?}: {}", + operator_xonly_pk, + e + ); + } + } + } + + tracing::info!( + "Loaded {} kickoff machines and {} round machines from the database", + kickoff_machines.len(), + round_machines.len() + ); + + self.next_height_to_process = + u32::try_from(block_height).wrap_err(BridgeError::IntConversionError)?; + Ok(()) + } + #[cfg(test)] + #[doc(hidden)] + pub fn round_machines(&self) -> Vec>> { + self.round_machines.clone() + } + + #[cfg(test)] + #[doc(hidden)] + pub fn kickoff_machines( + &self, + ) -> Vec>> { + self.kickoff_machines.clone() + } + + /// Saves the state machines with dirty flag set to the database. 
+ /// Resets the dirty flag for all machines after successful save. + /// + /// # Errors + /// Returns a `BridgeError` if the database operation fails. + pub async fn save_state_to_db( + &mut self, + block_height: u32, + dbtx: Option>, + ) -> eyre::Result<()> { + // Get the owner type from the context + let owner_type = &self.context.owner_type; + + // Prepare kickoff machines data with direct serialization + let kickoff_machines: eyre::Result> = self + .kickoff_machines + .iter() + // Only serialize machines that are dirty + .filter(|machine| machine.dirty) + .map(|machine| -> eyre::Result<_> { + let state_json = serde_json::to_string(&machine).wrap_err_with(|| { + format!("Failed to serialize kickoff machine: {:?}", machine) + })?; + let kickoff_id = + serde_json::to_string(&machine.kickoff_data).wrap_err_with(|| { + format!("Failed to serialize kickoff id for machine: {:?}", machine) + })?; + Ok((state_json, (kickoff_id))) + }) + .collect(); + + // Prepare round machines data with direct serialization + let round_machines: eyre::Result> = self + .round_machines + .iter() + // Only serialize machines that are dirty + .filter(|machine| machine.dirty) + .map(|machine| -> eyre::Result<_> { + let state_json = serde_json::to_string(machine).wrap_err_with(|| { + format!("Failed to serialize round machine: {:?}", machine) + })?; + let operator_xonly_pk = machine.operator_data.xonly_pk; + + // Use the machine's dirty flag to determine if it needs updating + Ok((state_json, (operator_xonly_pk))) + }) + .collect(); + + // Use the database function to save the state machines + self.db + .save_state_machines( + dbtx, + kickoff_machines?, + round_machines?, + block_height as i32, + owner_type, + ) + .await?; + + // Reset the dirty flag for all machines after successful save + for machine in &mut self.kickoff_machines { + if machine.dirty { + machine + .handle_with_context(&KickoffEvent::SavedToDb, &mut self.context) + .await; + } + } + + for machine in &mut 
self.round_machines { + if machine.dirty { + machine + .handle_with_context(&RoundEvent::SavedToDb, &mut self.context) + .await; + } + } + + Ok(()) + } + + pub fn get_next_height_to_process(&self) -> u32 { + self.next_height_to_process + } + + /// Updates the machines using the context and returns machines without + /// events and futures that process new events for machines that changed. + /// Empties the `machines` vector. + /// + /// # Parameters + /// * `machines`: A mutable reference to the vector of state machines to update. + /// * `base_context`: A reference to the base state context. + /// + /// # Returns + /// A tuple of the unchanged machines and the futures that process new + /// events for machines that generated events. + /// + /// # Type Parameters + /// * `M`: The type of the state machine. + /// * `a`: The lifetime of the state context reference (the future captures the context by reference). + #[allow(clippy::type_complexity)] + fn update_machines<'a, M>( + machines: &mut Vec>, + base_context: &'a context::StateContext, + ) -> ( + Vec>, + Vec< + impl Future, context::StateContext)> + Send + 'a, + >, + ) + where + M: IntoStateMachine + Send + Sync + 'static, + M::State: Send + Sync + 'static, + InitializedStateMachine: ContextProcessor, + { + let mut unchanged_machines = Vec::new(); + let mut processing_futures = Vec::new(); + + for machine in std::mem::take(machines).into_iter() { + match machine.process_with_ctx(base_context) { + ContextProcessResult::Processing(future) => processing_futures.push(future), + ContextProcessResult::Unchanged(machine) => unchanged_machines.push(machine), + } + } + + (unchanged_machines, processing_futures) + } + + /// Given some new states and a start height, process the states from the given start height until the next height to process. + /// Then append the new states to the current state machines. 
+ pub async fn process_and_add_new_states_from_height( + &mut self, + new_round_machines: Vec>>, + new_kickoff_machines: Vec>>, + start_height: u32, + ) -> Result<(), eyre::Report> { + // create a temporary state manager that only includes the new states + let mut temporary_manager = self.clone(); + temporary_manager.round_machines = new_round_machines; + temporary_manager.kickoff_machines = new_kickoff_machines; + + for block_height in start_height..temporary_manager.next_height_to_process { + let block = temporary_manager + .db + .get_full_block(None, block_height) + .await?; + if let Some(block) = block { + temporary_manager.update_block_cache(&block, block_height); + temporary_manager + .process_block_parallel(block_height) + .await?; + } else { + return Err(eyre::eyre!( + "Block at height {} not found in process_and_add_new_states_from_height", + block_height + )); + } + } + + // append new states to the current state manager + self.round_machines.extend(temporary_manager.round_machines); + self.kickoff_machines + .extend(temporary_manager.kickoff_machines); + + Ok(()) + } + + /// It requires that the block cache is updated before calling this function. + /// Moves all state machines forward in parallel. + /// The state machines are updated until all of them stabilize in their state (ie. + /// the block does not generate any new events) + /// + /// # Errors + /// If the state machines do not stabilize after some iterations, we return an error. + pub async fn process_block_parallel(&mut self, block_height: u32) -> Result<(), eyre::Report> { + eyre::ensure!( + self.context.cache.block_height == block_height, + "Block cache is not updated" + ); + + // Store the original machines to revert to in case of an error happens during processing + // If an error is encountered, the block processing will retry. 
If we don't store and revert all + state machines during processing, some state machines can be left in an invalid state + depending on where the error occurred. To be safe, we revert to the original machines. + let kickoff_machines_checkpoint = self.kickoff_machines.clone(); + let round_machines_checkpoint = self.round_machines.clone(); + + // Process all machines, for those unaffected collect them, otherwise return + // a future that processes the new events. + let (mut final_kickoff_machines, mut kickoff_futures) = + Self::update_machines(&mut self.kickoff_machines, &self.context); + let (mut final_round_machines, mut round_futures) = + Self::update_machines(&mut self.round_machines, &self.context); + + // Here we store number of iterations to detect if the machines do not stabilize after a while + // to prevent infinite loops. If a matcher is used, it is deleted, but a bug in implementation + // can technically cause infinite loops. + + // On each iteration, we'll update the changed machines until all machines + // stabilize in their state.
+ while !kickoff_futures.is_empty() || !round_futures.is_empty() { + // Execute all futures in parallel + let (kickoff_results, round_results) = + join(join_all(kickoff_futures), join_all(round_futures)).await; + + // Unzip the results into updated machines and state contexts + let (mut changed_kickoff_machines, mut kickoff_contexts): (Vec<_>, Vec<_>) = + kickoff_results.into_iter().unzip(); + let (mut changed_round_machines, mut round_contexts): (Vec<_>, Vec<_>) = + round_results.into_iter().unzip(); + + // Merge and handle errors + let mut all_errors = Vec::new(); + for ctx in kickoff_contexts.iter_mut().chain(round_contexts.iter_mut()) { + all_errors.extend(std::mem::take(&mut ctx.errors)); + } + + if !all_errors.is_empty() { + // revert state machines to the saved state as the content of the machines might be changed before the error occurred + self.kickoff_machines = kickoff_machines_checkpoint; + self.round_machines = round_machines_checkpoint; + // Return first error or create a combined error + return Err(eyre::eyre!( + "Multiple errors occurred during state processing: {:?}", + all_errors + )); + } + + // Append the newly generated state machines into the changed machines list + for ctx in kickoff_contexts.iter_mut().chain(round_contexts.iter_mut()) { + #[cfg(debug_assertions)] + for machine in &ctx.new_round_machines { + if !machine.dirty { + panic!( + "Round machine not dirty despite having been newly created: {:?}", + machine.state() + ); + } + } + #[cfg(debug_assertions)] + for machine in &ctx.new_kickoff_machines { + if !machine.dirty { + panic!( + "Kickoff machine not dirty despite having been newly created: {:?}", + machine.state() + ); + } + } + changed_round_machines.extend(std::mem::take(&mut ctx.new_round_machines)); + changed_kickoff_machines.extend(std::mem::take(&mut ctx.new_kickoff_machines)); + } + + // If the machines do not stabilize after a while, we return an error + // + // Something like max(2 * num_kickoffs_per_round, number of utxos 
in a kickoff * 2) is possibly a safe value + if iterations > 100000 { + return Err(eyre::eyre!( + r#"{}/{} kickoff and {}/{} round state machines did not stabilize after 100000 iterations, debug repr of changed machines: + ---- Kickoff machines ---- + {:?} + ---- Round machines ---- + {:?} + "#, + changed_kickoff_machines.len(), + final_kickoff_machines.len() + changed_kickoff_machines.len(), + changed_round_machines.len(), + final_round_machines.len() + changed_round_machines.len(), + changed_kickoff_machines + .iter() + .map(|m| m.state()) + .collect::>(), + changed_round_machines + .iter() + .map(|m| m.state()) + .collect::>(), + )); + } + + // Reprocess changed machines and commit these futures to be handled + // in the next round If they're empty, we'll exit the loop. + let (finalized_kickoff_machines, new_kickoff_futures) = + Self::update_machines(&mut changed_kickoff_machines, &self.context); + let (finalized_round_machines, new_round_futures) = + Self::update_machines(&mut changed_round_machines, &self.context); + final_kickoff_machines.extend(finalized_kickoff_machines); + final_round_machines.extend(finalized_round_machines); + + // Update the futures to be processed + kickoff_futures = new_kickoff_futures; + round_futures = new_round_futures; + iterations += 1; + } + + drop(kickoff_futures); + drop(round_futures); + + // Set back the original machines + self.round_machines = final_round_machines; + self.kickoff_machines = final_kickoff_machines; + self.next_height_to_process = max(block_height + 1, self.next_height_to_process); + + Ok(()) + } +} diff --git a/core/src/states/round.rs b/core/src/states/round.rs new file mode 100644 index 000000000..88a4677e4 --- /dev/null +++ b/core/src/states/round.rs @@ -0,0 +1,513 @@ +use statig::prelude::*; +use std::collections::{HashMap, HashSet}; + +use crate::deposit::OperatorData; +use crate::operator::RoundIndex; +use crate::{ + builder::transaction::{input::UtxoVout, ContractContext, TransactionType}, + 
errors::{BridgeError, TxError}, +}; +use bitcoin::OutPoint; +use serde_with::serde_as; + +use super::{ + block_cache::BlockCache, + context::{Duty, DutyResult, StateContext}, + matcher::{self, BlockMatcher}, + Owner, StateMachineError, +}; + +/// Events that change the state of the round state machine. +#[derive( + Debug, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, serde::Serialize, serde::Deserialize, +)] +pub enum RoundEvent { + /// Event that is dispatched when a kickoff utxo in a round tx is spent. + KickoffUtxoUsed { + kickoff_idx: usize, + kickoff_outpoint: OutPoint, + }, + /// Event that is dispatched when the next ready to reimburse tx is mined. + ReadyToReimburseSent { round_idx: RoundIndex }, + /// Event that is dispatched when the next round tx is mined. + RoundSent { round_idx: RoundIndex }, + /// This event is sent if operators collateral was spent in any way other than default behaviour (default is round -> ready to reimburse -> round -> ready to reimburse -> ...) + /// It means operator stopped participating in the protocol and can no longer participate in clementine bridge protocol. + OperatorExit, + /// Special event that is used to indicate that the state machine has been saved to the database and the dirty flag should be reset to false + SavedToDb, +} + +/// State machine for the round state. +/// It has following states: +/// - `initial_collateral`: The initial collateral state, when the operator didn't create the first round tx yet. +/// - `round_tx`: The round tx state, when the operator's collateral utxo is currently in a round tx. +/// - `ready_to_reimburse`: The ready to reimburse state, when the operator's collateral utxo is currently in a ready to reimburse tx. +/// - `operator_exit`: The operator exit state, when the operator exited the protocol (collateral spent in a non-bridge tx). +/// +/// It has following events: +/// - `KickoffUtxoUsed`: The kickoff utxo is used in a round tx. 
The state machine stores this utxo as used, and additionally calls the owner to check if this kickoff utxo was used in a kickoff tx (If so, that will result in creation of a kickoff state machine). +/// - `ReadyToReimburseSent`: The ready to reimburse tx is sent. The state machine transitions to the ready to reimburse state. Additionally, if there are unused kickoff utxos, this information is passed to the owner which can then create a "Unspent Kickoff Connector" tx. +/// - `RoundSent`: The round tx is sent. The state machine transitions to the round tx state. +/// - `OperatorExit`: The operator exited the protocol. The state machine transitions to the operator exit state. In this state, all tracking of the operator is stopped as operator is no longer participating in the protocol. +/// - `SavedToDb`: The state machine has been saved to the database and the dirty flag should be reset to false. +/// +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct RoundStateMachine { + /// Maps matchers to the resulting round events. + #[serde_as(as = "Vec<(_, _)>")] + pub(crate) matchers: HashMap, + /// Data of the operator that is being tracked. + pub(crate) operator_data: OperatorData, + /// Indicates if the state machine has unsaved changes that need to be persisted on db. + /// dirty flag is set if any matcher matches the current block. 
+ /// the flag is set to true in on_transition and on_dispatch + /// the flag is set to false after the state machine is saved to db and the event SavedToDb is dispatched + pub(crate) dirty: bool, + phantom: std::marker::PhantomData, +} + +impl BlockMatcher for RoundStateMachine { + type StateEvent = RoundEvent; + + fn match_block(&self, block: &BlockCache) -> Vec { + self.matchers + .iter() + .filter_map(|(matcher, round_event)| { + matcher.matches(block).map(|ord| (ord, round_event)) + }) + .min() + .map(|(_, round_event)| round_event) + .into_iter() + .cloned() + .collect() + } +} + +impl RoundStateMachine { + pub fn new(operator_data: OperatorData) -> Self { + Self { + matchers: HashMap::new(), + operator_data, + dirty: true, + phantom: std::marker::PhantomData, + } + } +} +use eyre::Context; + +#[state_machine( + initial = "State::initial_collateral()", + on_dispatch = "Self::on_dispatch", + on_transition = "Self::on_transition", + state(derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)) +)] +impl RoundStateMachine { + #[action] + pub(crate) fn on_transition(&mut self, state_a: &State, state_b: &State) { + tracing::trace!(?self.operator_data, "Transitioning from {:?} to {:?}", state_a, state_b); + self.dirty = true; + } + + pub fn round_meta(&self, method: &'static str) -> StateMachineError { + eyre::eyre!( + "Error in round state machine for operator {} in {}", + self.operator_data.xonly_pk, + method + ) + .into() + } + + async fn unhandled_event(&mut self, context: &mut StateContext, event: &RoundEvent) { + context + .capture_error(async |_context| { + let event_str = format!("{:?}", event); + Err(StateMachineError::UnhandledEvent(event_str)) + .wrap_err(self.round_meta("round unhandled event")) + }) + .await; + } + + #[action] + pub(crate) fn on_dispatch( + &mut self, + _state: StateOrSuperstate<'_, '_, Self>, + evt: &RoundEvent, + ) { + if matches!(evt, RoundEvent::SavedToDb) { + self.dirty = false; + } else { + 
tracing::trace!(?self.operator_data, "Dispatching event {:?}", evt); + self.dirty = true; + + // Remove the matcher corresponding to the event. + if let Some((matcher, _)) = self.matchers.iter().find(|(_, ev)| ev == &evt) { + let matcher = matcher.clone(); + self.matchers.remove(&matcher); + } + } + } + + #[state(entry_action = "on_initial_collateral_entry")] + pub(crate) async fn initial_collateral( + &mut self, + event: &RoundEvent, + context: &mut StateContext, + ) -> Response { + match event { + // If the initial collateral is spent, we can transition to the first round tx. + RoundEvent::RoundSent { round_idx } => { + Transition(State::round_tx(*round_idx, HashSet::new(), false)) + } + RoundEvent::SavedToDb => Handled, + RoundEvent::OperatorExit => Transition(State::operator_exit()), + _ => { + self.unhandled_event(context, event).await; + Handled + } + } + } + + /// Entry action for the initial collateral state. + /// This method adds the matcher for the first round tx and the matcher if the operator exits + /// the protocol by not spending the collateral in the first round tx. + #[action] + #[allow(unused_variables)] + pub(crate) async fn on_initial_collateral_entry(&mut self, context: &mut StateContext) { + context + .capture_error(async |context| { + { + self.matchers = HashMap::new(); + + // To determine if operator exited the protocol, we check if collateral was not spent in the first round tx. + let contract_context = ContractContext::new_context_for_round( + self.operator_data.xonly_pk, + RoundIndex::Round(0), + context.paramset, + ); + let round_txhandlers = context + .owner + .create_txhandlers(TransactionType::Round, contract_context) + .await?; + let round_txid = round_txhandlers + .get(&TransactionType::Round) + .ok_or(TxError::TxHandlerNotFound(TransactionType::Round))? 
+ .get_txid(); + // if round tx is sent, we can send the round sent event + self.matchers.insert( + matcher::Matcher::SentTx(*round_txid), + RoundEvent::RoundSent { + round_idx: RoundIndex::Round(0), + }, + ); + // If the tx the collateral is spent on is not the round tx, we dispatch the operator exit event. + self.matchers.insert( + matcher::Matcher::SpentUtxoButNotTxid( + self.operator_data.collateral_funding_outpoint, + vec![*round_txid], + ), + RoundEvent::OperatorExit, + ); + Ok::<(), BridgeError>(()) + } + .wrap_err(self.round_meta("on_initial_collateral_entry")) + }) + .await; + } + + /// State that represents a round tx. + /// This state is entered when a round tx is mined. + /// It is exited when the operator sends the ready to reimburse tx or exits the protocol. + #[state(entry_action = "on_round_tx_entry", exit_action = "on_round_tx_exit")] + #[allow(unused_variables)] + pub(crate) async fn round_tx( + &mut self, + event: &RoundEvent, + round_idx: &mut RoundIndex, + used_kickoffs: &mut HashSet, + challenged_before: &mut bool, + context: &mut StateContext, + ) -> Response { + match event { + // If a kickoff utxo is spent, we add it to the used kickoffs set. + // The set will be used to determine if the operator has used all kickoffs in the round. + // If the operator did not use all kickoffs, "Unspent Kickoff Connector" tx can potentially be sent, slashing the operator. + // Additionally, the owner will check if the kickoff utxo is used in a kickoff transaction. + // If so, the owner (if verifier) will do additional checks to determine if the kickoff is malicious or not. 
+ RoundEvent::KickoffUtxoUsed { + kickoff_idx, + kickoff_outpoint, + } => { + used_kickoffs.insert(*kickoff_idx); + let txid = context + .cache + .get_txid_of_utxo(kickoff_outpoint) + .expect("UTXO should be in block"); + + context + .capture_error(async |context| { + { + let duty_result = context + .owner + .handle_duty(Duty::CheckIfKickoff { + txid, + block_height: context.cache.block_height, + witness: context + .cache + .get_witness_of_utxo(kickoff_outpoint) + .expect("UTXO should be in block"), + challenged_before: *challenged_before, + }) + .await?; + if let DutyResult::CheckIfKickoff { challenged } = duty_result { + *challenged_before |= challenged; + } + Ok::<(), BridgeError>(()) + } + .wrap_err(self.round_meta("round_tx kickoff_utxo_used")) + }) + .await; + Handled + } + // If the ready to reimburse tx is mined, we transition to the ready to reimburse state. + RoundEvent::ReadyToReimburseSent { round_idx } => { + Transition(State::ready_to_reimburse(*round_idx)) + } + RoundEvent::SavedToDb => Handled, + RoundEvent::OperatorExit => Transition(State::operator_exit()), + _ => { + self.unhandled_event(context, event).await; + Handled + } + } + } + + /// State that represents the operator exiting the protocol. + #[state(entry_action = "on_operator_exit_entry")] + pub(crate) async fn operator_exit( + &mut self, + event: &RoundEvent, + context: &mut StateContext, + ) -> Response { + match event { + RoundEvent::SavedToDb => Handled, + _ => { + self.unhandled_event(context, event).await; + Handled + } + } + } + + /// Entry action for the operator exit state. + /// This method removes all matchers for the round state machine. + /// We do not care about anything after the operator exits the protocol. + /// For example, even if operator sends a kickoff after exiting the protocol, that + /// kickoff is useless as reimburse connector utxo of that kickoff is in the next round, + /// which cannot be created anymore as the collateral is spent. 
So we do not want to challenge it, etc. + #[action] + pub(crate) async fn on_operator_exit_entry(&mut self) { + self.matchers = HashMap::new(); + tracing::warn!(?self.operator_data, "Operator exited the protocol."); + } + + /// Exit action for the round tx state. + /// This method will check if all kickoffs were used in the round. + /// If not, the owner will send a "Unspent Kickoff Connector" tx, slashing the operator. + #[action] + pub(crate) async fn on_round_tx_exit( + &mut self, + round_idx: &mut RoundIndex, + used_kickoffs: &mut HashSet, + context: &mut StateContext, + ) { + context + .capture_error(async |context| { + { + context + .owner + .handle_duty(Duty::NewReadyToReimburse { + round_idx: *round_idx, + used_kickoffs: used_kickoffs.clone(), + operator_xonly_pk: self.operator_data.xonly_pk, + }) + .await?; + Ok::<(), BridgeError>(()) + } + .wrap_err(self.round_meta("on_round_tx_exit")) + }) + .await; + } + + /// Entry action for the round tx state. + /// This method adds the matchers for the round tx and the ready to reimburse tx. + /// It adds the matchers for the kickoff utxos in the round tx. + /// It also adds the matchers for the operator exit. + #[action] + pub(crate) async fn on_round_tx_entry( + &mut self, + round_idx: &mut RoundIndex, + challenged_before: &mut bool, + context: &mut StateContext, + ) { + // ensure challenged_before starts at false for each round + // In a single round, a challenge is enough to slash all of the operators current kickoffs in the same round. + // This way, if the operator posts 50 different kickoffs, we only need one challenge. + // If that challenge is successful, operator will not be able to get reimbursement from all kickoffs. 
+ *challenged_before = false; + context + .capture_error(async |context| { + { + self.matchers = HashMap::new(); + // On the round after last round, do not care about anything, + // last round has index num_round_txs and is there only for reimbursement generators of previous round + // nothing is signed with them + if *round_idx == RoundIndex::Round(context.paramset.num_round_txs) { + Ok::<(), BridgeError>(()) + } else { + let contract_context = ContractContext::new_context_for_round( + self.operator_data.xonly_pk, + *round_idx, + context.paramset, + ); + let mut txhandlers = context + .owner + .create_txhandlers(TransactionType::Round, contract_context) + .await?; + let round_txhandler = txhandlers + .remove(&TransactionType::Round) + .ok_or(TxError::TxHandlerNotFound(TransactionType::Round))?; + let ready_to_reimburse_txhandler = txhandlers + .remove(&TransactionType::ReadyToReimburse) + .ok_or(TxError::TxHandlerNotFound( + TransactionType::ReadyToReimburse, + ))?; + // Add a matcher for the ready to reimburse tx. + self.matchers.insert( + matcher::Matcher::SentTx(*ready_to_reimburse_txhandler.get_txid()), + RoundEvent::ReadyToReimburseSent { + round_idx: *round_idx, + }, + ); + // To determine if operator exited the protocol, we check if collateral was not spent in ready to reimburse tx. + self.matchers.insert( + matcher::Matcher::SpentUtxoButNotTxid( + OutPoint::new( + *round_txhandler.get_txid(), + UtxoVout::CollateralInRound.get_vout(), + ), + vec![*ready_to_reimburse_txhandler.get_txid()], + ), + RoundEvent::OperatorExit, + ); + // Add a matcher for each kickoff utxo in the round tx. + for idx in 0..context.paramset.num_kickoffs_per_round { + let outpoint = *round_txhandler + .get_spendable_output(UtxoVout::Kickoff(idx))? 
+ .get_prev_outpoint(); + self.matchers.insert( + matcher::Matcher::SpentUtxo(outpoint), + RoundEvent::KickoffUtxoUsed { + kickoff_idx: idx, + kickoff_outpoint: outpoint, + }, + ); + } + Ok::<(), BridgeError>(()) + } + } + .wrap_err(self.round_meta("on_round_tx_entry")) + }) + .await; + } + + #[state(entry_action = "on_ready_to_reimburse_entry")] + #[allow(unused_variables)] + pub(crate) async fn ready_to_reimburse( + &mut self, + event: &RoundEvent, + context: &mut StateContext, + round_idx: &mut RoundIndex, + ) -> Response { + match event { + // If the next round tx is mined, we transition to the round tx state. + RoundEvent::RoundSent { + round_idx: next_round_idx, + } => Transition(State::round_tx(*next_round_idx, HashSet::new(), false)), + RoundEvent::SavedToDb => Handled, + RoundEvent::OperatorExit => Transition(State::operator_exit()), + _ => { + self.unhandled_event(context, event).await; + Handled + } + } + } + + /// Entry action for the ready to reimburse state. + /// This method adds the matchers for the next round tx and the operator exit. + #[action] + pub(crate) async fn on_ready_to_reimburse_entry( + &mut self, + context: &mut StateContext, + round_idx: &mut RoundIndex, + ) { + context + .capture_error(async |context| { + { + self.matchers = HashMap::new(); + // get next rounds Round tx + let next_round_context = ContractContext::new_context_for_round( + self.operator_data.xonly_pk, + round_idx.next_round(), + context.paramset, + ); + let next_round_txhandlers = context + .owner + .create_txhandlers(TransactionType::Round, next_round_context) + .await?; + let next_round_txid = next_round_txhandlers + .get(&TransactionType::Round) + .ok_or(TxError::TxHandlerNotFound(TransactionType::Round))? + .get_txid(); + // Add a matcher for the next round tx. 
+ self.matchers.insert( + matcher::Matcher::SentTx(*next_round_txid), + RoundEvent::RoundSent { + round_idx: round_idx.next_round(), + }, + ); + // calculate the current ready to reimburse txid + // to generate the SpentUtxoButNotTxid matcher for the operator exit + let current_round_context = ContractContext::new_context_for_round( + self.operator_data.xonly_pk, + *round_idx, + context.paramset, + ); + let current_round_txhandlers = context + .owner + .create_txhandlers(TransactionType::Round, current_round_context) + .await?; + let current_ready_to_reimburse_txid = current_round_txhandlers + .get(&TransactionType::ReadyToReimburse) + .ok_or(TxError::TxHandlerNotFound( + TransactionType::ReadyToReimburse, + ))? + .get_txid(); + // To determine if operator exited the protocol, we check if collateral was not spent in the next round tx. + self.matchers.insert( + matcher::Matcher::SpentUtxoButNotTxid( + OutPoint::new( + *current_ready_to_reimburse_txid, + UtxoVout::CollateralInReadyToReimburse.get_vout(), + ), + vec![*next_round_txid], + ), + RoundEvent::OperatorExit, + ); + Ok::<(), BridgeError>(()) + } + .wrap_err(self.round_meta("on_ready_to_reimburse_entry")) + }) + .await; + } +} diff --git a/core/src/states/task.rs b/core/src/states/task.rs new file mode 100644 index 000000000..ff07f977e --- /dev/null +++ b/core/src/states/task.rs @@ -0,0 +1,278 @@ +use crate::{ + bitcoin_syncer::{BlockHandler, FinalizedBlockFetcherTask}, + database::{Database, DatabaseTransaction}, + task::{BufferedErrors, IntoTask, TaskVariant, WithDelay}, +}; +use eyre::Context as _; +use pgmq::{Message, PGMQueueExt}; +use std::time::Duration; +use tonic::async_trait; + +use crate::{ + config::protocol::ProtocolParamset, + errors::BridgeError, + states::SystemEvent, + task::{Task, TaskExt}, +}; + +use super::{context::Owner, StateManager}; + +const POLL_DELAY: Duration = if cfg!(test) { + Duration::from_millis(250) +} else { + Duration::from_secs(30) +}; + +/// Block handler that sends events 
to a PostgreSQL message queue +#[derive(Debug, Clone)] +pub struct QueueBlockHandler { + queue: PGMQueueExt, + queue_name: String, +} + +#[async_trait] +impl BlockHandler for QueueBlockHandler { + /// Handles a new block by sending a new block event to the queue. + /// State manager will process the block after reading the event from the queue. + async fn handle_new_block( + &mut self, + dbtx: DatabaseTransaction<'_, '_>, + block_id: u32, + block: bitcoin::Block, + height: u32, + ) -> Result<(), BridgeError> { + let event = SystemEvent::NewFinalizedBlock { + block_id, + block, + height, + }; + + self.queue + .send_with_cxn(&self.queue_name, &event, &mut **dbtx) + .await + .wrap_err("Error sending new block event to queue")?; + Ok(()) + } +} + +/// A task that fetches new finalized blocks from Bitcoin and adds them to the state management queue +#[derive(Debug)] +pub struct BlockFetcherTask { + /// Owner type marker + _phantom: std::marker::PhantomData, +} + +impl BlockFetcherTask { + /// Creates a new finalized block fetcher task that sends new finalized blocks to the message queue. 
+ pub async fn new_finalized_block_fetcher_task( + db: Database, + paramset: &'static ProtocolParamset, + ) -> Result, BridgeError> { + let queue = PGMQueueExt::new_with_pool(db.get_pool()).await; + let queue_name = StateManager::::queue_name(); + + let handler = QueueBlockHandler { + queue, + queue_name: queue_name.clone(), + }; + + // get the next finalized block height to start from + let next_height = db + .get_next_finalized_block_height_for_consumer( + None, + T::FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION, + paramset, + ) + .await?; + + tracing::info!( + "Creating block fetcher task for owner type {} starting from height {}", + T::ENTITY_NAME, + next_height + ); + + Ok(crate::bitcoin_syncer::FinalizedBlockFetcherTask::new( + db, + T::FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION.to_string(), + paramset, + next_height, + handler, + )) + } +} + +/// A task that reads new events from the message queue and processes them. +#[derive(Debug)] +pub struct MessageConsumerTask { + db: Database, + inner: StateManager, + /// Queue name for this owner type (cached) + queue_name: String, +} + +#[async_trait] +impl Task for MessageConsumerTask { + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::StateManager; + + async fn run_once(&mut self) -> Result { + let new_event_received = async { + let mut dbtx = self.db.begin_transaction().await?; + + // Poll new event + let Some(Message { + msg_id, message, .. + }): Option> = self + .inner + .queue + // 2nd param of read_with_cxn is the visibility timeout, set to 0 as we only have 1 consumer of the queue, which is the state machine + // visibility timeout is the time after which the message is visible again to other consumers + .read_with_cxn(&self.queue_name, 0, &mut *dbtx) + .await + .wrap_err("Reading event from queue")? 
+ else { + dbtx.commit().await?; + return Ok::<_, BridgeError>(false); + }; + + self.inner.handle_event(message, &mut dbtx).await?; + + // Delete event from queue + self.inner + .queue + .archive_with_cxn(&self.queue_name, msg_id, &mut *dbtx) + .await + .wrap_err("Deleting event from queue")?; + + dbtx.commit().await?; + Ok(true) + } + .await?; + + Ok(new_event_received) + } +} + +impl IntoTask for StateManager { + type Task = WithDelay>>; + + /// Converts the StateManager into the consumer task with a polling delay. + fn into_task(self) -> Self::Task { + MessageConsumerTask { + db: self.db.clone(), + inner: self, + queue_name: StateManager::::queue_name(), + } + .into_buffered_errors(20) + .with_delay(POLL_DELAY) + } +} + +impl StateManager { + pub async fn block_fetcher_task( + &self, + ) -> Result + std::fmt::Debug>, BridgeError> { + Ok( + BlockFetcherTask::::new_finalized_block_fetcher_task(self.db.clone(), self.paramset) + .await? + .into_buffered_errors(20) + .with_delay(POLL_DELAY), + ) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, sync::Arc}; + + use tokio::{sync::oneshot, task::JoinHandle, time::timeout}; + use tonic::async_trait; + + use crate::{ + builder::transaction::{ContractContext, TransactionType, TxHandler}, + config::{protocol::ProtocolParamsetName, BridgeConfig}, + database::DatabaseTransaction, + states::{block_cache, context::DutyResult, Duty}, + test::common::create_test_config_with_thread_name, + utils::NamedEntity, + }; + + use super::*; + + #[derive(Clone, Debug)] + struct MockHandler; + + impl NamedEntity for MockHandler { + const ENTITY_NAME: &'static str = "MockHandler"; + const TX_SENDER_CONSUMER_ID: &'static str = "mock_tx_sender"; + const FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION: &'static str = + "mock_finalized_block_no_automation"; + const FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION: &'static str = + "mock_finalized_block_automation"; + } + + #[async_trait] + impl Owner for MockHandler { + async fn 
handle_duty(&self, _: Duty) -> Result { + Ok(DutyResult::Handled) + } + + async fn create_txhandlers( + &self, + _: TransactionType, + _: ContractContext, + ) -> Result, BridgeError> { + Ok(BTreeMap::new()) + } + + async fn handle_finalized_block( + &self, + _dbtx: DatabaseTransaction<'_, '_>, + _block_id: u32, + _block_height: u32, + _block_cache: Arc, + _light_client_proof_wait_interval_secs: Option, + ) -> Result<(), BridgeError> { + Ok(()) + } + } + + async fn create_state_manager( + config: &mut BridgeConfig, + ) -> (JoinHandle>, oneshot::Sender<()>) { + let db = Database::new(config).await.unwrap(); + + let state_manager = + StateManager::new(db, MockHandler, ProtocolParamsetName::Regtest.into()) + .await + .unwrap(); + let (t, shutdown) = state_manager.into_task().cancelable_loop(); + (t.into_bg(), shutdown) + } + + #[tokio::test] + async fn test_run_state_manager() { + let mut config = create_test_config_with_thread_name().await; + let (handle, shutdown) = create_state_manager(&mut config).await; + + drop(shutdown); + + timeout(Duration::from_secs(1), handle) + .await + .expect("state manager should exit after shutdown signal (timed out after 1s)") + .expect("state manager should shutdown gracefully (thread panic should not happen)") + .expect("state manager should shutdown gracefully"); + } + + #[tokio::test] + async fn test_state_mgr_does_not_shutdown() { + let mut config = create_test_config_with_thread_name().await; + let (handle, shutdown) = create_state_manager(&mut config).await; + + timeout(Duration::from_secs(1), handle).await.expect_err( + "state manager should not shutdown while shutdown handle is alive (timed out after 1s)", + ); + + drop(shutdown); + } +} diff --git a/core/src/task/aggregator_metric_publisher.rs b/core/src/task/aggregator_metric_publisher.rs new file mode 100644 index 000000000..d4c5ce8c1 --- /dev/null +++ b/core/src/task/aggregator_metric_publisher.rs @@ -0,0 +1,160 @@ +use std::str::FromStr; +use std::{collections::HashMap, 
time::Duration}; + +use tonic::async_trait; + +use crate::{ + aggregator::{Aggregator, EntityId, OperatorId, VerifierId}, + errors::BridgeError, + metrics::EntityL1SyncStatusMetrics, + rpc::clementine::EntityType, + task::{Task, TaskVariant}, +}; + +pub const AGGREGATOR_METRIC_PUBLISHER_POLL_DELAY: Duration = Duration::from_secs(120); + +/// Publishes metrics for the aggregator, including the Entity Statuses of all registered entities. +#[derive(Debug)] +pub struct AggregatorMetricPublisher { + aggregator: Aggregator, + metrics: HashMap, +} + +impl AggregatorMetricPublisher { + pub async fn new(aggregator: Aggregator) -> Result { + Ok(Self { + aggregator: Aggregator::new(aggregator.config).await?, + metrics: HashMap::new(), + }) + } + + /// Convert protobuf EntityId to rust EntityId + fn convert_entity_id( + proto_entity_id: &crate::rpc::clementine::EntityId, + ) -> Result { + let entity_type = EntityType::try_from(proto_entity_id.kind) + .map_err(|_| BridgeError::ConfigError("Invalid entity type".into()))?; + + match entity_type { + EntityType::Operator => { + let xonly_pk = + bitcoin::XOnlyPublicKey::from_str(&proto_entity_id.id).map_err(|e| { + BridgeError::ConfigError(format!( + "Invalid operator xonly public key: {}", + e + )) + })?; + Ok(EntityId::Operator(OperatorId(xonly_pk))) + } + EntityType::Verifier => { + let pk = + bitcoin::secp256k1::PublicKey::from_str(&proto_entity_id.id).map_err(|e| { + BridgeError::ConfigError(format!("Invalid verifier public key: {}", e)) + })?; + Ok(EntityId::Verifier(VerifierId(pk))) + } + EntityType::EntityUnknown => { + Err(BridgeError::ConfigError("Unknown entity type".into())) + } + } + } + + /// Create or get metrics for an entity + fn get_or_create_metrics(&mut self, entity_id: EntityId) -> &mut EntityL1SyncStatusMetrics { + self.metrics.entry(entity_id).or_insert_with(|| { + let scope = format!("{}_l1_sync_status", entity_id); + EntityL1SyncStatusMetrics::describe(&scope); + EntityL1SyncStatusMetrics::new(&scope) + }) + 
} +} + +#[async_trait] +impl Task for AggregatorMetricPublisher { + const VARIANT: TaskVariant = TaskVariant::MetricPublisher; + type Output = bool; + + async fn run_once(&mut self) -> Result { + // Metrics are not published in tests + if cfg!(test) { + return Ok(false); + } + tracing::info!("Publishing metrics for aggregator"); + + let entity_statuses = self + .aggregator + .get_entity_statuses(false) + .await + .inspect_err(|e| { + tracing::error!("Error getting entities status: {:?}", e); + })?; + + tracing::info!("Entities status: {:?}", entity_statuses); + + // Process each entity status + for entity_status_with_id in entity_statuses { + let proto_entity_id = entity_status_with_id + .entity_id + .ok_or_else(|| BridgeError::ConfigError("Missing entity_id".into()))?; + + let entity_id = match Self::convert_entity_id(&proto_entity_id) { + Ok(id) => id, + Err(e) => { + tracing::error!("Failed to convert entity_id: {}", e); + continue; + } + }; + + let metrics = self.get_or_create_metrics(entity_id); + + match entity_status_with_id.status_result { + Some(crate::rpc::clementine::entity_status_with_id::StatusResult::Status( + status, + )) => { + // Parse wallet balance from string (format is "X.XXX BTC") + if let Some(balance) = status + .wallet_balance + .and_then(|s| s.strip_suffix(" BTC").and_then(|s| s.parse::().ok())) + { + metrics.wallet_balance_btc.set(balance); + } + + if let Some(height) = status.rpc_tip_height { + metrics.rpc_tip_height.set(height as f64); + } + if let Some(height) = status.bitcoin_syncer_synced_height { + metrics.btc_syncer_synced_height.set(height as f64); + } + if let Some(height) = status.hcp_last_proven_height { + metrics.hcp_last_proven_height.set(height as f64); + } + if let Some(height) = status.tx_sender_synced_height { + metrics.tx_sender_synced_height.set(height as f64); + } + if let Some(height) = status.finalized_synced_height { + metrics.finalized_synced_height.set(height as f64); + } + if let Some(height) = 
status.state_manager_next_height { + metrics.state_manager_next_height.set(height as f64); + } + if let Some(tasks) = status.stopped_tasks { + metrics + .stopped_tasks_count + .set(tasks.stopped_tasks.len() as f64); + } + } + Some(crate::rpc::clementine::entity_status_with_id::StatusResult::Err(error)) => { + tracing::error!("Entity {} error: {}", entity_id, error.error); + // Increment error counter + metrics.entity_status_error_count.increment(1); + } + None => { + tracing::warn!("Entity {} has no status", entity_id); + } + } + } + + // Always delay by returning false (ie. no work done) + Ok(false) + } +} diff --git a/core/src/task/entity_metric_publisher.rs b/core/src/task/entity_metric_publisher.rs new file mode 100644 index 000000000..f3f9c3060 --- /dev/null +++ b/core/src/task/entity_metric_publisher.rs @@ -0,0 +1,91 @@ +use std::sync::LazyLock; +use std::time::Duration; + +use tonic::async_trait; + +use crate::metrics::L1SyncStatusProvider; + +use crate::{ + database::Database, + errors::BridgeError, + extended_bitcoin_rpc::ExtendedBitcoinRpc, + metrics::L1_SYNC_STATUS, + task::{Task, TaskVariant}, + utils::NamedEntity, +}; + +/// The interval at which the entity metrics are polled and published +/// (Not sent to Prometheus at this interval, since we use a pull-based http listener) +/// +/// This doubles as the timeout for entity status retrieval. 
+pub const ENTITY_METRIC_PUBLISHER_INTERVAL: Duration = Duration::from_secs(120); + +#[derive(Debug, Clone)] +/// Publishes the metrics available for an entity (operator/verifier) +pub struct EntityMetricPublisher { + db: Database, + rpc: ExtendedBitcoinRpc, + _phantom: std::marker::PhantomData, +} + +impl EntityMetricPublisher { + pub fn new(db: Database, rpc: ExtendedBitcoinRpc) -> Self { + Self { + db, + rpc, + _phantom: std::marker::PhantomData, + } + } +} + +#[async_trait] +impl Task for EntityMetricPublisher { + const VARIANT: TaskVariant = TaskVariant::MetricPublisher; + type Output = bool; + + async fn run_once(&mut self) -> Result { + // Metrics are not published in tests + if cfg!(test) { + return Ok(false); + } + + let l1_status = match T::get_l1_status(&self.db, &self.rpc).await { + Ok(l1_status) => l1_status, + Err(e) => { + tracing::error!( + "Failed to get l1 status when publishing metrics for {}: {:?}", + T::ENTITY_NAME, + e + ); + + return Ok(false); + } + }; + + let metric = LazyLock::force(&L1_SYNC_STATUS); + + metric + .wallet_balance_btc + .set(l1_status.wallet_balance.map_or(0.0, |a| a.to_btc())); + metric + .rpc_tip_height + .set(l1_status.rpc_tip_height.unwrap_or(0) as f64); + metric + .hcp_last_proven_height + .set(l1_status.hcp_last_proven_height.unwrap_or(0) as f64); + metric + .btc_syncer_synced_height + .set(l1_status.btc_syncer_synced_height.unwrap_or(0) as f64); + metric + .finalized_synced_height + .set(l1_status.finalized_synced_height.unwrap_or(0) as f64); + metric + .tx_sender_synced_height + .set(l1_status.tx_sender_synced_height.unwrap_or(0) as f64); + metric + .state_manager_next_height + .set(l1_status.state_manager_next_height.unwrap_or(0) as f64); + + Ok(false) + } +} diff --git a/core/src/task/manager.rs b/core/src/task/manager.rs new file mode 100644 index 000000000..5535e9315 --- /dev/null +++ b/core/src/task/manager.rs @@ -0,0 +1,268 @@ +use super::status_monitor::{TaskStatusMonitorTask, TASK_STATUS_MONITOR_POLL_DELAY}; 
+use super::{IntoTask, Task, TaskExt, TaskVariant}; +use crate::errors::BridgeError; +use crate::rpc::clementine::StoppedTasks; +use crate::utils::timed_request; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{oneshot, RwLock}; +use tokio::task::{AbortHandle, JoinHandle}; +use tokio::time::sleep; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum TaskStatus { + Running, + NotRunning(String), +} + +pub type TaskRegistry = + HashMap>)>; + +const TASK_STATUS_FETCH_TIMEOUT: Duration = Duration::from_secs(60); + +/// A background task manager that can hold and manage multiple tasks. When +/// dropped, it will abort all tasks. Graceful shutdown can be performed with +/// `graceful_shutdown` +#[derive(Debug)] +pub struct BackgroundTaskManager { + task_registry: Arc>, +} + +impl Default for BackgroundTaskManager { + fn default() -> Self { + Self { + task_registry: Arc::new(RwLock::new(HashMap::new())), + } + } +} + +impl BackgroundTaskManager { + /// Monitors the spawned task. If any task stops running, logs the reason + /// why and updates the task registry to register the task as not running. 
+ fn monitor_spawned_task( + &self, + handle: JoinHandle>, + task_variant: TaskVariant, + ) { + let task_registry = Arc::downgrade(&self.task_registry); + + tokio::spawn(async move { + let exit_reason = match handle.await { + Ok(Ok(_)) => { + // Task completed successfully + tracing::debug!("Task {:?} completed successfully", task_variant); + "Completed successfully".to_owned() + } + Ok(Err(e)) => { + // Task returned an error + tracing::error!("Task {:?} failed with error: {:?}", task_variant, e); + format!("Failed due to error: {:?}", e) + } + Err(e) => { + if e.is_cancelled() { + // Task was cancelled, which is expected during cleanup + tracing::debug!("Task {:?} was cancelled", task_variant); + "Cancelled".to_owned() + } else { + // Task panicked or was aborted + tracing::error!("Task {:?} panicked: {:?}", task_variant, e); + format!("Panicked due to {:?}", e) + } + } + }; + + let Some(task_registry) = task_registry.upgrade() else { + tracing::debug!( + "Task manager has been dropped, task {:?} no longer monitored", + task_variant + ); + return; + }; + + let mut task_registry = task_registry.write().await; + + if !task_registry.contains_key(&task_variant) { + tracing::error!( + "Invariant violated: Monitored task {:?} not registered in the task registry", + task_variant + ); + return; + } + + task_registry + .entry(task_variant) + .and_modify(|(status, _, _)| { + *status = TaskStatus::NotRunning(exit_reason); + }); + }); + } + + /// Checks if a task is running by checking the task registry + async fn is_task_running(&self, variant: TaskVariant) -> bool { + self.task_registry + .read() + .await + .get(&variant) + .map(|(status, _, _)| status == &TaskStatus::Running) + .unwrap_or(false) + } + + /// Gets all tasks that are not running + /// Returns an error if the task status fetch takes too long + pub async fn get_stopped_tasks(&self) -> Result { + timed_request(TASK_STATUS_FETCH_TIMEOUT, "get_stopped_tasks", async { + let mut stopped_tasks = vec![]; + let 
task_registry = self.task_registry.read().await; + for (variant, (status, _, _)) in task_registry.iter() { + match status { + TaskStatus::Running => {} + TaskStatus::NotRunning(reason) => { + stopped_tasks.push(format!("{:?}: {}", variant, reason)); + } + } + } + Ok(StoppedTasks { stopped_tasks }) + }) + .await + } + + /// Gets the status of a single task by checking the task registry + pub async fn get_task_status(&self, variant: TaskVariant) -> Option { + self.task_registry + .read() + .await + .get(&variant) + .map(|(status, _, _)| status.clone()) + } + + /// Wraps the task in a cancelable loop and spawns it, registers it in the + /// task registry. If a task with the same TaskVariant is already running, + /// it will not be started. + pub async fn ensure_task_looping>(&self, task: U) + where + S: Task + Sized + std::fmt::Debug, + ::Output: Into, + { + self.ensure_monitor_running().await; + + let variant = S::VARIANT; + + // do not start the same task if it is already running + if self.is_task_running(variant).await { + tracing::debug!("Task {:?} is already running, skipping", variant); + return; + } + + let task = task.into_task(); + let (task, cancel_tx) = task.cancelable_loop(); + + let join_handle = task.into_bg(); + let abort_handle = join_handle.abort_handle(); + + self.task_registry.write().await.insert( + variant, + (TaskStatus::Running, abort_handle, Some(cancel_tx)), + ); + + self.monitor_spawned_task(join_handle, variant); + } + + async fn ensure_monitor_running(&self) { + if !self.is_task_running(TaskVariant::TaskStatusMonitor).await { + let task = TaskStatusMonitorTask::new(self.task_registry.clone()) + .with_delay(TASK_STATUS_MONITOR_POLL_DELAY); + + let variant = TaskVariant::TaskStatusMonitor; + let (task, cancel_tx) = task.cancelable_loop(); + let bg_task = task.into_bg(); + let abort_handle = bg_task.abort_handle(); + + self.task_registry.write().await.insert( + variant, + (TaskStatus::Running, abort_handle, Some(cancel_tx)), + ); + + 
self.monitor_spawned_task(bg_task, variant); + } + } + + /// Sends cancel signals to all tasks that have a cancel_tx + async fn send_cancel_signals(&self) { + let mut task_registry = self.task_registry.write().await; + for (_, (_, _, cancel_tx)) in task_registry.iter_mut() { + let oneshot_tx = cancel_tx.take(); + if let Some(oneshot_tx) = oneshot_tx { + // send can fail, but if it fails the task is dropped. + let _ = oneshot_tx.send(()); + } + } + } + + /// Abort all tasks by dropping their cancellation senders + pub fn abort_all(&mut self) { + tracing::info!("Aborting all tasks"); + + // only one thread must have &mut self, so lock should be able to be acquired + if let Ok(task_registry) = self.task_registry.try_read() { + for (_, (_, abort_handle, _)) in task_registry.iter() { + abort_handle.abort(); + } + } + } + + /// Graceful shutdown of all tasks + /// + /// This function does not have any timeout, please use + /// `graceful_shutdown_with_timeout` instead for cases where you need a + /// timeout. The function polls tasks until they are finished with a 100ms + /// poll interval. + pub async fn graceful_shutdown(&mut self) { + tracing::info!("Gracefully shutting down all tasks"); + + self.send_cancel_signals().await; + + loop { + let mut all_finished = true; + let task_registry = self.task_registry.read().await; + + for (_, (_, abort_handle, _)) in task_registry.iter() { + if !abort_handle.is_finished() { + all_finished = false; + break; + } + } + + if all_finished { + break; + } + + sleep(Duration::from_millis(100)).await; + } + } + + /// Graceful shutdown of all tasks with a timeout. All tasks will be aborted + /// if the timeout is reached. + /// + /// # Arguments + /// + /// * `timeout` - The timeout duration for the graceful shutdown. Since the + /// `graceful_shutdown` function polls tasks until they are finished with a + /// 100ms poll interval, the timeout should be at least 100ms for the + /// timeout to be effective. 
+ pub async fn graceful_shutdown_with_timeout(&mut self, timeout: Duration) { + let timeout_handle = tokio::time::timeout(timeout, self.graceful_shutdown()); + + if timeout_handle.await.is_err() { + self.abort_all(); + } + } +} + +impl Drop for BackgroundTaskManager { + fn drop(&mut self) { + tracing::info!("Dropping BackgroundTaskManager, aborting all tasks"); + + self.abort_all(); + } +} diff --git a/core/src/task/mod.rs b/core/src/task/mod.rs new file mode 100644 index 000000000..50d269c46 --- /dev/null +++ b/core/src/task/mod.rs @@ -0,0 +1,378 @@ +use std::time::Duration; +use tokio::sync::oneshot; +use tokio::sync::oneshot::error::TryRecvError; +use tokio::task::{self, JoinHandle}; +use tokio::time::sleep; +use tonic::async_trait; + +use crate::errors::BridgeError; + +pub mod aggregator_metric_publisher; +pub mod entity_metric_publisher; +pub mod manager; +pub mod payout_checker; +pub mod status_monitor; + +/// The variant of the task, used for identifying the task in the status monitor +/// Create a new enum variant for each task that you want to track in the status monitor +/// BackgroundTaskManager will use TaskVariant to identify the tasks, to not start the same task twice. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum TaskVariant { + PayoutChecker, + StateManager, + FinalizedBlockFetcher, + TxSender, + BitcoinSyncer, + TaskStatusMonitor, + #[cfg(test)] + Counter, + #[cfg(test)] + Sleep, + /// Used to publish metrics to Prometheus periodically. This + MetricPublisher, +} + +/// Task trait defining the core behavior for cancelable background tasks +/// +/// This trait is implemented by any struct that needs to run as a background task. +/// The run_once method contains the main logic of the task, and returns a bool +/// indicating whether it did work (true) or needs to wait (false). 
+#[async_trait] +pub trait Task: Send + Sync + 'static { + /// The output of the fn run_once + type Output: Send + Sync + 'static + Sized; + /// The variant of the task + const VARIANT: TaskVariant; + /// Run the task once, returning whether work was done + /// + /// Returns: + /// - `Ok(true)` if the task did some work and is ready to run again immediately + /// - `Ok(false)` if the task did not do work and should wait before running again + /// - `Err(...)` if the task encountered an error + async fn run_once(&mut self) -> Result; +} + +/// A trait for objects that can be converted into a Task +pub trait IntoTask { + type Task: Task; + + /// Convert self into a Task + fn into_task(self) -> Self::Task; +} + +impl IntoTask for T { + type Task = T; + + fn into_task(self) -> Self::Task { + self + } +} + +/// A task that adds a certain delay after the inner task has run +/// to reduce polling frequency. When inner returns false, the delay is applied. +#[derive(Debug)] +pub struct WithDelay +where + T::Output: Into, +{ + /// The task to poll + inner: T, + /// The interval between polls when no work is done + poll_delay: Duration, +} + +impl WithDelay +where + T::Output: Into, +{ + /// Create a new delayed task + pub fn new(inner: T, poll_delay: Duration) -> Self { + Self { inner, poll_delay } + } +} + +#[async_trait] +impl Task for WithDelay +where + T::Output: Into, +{ + type Output = bool; + const VARIANT: TaskVariant = T::VARIANT; + async fn run_once(&mut self) -> Result { + // Run the inner task + let did_work = self.inner.run_once().await?.into(); + + // If the inner task did not do work, sleep for the poll delay + if !did_work { + sleep(self.poll_delay).await; + } + + // Always return false since we've handled the waiting internally + Ok(false) + } +} + +/// A task that can be canceled via a oneshot channel +#[derive(Debug)] +pub struct CancelableTask { + /// The task to run + inner: T, + /// Receiver for cancellation signal + cancel_rx: oneshot::Receiver<()>, +} 
+ +impl CancelableTask { + /// Create a new cancelable task with a cancellation channel + pub fn new(inner: T, cancel_rx: oneshot::Receiver<()>) -> Self { + Self { inner, cancel_rx } + } +} + +#[derive(Debug, Clone)] +pub enum CancelableResult { + Running(T), + Cancelled, +} + +#[async_trait] +impl Task for CancelableTask { + type Output = CancelableResult; + const VARIANT: TaskVariant = T::VARIANT; + + async fn run_once(&mut self) -> Result { + // Check if we've been canceled + if let Err(TryRecvError::Empty) = self.cancel_rx.try_recv() { + // Run the inner task + Ok(CancelableResult::Running(self.inner.run_once().await?)) + } else { + Ok(CancelableResult::Cancelled) + } + } +} + +#[derive(Debug)] +pub struct CancelableLoop { + inner: CancelableTask, +} + +#[async_trait] +impl Task for CancelableLoop { + type Output = (); + const VARIANT: TaskVariant = T::VARIANT; + + async fn run_once(&mut self) -> Result { + loop { + match self.inner.run_once().await { + Ok(CancelableResult::Running(_)) => { + tokio::task::yield_now().await; + continue; + } + Ok(CancelableResult::Cancelled) => return Ok(()), + Err(e) => return Err(e), + } + } + } +} + +#[derive(Debug)] +pub struct BufferedErrors +where + T::Output: Default, +{ + inner: T, + buffer: Vec, + error_overflow_limit: usize, +} + +impl BufferedErrors +where + T::Output: Default, +{ + pub fn new(inner: T, error_overflow_limit: usize) -> Self { + Self { + inner, + buffer: Vec::new(), + error_overflow_limit, + } + } +} + +#[async_trait] +impl Task for BufferedErrors +where + T::Output: Default, +{ + type Output = T::Output; + const VARIANT: TaskVariant = T::VARIANT; + + async fn run_once(&mut self) -> Result { + let result = self.inner.run_once().await; + + match result { + Ok(output) => { + self.buffer.clear(); // clear buffer on first success + Ok(output) + } + Err(e) => { + tracing::error!("Task error, suppressing due to buffer: {e:?}"); + self.buffer.push(e); + if self.buffer.len() >= self.error_overflow_limit { + let 
mut base_error: eyre::Report = + self.buffer.pop().expect("just inserted above").into(); + + for error in std::mem::take(&mut self.buffer) { + base_error = base_error.wrap_err(error); + } + + base_error = base_error.wrap_err(format!( + "Exiting due to {} consecutive errors, the following chain is the list of errors.", + self.error_overflow_limit + )); + + Err(base_error.into()) + } else { + Ok(Default::default()) + } + } + } + } +} + +#[derive(Debug)] +pub struct Map T::Output + Send + Sync + 'static> { + inner: T, + map: F, +} + +#[async_trait] +impl T::Output + Send + Sync + 'static> Task for Map { + type Output = T::Output; + const VARIANT: TaskVariant = T::VARIANT; + + #[track_caller] + async fn run_once(&mut self) -> Result { + let result = self.inner.run_once().await; + let output = match result { + Ok(output) => (self.map)(output), + Err(e) => return Err(e), + }; + Ok(output) + } +} + +/// A task that ignores errors from the inner task and returns a default value. +#[derive(Debug)] +pub struct IgnoreError +where + T::Output: Default, +{ + inner: T, +} + +#[async_trait] +impl Task for IgnoreError +where + T::Output: Default, +{ + type Output = T::Output; + const VARIANT: TaskVariant = T::VARIANT; + + async fn run_once(&mut self) -> Result { + Ok(self + .inner + .run_once() + .await + .inspect_err(|e| { + tracing::error!(task=?self.inner, "Task error, suppressing due to errors ignored: {e:?}"); + }) + .ok() + .unwrap_or_default()) + } +} + +pub trait TaskExt: Task + Sized { + /// Skips running the task after cancellation using the sender. + fn cancelable(self) -> (CancelableTask, oneshot::Sender<()>); + + /// Runs the task in an infinite loop until cancelled using the sender. + fn cancelable_loop(self) -> (CancelableLoop, oneshot::Sender<()>); + + /// Adds the given delay after a run of the task when the task returns false. 
+ fn with_delay(self, poll_delay: Duration) -> WithDelay + where + Self::Output: Into; + + /// Spawns a [`tokio::task`] that runs the task once in the background. + fn into_bg(self) -> JoinHandle>; + + /// Buffers consecutive errors until the task succeeds, emits all errors when there are + /// more than `error_overflow_limit` consecutive errors. + fn into_buffered_errors(self, error_overflow_limit: usize) -> BufferedErrors + where + Self::Output: Default; + + /// Maps the task's `Ok()` output using the given function. + fn map Self::Output + Send + Sync + 'static>( + self, + map: F, + ) -> Map; + + /// Ignores errors from the task. + fn ignore_error(self) -> IgnoreError + where + Self::Output: Default; +} + +impl TaskExt for T { + fn cancelable(self) -> (CancelableTask, oneshot::Sender<()>) { + let (cancel_tx, cancel_rx) = oneshot::channel(); + (CancelableTask::new(self, cancel_rx), cancel_tx) + } + + fn cancelable_loop(self) -> (CancelableLoop, oneshot::Sender<()>) { + let (task, cancel_tx) = self.cancelable(); + (CancelableLoop { inner: task }, cancel_tx) + } + + fn with_delay(self, poll_delay: Duration) -> WithDelay + where + Self::Output: Into, + { + WithDelay::new(self, poll_delay) + } + + fn into_bg(mut self) -> JoinHandle> { + tokio::spawn(async move { + tracing::debug!( + "Running task {:?} with ID {:?}", + Self::VARIANT, + task::try_id() + ); + self.run_once().await + }) + } + + fn into_buffered_errors(self, error_overflow_limit: usize) -> BufferedErrors + where + Self::Output: Default, + { + BufferedErrors::new(self, error_overflow_limit) + } + + fn map Self::Output + Send + Sync + 'static>( + self, + map: F, + ) -> Map { + Map { inner: self, map } + } + + fn ignore_error(self) -> IgnoreError + where + Self::Output: Default, + { + IgnoreError { inner: self } + } +} + +#[cfg(test)] +mod tests; diff --git a/core/src/task/payout_checker.rs b/core/src/task/payout_checker.rs new file mode 100644 index 000000000..ab2c1983b --- /dev/null +++ 
b/core/src/task/payout_checker.rs @@ -0,0 +1,111 @@ +use eyre::OptionExt; +use tokio::time::Duration; +use tonic::async_trait; + +use crate::{citrea::CitreaClientT, database::Database, errors::BridgeError, operator::Operator}; + +use super::{Task, TaskVariant}; + +pub const PAYOUT_CHECKER_POLL_DELAY: Duration = if cfg!(test) { + Duration::from_millis(250) +} else { + Duration::from_secs(60) +}; + +#[derive(Debug, Clone)] +pub struct PayoutCheckerTask { + db: Database, + operator: Operator, +} + +impl PayoutCheckerTask +where + C: CitreaClientT, +{ + pub fn new(db: Database, operator: Operator) -> Self { + Self { db, operator } + } +} + +#[async_trait] +impl Task for PayoutCheckerTask +where + C: CitreaClientT, +{ + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::PayoutChecker; + + async fn run_once(&mut self) -> Result { + let mut dbtx = self.db.begin_transaction().await?; + let unhandled_payout = self + .db + .get_first_unhandled_payout_by_operator_xonly_pk( + Some(&mut dbtx), + self.operator.signer.xonly_public_key, + ) + .await?; + + if unhandled_payout.is_none() { + return Ok(false); + } + + let (citrea_idx, move_to_vault_txid, payout_tx_blockhash) = + unhandled_payout.expect("Must be Some"); + + tracing::info!( + "Unhandled payout found for withdrawal {}, move_txid: {}", + citrea_idx, + move_to_vault_txid + ); + + let deposit_data = self + .db + .get_deposit_data_with_move_tx(Some(&mut dbtx), move_to_vault_txid) + .await?; + if deposit_data.is_none() { + return Err(eyre::eyre!("Deposit data not found").into()); + } + + let deposit_data = deposit_data.expect("Must be Some"); + + let kickoff_txid = self + .operator + .handle_finalized_payout( + &mut dbtx, + deposit_data.get_deposit_outpoint(), + payout_tx_blockhash, + ) + .await?; + + // fetch and save the LCP for if we get challenged and need to provide proof of payout later + let (_, payout_block_height) = self + .operator + .db + .get_block_info_from_hash(Some(&mut dbtx), payout_tx_blockhash) 
+ .await? + .ok_or_eyre("Couldn't find payout blockhash in bitcoin sync")?; + + let _ = self + .operator + .citrea_client + .fetch_validate_and_store_lcp( + payout_block_height as u64, + citrea_idx, + &self.operator.db, + Some(&mut dbtx), + self.operator.config.protocol_paramset(), + ) + .await?; + + #[cfg(feature = "automation")] + self.operator.end_round(&mut dbtx).await?; + + self.db + .mark_payout_handled(Some(&mut dbtx), citrea_idx, kickoff_txid) + .await?; + + dbtx.commit().await?; + + Ok(true) + } +} diff --git a/core/src/task/status_monitor.rs b/core/src/task/status_monitor.rs new file mode 100644 index 000000000..351410e5e --- /dev/null +++ b/core/src/task/status_monitor.rs @@ -0,0 +1,40 @@ +use std::sync::Arc; +use tokio::sync::RwLock; +use tokio::time::Duration; +use tonic::async_trait; + +use crate::errors::BridgeError; + +use super::manager::TaskRegistry; +use super::{manager::TaskStatus, Task, TaskVariant}; + +pub const TASK_STATUS_MONITOR_POLL_DELAY: Duration = Duration::from_secs(300); + +/// A task that monitors the status of all tasks in the background task manager. +/// If a task is not running, it will log an error periodically. 
+#[derive(Debug)] +pub struct TaskStatusMonitorTask { + task_registry: Arc>, +} + +impl TaskStatusMonitorTask { + pub fn new(task_registry: Arc>) -> Self { + Self { task_registry } + } +} + +#[async_trait] +impl Task for TaskStatusMonitorTask { + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::TaskStatusMonitor; + + async fn run_once(&mut self) -> Result { + let task_registry = self.task_registry.read().await; + for (task_variant, (task_status, _, _)) in task_registry.iter() { + if let TaskStatus::NotRunning(reason) = task_status { + tracing::error!("Task {:?} is not running: {}", task_variant, reason); + } + } + Ok(false) + } +} diff --git a/core/src/task/tests.rs b/core/src/task/tests.rs new file mode 100644 index 000000000..2d3d0c8d6 --- /dev/null +++ b/core/src/task/tests.rs @@ -0,0 +1,403 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::sync::Mutex; + +use tokio::time::sleep; + +use crate::errors::BridgeError; +use crate::task::manager::TaskStatus; + +use super::manager::BackgroundTaskManager; +use super::{CancelableResult, Task, TaskExt, TaskVariant}; + +// A simple counter task that increments a counter each time it runs +#[derive(Debug, Clone)] +struct CounterTask { + counter: Arc>, + work_to_do: u32, + current_work: u32, + one_time_fix_at: Option, + should_error: bool, +} + +impl CounterTask { + fn new(counter: Arc>, work_to_do: u32) -> Self { + Self { + counter, + work_to_do, + current_work: 0, + should_error: false, + one_time_fix_at: None, + } + } + + fn with_error(counter: Arc>, work_to_do: u32, one_time_fix_at: Option) -> Self { + Self { + counter, + work_to_do, + current_work: 0, + should_error: true, + one_time_fix_at, + } + } +} + +#[tonic::async_trait] +impl Task for CounterTask { + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::Counter; + + async fn run_once(&mut self) -> Result { + if self.should_error && self.one_time_fix_at != Some(*self.counter.lock().await) { + return 
Err(eyre::eyre!("Task error").into()); + } + + if self.current_work < self.work_to_do { + let mut counter = self.counter.lock().await; + *counter += 1; + self.current_work += 1; + Ok(true) // did work + } else { + Ok(false) // no work to do + } + } +} + +// A task that sleeps for a specified duration +#[derive(Debug, Clone)] +struct SleepTask { + duration: Duration, +} + +impl SleepTask { + fn new(duration: Duration) -> Self { + Self { duration } + } +} + +#[tonic::async_trait] +impl Task for SleepTask { + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::Sleep; + + async fn run_once(&mut self) -> Result { + sleep(self.duration).await; + Ok(true) + } +} + +#[tokio::test] +async fn test_counter_task() { + let counter = Arc::new(Mutex::new(0)); + let mut task = CounterTask::new(Arc::clone(&counter), 5); + + // Run the task 6 times, should increment counter 5 times + for i in 0..6 { + let result = task.run_once().await.unwrap(); + if i < 5 { + assert!(result); // task did work + } else { + assert!(!result); // task did not do work + } + } + + assert_eq!(*counter.lock().await, 5); +} + +#[tokio::test] +async fn test_with_delay() { + let counter = Arc::new(Mutex::new(0)); + let task = CounterTask::new(Arc::clone(&counter), 1); + let mut delayed_task = task.with_delay(Duration::from_millis(100)); + + // First run should do work and return false (because of WithDelay) + let start = Instant::now(); + let result = delayed_task.run_once().await.unwrap(); + assert!(!result); + assert!(start.elapsed() < Duration::from_millis(100)); + assert_eq!(*counter.lock().await, 1); + + // Second run should not do work and wait for the delay + let start = Instant::now(); + let result = delayed_task.run_once().await.unwrap(); + let elapsed = start.elapsed(); + assert!(!result); + assert!(elapsed >= Duration::from_millis(100)); + { + assert_eq!(*counter.lock().await, 1); + } +} + +#[tokio::test] +async fn test_cancelable_task() { + let counter = Arc::new(Mutex::new(0)); + let 
task = CounterTask::new(Arc::clone(&counter), 5); + + let (mut cancelable_task, cancel_tx) = task.cancelable(); + + // Run once, should increment counter + let result = cancelable_task.run_once().await.unwrap(); + if let CancelableResult::Running(did_work) = result { + assert!(did_work); + } else { + panic!("Expected Running result"); + } + { + assert_eq!(*counter.lock().await, 1); + } + + // Cancel the task + cancel_tx.send(()).unwrap(); + + // Run again, should be cancelled + let result = cancelable_task.run_once().await.unwrap(); + if let CancelableResult::Cancelled = result { + // Expected + } else { + panic!("Expected Cancelled result"); + } + + // Counter should still be at 1 + { + assert_eq!(*counter.lock().await, 1); + } +} + +#[tokio::test] +async fn test_cancelable_loop() { + let counter = Arc::new(Mutex::new(0)); + let task = CounterTask::new(Arc::clone(&counter), 5); + + let (mut cancelable_loop, cancel_tx) = task.cancelable_loop(); + + tokio::spawn(async move { + sleep(Duration::from_millis(10)).await; + cancel_tx.send(()).unwrap(); + }); + + // Run the loop, should stop after counter reaches 3 + let result = tokio::time::timeout(Duration::from_millis(20), cancelable_loop.run_once()).await; + assert!(result.is_ok()); + drop(cancelable_loop); + + // Counter should be at 3 (or slightly more if there was a race) + let final_counter = *counter.lock().await; + assert!((3..=5).contains(&final_counter)); +} + +#[tokio::test] +async fn test_into_bg() { + let counter = Arc::new(Mutex::new(0)); + let task = CounterTask::new(Arc::clone(&counter), 1); + + // Spawn the task in the background + let bg_handle = task.into_bg(); + + // Wait for the task to complete + let result = bg_handle.await.unwrap(); + + // Check that the task completed successfully and did work + assert!(result.is_ok()); + assert!(result.unwrap()); + + // Counter should be incremented + assert_eq!(*counter.lock().await, 1); +} + +#[tokio::test] +async fn test_buffered_errors() { + let counter = 
Arc::new(Mutex::new(0)); + let task = CounterTask::with_error(Arc::clone(&counter), 5, None); + let mut buffered_task = task.into_buffered_errors(3); + + // First two errors should be buffered + for _ in 0..2 { + let result = buffered_task.run_once().await; + assert!(result.is_ok()); + } + + // Third error should cause the task to fail + let result = buffered_task.run_once().await; + assert!(result.is_err()); + + // Print the actual error message to understand its format + let err = result.unwrap_err(); + let err_str = format!("{:?}", err); + + assert!( + err_str.contains("Task error"), + "Error does not contain the expected task error message: '{}'", + err_str + ); + + assert!( + err_str.contains("3 consecutive errors"), + "Error does not contain '3 consecutive errors': '{}'", + err_str + ); +} + +#[tokio::test] +async fn test_buffered_errors_without_consecutive_errors() { + let counter = Arc::new(Mutex::new(0)); + let task = CounterTask::with_error(Arc::clone(&counter), 5, Some(2)); + let mut buffered_task = task.into_buffered_errors(3); + + // First two errors should be buffered, then an Ok should reset and the next + // two should also be buffered + for _ in 0..2 { + let result = buffered_task.run_once().await; + assert!(result.is_ok()); + } + + *counter.lock().await = 2; + + for _ in 0..3 { + let result = buffered_task.run_once().await; + assert!(result.is_ok()); + } + + // Sixth error should cause the task to fail + let result = buffered_task.run_once().await; + assert!(result.is_err()); + + // Print the actual error message to understand its format + let err = result.unwrap_err(); + let err_str = format!("{:?}", err); + + assert!( + err_str.contains("Task error"), + "Error does not contain the expected task error message: '{}'", + err_str + ); + + assert!( + err_str.contains("3 consecutive errors"), + "Error does not contain '3 consecutive errors': '{}'", + err_str + ); +} + +#[tokio::test] +async fn test_ignore_error() { + let counter = 
Arc::new(Mutex::new(0)); + let task = CounterTask::with_error(Arc::clone(&counter), 5, None); + let mut ignore_task = task.ignore_error(); + + // Task errors should be ignored + let result = ignore_task.run_once().await; + assert!(result.is_ok()); + assert!(!result.unwrap()); // default value when error is ignored +} + +#[tokio::test] +async fn test_map() { + let counter = Arc::new(Mutex::new(0)); + let task = CounterTask::new(Arc::clone(&counter), 5); + let mut map_task = task.map(|did_work| !did_work); // Invert the boolean + + // Run the task, counter should be incremented + let result = map_task.run_once().await.unwrap(); + assert!(!result); // The original would return true, but we mapped it to !true = false + assert_eq!(*counter.lock().await, 1); +} + +#[tokio::test] +async fn test_task_manager() { + let counter = Arc::new(Mutex::new(0)); + let mut manager = BackgroundTaskManager::default(); + + // Add a task that increments the counter 5 times + let task = CounterTask::new(Arc::clone(&counter), 5); + manager.ensure_task_looping(task.clone()).await; + + // Sleep to give the task time to run + sleep(Duration::from_millis(500)).await; + + // Counter should be at 5 + assert_eq!(*counter.lock().await, 5); + + // Graceful shutdown should allow the task to complete + manager.graceful_shutdown().await; +} + +#[tokio::test] +async fn test_task_manager_abort() { + let counter = Arc::new(Mutex::new(0)); + let mut manager = BackgroundTaskManager::default(); + + // Add a task that sleeps for a long time + let task = SleepTask::new(Duration::from_secs(10)); + manager.ensure_task_looping(task.clone()).await; + + // Start a counter task too + let task = CounterTask::new(Arc::clone(&counter), 100); + manager.ensure_task_looping(task.clone()).await; + + // Sleep for a short time to let tasks start + sleep(Duration::from_millis(100)).await; + + // Abort all tasks + manager.abort_all(); +} + +#[tokio::test] +async fn test_task_manager_timeout() { + let mut manager = 
BackgroundTaskManager::default(); + + // Add a task that sleeps for a long time + let task = SleepTask::new(Duration::from_secs(10)); + manager.ensure_task_looping(task.clone()).await; + + // Graceful shutdown with short timeout should abort the task + let start = Instant::now(); + manager + .graceful_shutdown_with_timeout(Duration::from_millis(200)) + .await; + let elapsed = start.elapsed(); + + // Should timeout and abort quickly + assert!(elapsed < Duration::from_secs(1)); +} + +#[tokio::test] +async fn test_task_manager_abort_and_restart() { + let counter = Arc::new(Mutex::new(0)); + let mut manager = BackgroundTaskManager::default(); + + // Add a task that sleeps for a long time + let sleep_task = SleepTask::new(Duration::from_secs(10)); + manager.ensure_task_looping(sleep_task.clone()).await; + + // Start a counter task too + let counter_task = CounterTask::new(Arc::clone(&counter), 100); + manager.ensure_task_looping(counter_task.clone()).await; + + // Sleep for a short time to let tasks start + sleep(Duration::from_millis(100)).await; + + // Abort all tasks + manager.abort_all(); + + // check tasks are set as not running + let variants = [TaskVariant::Counter, TaskVariant::Sleep]; + tokio::time::sleep(Duration::from_secs(5)).await; + for variant in variants { + assert!(matches!( + manager.get_task_status(variant).await, + Some(TaskStatus::NotRunning(_)) + )); + } + + // check if restart works + manager.ensure_task_looping(sleep_task.clone()).await; + manager.ensure_task_looping(counter_task.clone()).await; + + // check if they are running + for variant in variants { + assert!(matches!( + manager.get_task_status(variant).await, + Some(TaskStatus::Running) + )); + } +} diff --git a/core/src/test/additional_disprove_scripts.rs b/core/src/test/additional_disprove_scripts.rs new file mode 100644 index 000000000..3a81e1fc8 --- /dev/null +++ b/core/src/test/additional_disprove_scripts.rs @@ -0,0 +1,336 @@ +use super::common::citrea::get_bridge_params; +use 
crate::builder::transaction::input::UtxoVout; +use crate::citrea::{CitreaClient, CitreaClientT}; +use crate::test::common::citrea::{CitreaE2EData, SECRET_KEYS}; +use crate::test::common::clementine_utils::disprove_tests_common_setup; +use crate::test::common::tx_utils::get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync; +use crate::utils::initialize_logger; +use crate::{ + extended_bitcoin_rpc::ExtendedBitcoinRpc, + test::common::{ + citrea::{self}, + create_test_config_with_thread_name, + }, +}; +use async_trait::async_trait; +use bitcoin::OutPoint; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::config::{BatchProverConfig, LightClientProverConfig}; +use citrea_e2e::{ + config::{BitcoinConfig, SequencerConfig, TestCaseConfig, TestCaseDockerConfig}, + framework::TestFramework, + test_case::{TestCase, TestCaseRunner}, + Result, +}; +pub enum TestVariant { + CorruptedLatestBlockHash, + CorruptedPayoutTxBlockHash, + CorruptedChallengeSendingWatchtowers, + OperatorForgotWatchtowerChallenge, + CorruptedPublicInput, +} + +struct AdditionalDisproveTest { + variant: TestVariant, +} + +#[async_trait] +impl TestCase for AdditionalDisproveTest { + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-txindex=1", + "-fallbackfee=0.000001", + "-rpcallowip=0.0.0.0/0", + "-dustrelayfee=0", + ], + ..Default::default() + } + } + + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_sequencer: true, + with_batch_prover: true, + with_light_client_prover: true, + with_full_node: true, + docker: TestCaseDockerConfig { + bitcoin: true, + citrea: true, + }, + ..Default::default() + } + } + + fn sequencer_config() -> SequencerConfig { + SequencerConfig { + bridge_initialize_params: get_bridge_params(), + ..Default::default() + } + } + + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + enable_recovery: false, + ..Default::default() + } + } + + fn light_client_prover_config() -> LightClientProverConfig { + 
LightClientProverConfig { + enable_recovery: false, + initial_da_height: 60, + ..Default::default() + } + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + tracing::info!("Starting Citrea"); + let (sequencer, full_node, lc_prover, batch_prover, da) = + citrea::start_citrea(Self::sequencer_config(), f) + .await + .unwrap(); + + let lc_prover = lc_prover.unwrap(); + let batch_prover = batch_prover.unwrap(); + + let mut config = create_test_config_with_thread_name().await; + + match self.variant { + TestVariant::CorruptedLatestBlockHash => { + config.test_params.disrupt_latest_block_hash_commit = true; + } + TestVariant::CorruptedPayoutTxBlockHash => { + config.test_params.disrupt_payout_tx_block_hash_commit = true; + } + TestVariant::CorruptedChallengeSendingWatchtowers => { + config + .test_params + .disrupt_challenge_sending_watchtowers_commit = true; + } + TestVariant::OperatorForgotWatchtowerChallenge => { + config.test_params.operator_forgot_watchtower_challenge = true; + } + TestVariant::CorruptedPublicInput => { + config.test_params.corrupted_public_input = true; + } + } + + citrea::update_config_with_citrea_e2e_values( + &mut config, + da, + sequencer, + Some(( + lc_prover.config.rollup.rpc.bind_host.as_str(), + lc_prover.config.rollup.rpc.bind_port, + )), + ); + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + let citrea_client = CitreaClient::new( + config.citrea_rpc_url.clone(), + config.citrea_light_client_prover_url.clone(), + config.citrea_chain_id, + Some(SECRET_KEYS[0].to_string().parse().unwrap()), + config.citrea_request_timeout, + ) + .await + .unwrap(); + + let citrea_e2e_data = CitreaE2EData { + sequencer, + full_node, + lc_prover, + batch_prover, + da, + config: config.clone(), + citrea_client: &citrea_client, + rpc: &rpc, + }; + + let (actors, kickoff_txid, kickoff_tx) = + 
disprove_tests_common_setup(&citrea_e2e_data).await; + + let disprove_outpoint = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Disprove.get_vout(), + }; + + tracing::info!( + "Disprove outpoint: {:?}, txid: {:?}", + disprove_outpoint, + kickoff_txid + ); + + let txid = get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync( + &rpc, + disprove_outpoint, + &actors, + ) + .await + .unwrap(); + + tracing::info!("Additional disprove txid: {:?}", txid); + + let round_txid = kickoff_tx.input[0].previous_output.txid; + + let burn_connector = OutPoint { + txid: round_txid, + vout: UtxoVout::CollateralInRound.get_vout(), + }; + + let add_disprove_tx = rpc.get_raw_transaction(&txid, None).await?; + + assert!( + add_disprove_tx.input[1].previous_output == burn_connector, + "Additional disprove tx input does not match burn connector outpoint" + ); + + assert_eq!( + add_disprove_tx.input[0].witness.len(), + 562, + "Additional disprove tx input witness length is not 562" + ); + + tracing::info!("Additional disprove transaction is onchain"); + + Ok(()) + } +} + +/// Tests the disprove mechanism when the latest block hash commitment is intentionally corrupted. +/// +/// # Arrange +/// * Sets up full Citrea infrastructure including sequencer, batch prover, light client prover, and DA node. +/// * Sets `disrupt_latest_block_hash_commit = true` to simulate a corrupted block hash during commitment. +/// +/// # Act +/// * Performs deposit and withdrawal operations between Bitcoin and Citrea. +/// * Processes payout and kickoff transactions. +/// * Waits for the disprove transaction to be triggered due to the corrupted block hash in the commitment. +/// +/// # Assert +/// * Confirms that a disprove transaction is created on Bitcoin. +/// * Validates that the disprove transaction consumes the correct input (the burn connector outpoint). 
+#[tokio::test(flavor = "multi_thread")] +#[ignore = "This test is too slow, run separately"] +async fn additional_disprove_script_test_disrupted_latest_block_hash() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let additional_disprove_test = AdditionalDisproveTest { + variant: TestVariant::CorruptedLatestBlockHash, + }; + TestCaseRunner::new(additional_disprove_test).run().await +} + +/// Tests the disprove mechanism when the payout transaction's block hash commitment is intentionally corrupted. +/// +/// # Arrange +/// * Sets up full Citrea infrastructure including sequencer, batch prover, light client prover, and DA node. +/// * Sets `disrupt_payout_tx_block_hash_commit = true` to simulate a corrupted block hash for the payout transaction during commitment. +/// +/// # Act +/// * Performs deposit and withdrawal operations between Bitcoin and Citrea. +/// * Processes payout and kickoff transactions. +/// * Waits for the disprove transaction to be triggered due to the corrupted payout transaction block hash in the commitment. +/// +/// # Assert +/// * Confirms that a disprove transaction is created on Bitcoin. +/// * Validates that the disprove transaction consumes the correct input (the burn connector outpoint). 
+#[tokio::test(flavor = "multi_thread")] +#[ignore = "This test is too slow, run separately"] +async fn additional_disprove_script_test_disrupted_payout_tx_block_hash() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let additional_disprove_test = AdditionalDisproveTest { + variant: TestVariant::CorruptedPayoutTxBlockHash, + }; + TestCaseRunner::new(additional_disprove_test).run().await +} + +/// Tests the disprove mechanism when the commitment for challenges sent by watchtowers is intentionally corrupted. +/// +/// # Arrange +/// * Sets up full Citrea infrastructure including sequencer, batch prover, light client prover, and DA node. +/// * Sets `disrupt_challenge_sending_watchtowers_commit = true` to simulate a corrupted commitment related to watchtower challenges. +/// +/// # Act +/// * Performs deposit and withdrawal operations between Bitcoin and Citrea. +/// * Processes payout and kickoff transactions. +/// * Waits for the disprove transaction to be triggered due to the corrupted watchtower challenge commitment. +/// +/// # Assert +/// * Confirms that a disprove transaction is created on Bitcoin. +/// * Validates that the disprove transaction consumes the correct input (the burn connector outpoint). 
+#[tokio::test(flavor = "multi_thread")] +#[ignore = "This test is too slow, run separately"] +async fn additional_disprove_script_test_disrupt_chal_sending_wts() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let additional_disprove_test = AdditionalDisproveTest { + variant: TestVariant::CorruptedChallengeSendingWatchtowers, + }; + TestCaseRunner::new(additional_disprove_test).run().await +} + +/// Tests the disprove mechanism when an operator "forgets" to include a watchtower challenge. +/// +/// # Arrange +/// * Sets up full Citrea infrastructure including sequencer, batch prover, light client prover, and DA node. +/// * Sets `operator_forgot_watchtower_challenge = true` to simulate a scenario where an operator fails to send a necessary watchtower challenge. +/// +/// # Act +/// * Performs deposit and withdrawal operations between Bitcoin and Citrea. +/// * Processes payout and kickoff transactions. +/// * Waits for the disprove transaction to be triggered due to the operator's failure to include a watchtower challenge. +/// +/// # Assert +/// * Confirms that a disprove transaction is created on Bitcoin. +/// * Validates that the disprove transaction consumes the correct input (the burn connector outpoint). 
+#[tokio::test(flavor = "multi_thread")] +#[ignore = "This test is too slow, run separately"] +async fn additional_disprove_script_test_operator_forgot_wt_challenge() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let additional_disprove_test = AdditionalDisproveTest { + variant: TestVariant::OperatorForgotWatchtowerChallenge, + }; + TestCaseRunner::new(additional_disprove_test).run().await +} + +/// Tests the disprove mechanism when the public input is intentionally corrupted. +/// +/// # Arrange +/// * Sets up full Citrea infrastructure including sequencer, batch prover, light client prover, and DA node. +/// * Sets `corrupted_public_input = true` to simulate a corrupted public input scenario. +/// +/// # Act +/// * Performs deposit and withdrawal operations between Bitcoin and Citrea. +/// * Processes payout and kickoff transactions. +/// * Waits for the disprove transaction to be triggered due to the corrupted public input. +/// +/// # Assert +/// * Confirms that a disprove transaction is created on Bitcoin. +/// * Validates that the disprove transaction consumes the correct input (the burn connector outpoint). 
+#[tokio::test(flavor = "multi_thread")] +#[ignore = "This test is too slow, run separately"] +async fn additional_disprove_script_test_corrupted_public_input() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let additional_disprove_test = AdditionalDisproveTest { + variant: TestVariant::CorruptedPublicInput, + }; + TestCaseRunner::new(additional_disprove_test).run().await +} diff --git a/core/src/test/bitvm_disprove_scripts.rs b/core/src/test/bitvm_disprove_scripts.rs new file mode 100644 index 000000000..8afd986c4 --- /dev/null +++ b/core/src/test/bitvm_disprove_scripts.rs @@ -0,0 +1,298 @@ +use super::common::citrea::get_bridge_params; +use crate::builder::transaction::input::UtxoVout; +use crate::citrea::{CitreaClient, CitreaClientT}; +use crate::test::common::citrea::{CitreaE2EData, SECRET_KEYS}; +use crate::test::common::clementine_utils::disprove_tests_common_setup; +use crate::test::common::tx_utils::get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync; +use crate::utils::initialize_logger; +use crate::{ + extended_bitcoin_rpc::ExtendedBitcoinRpc, + test::common::{ + citrea::{self}, + create_test_config_with_thread_name, + }, +}; +use async_trait::async_trait; +use bitcoin::OutPoint; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::config::{BatchProverConfig, LightClientProverConfig}; +use citrea_e2e::{ + config::{BitcoinConfig, SequencerConfig, TestCaseConfig, TestCaseDockerConfig}, + framework::TestFramework, + test_case::{TestCase, TestCaseRunner}, + Result, +}; + +pub enum DisproveTestVariant { + HealthyState, + CorruptedAssert, +} + +struct DisproveTest { + variant: DisproveTestVariant, +} + +#[async_trait] +impl TestCase for DisproveTest { + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-txindex=1", + "-fallbackfee=0.000001", + 
"-rpcallowip=0.0.0.0/0", + "-limitancestorsize=1010", + "-limitdescendantsize=1010", + "-acceptnonstdtxn=1", + "-dustrelayfee=0", + ], + ..Default::default() + } + } + + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_sequencer: true, + with_batch_prover: true, + with_light_client_prover: true, + with_full_node: true, + docker: TestCaseDockerConfig { + bitcoin: true, + citrea: true, + }, + ..Default::default() + } + } + + fn sequencer_config() -> SequencerConfig { + SequencerConfig { + bridge_initialize_params: get_bridge_params(), + ..Default::default() + } + } + + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + enable_recovery: false, + ..Default::default() + } + } + + fn light_client_prover_config() -> LightClientProverConfig { + LightClientProverConfig { + enable_recovery: false, + initial_da_height: 60, + ..Default::default() + } + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + tracing::info!("Starting Citrea"); + let (sequencer, full_node, lc_prover, batch_prover, da) = + citrea::start_citrea(Self::sequencer_config(), f) + .await + .unwrap(); + + let lc_prover = lc_prover.unwrap(); + let batch_prover = batch_prover.unwrap(); + + let mut config = create_test_config_with_thread_name().await; + // only verifiers 0 and 1 will send disprove transactions + config.test_params.verifier_do_not_send_disprove_indexes = Some(vec![2, 3]); + + match self.variant { + DisproveTestVariant::HealthyState => {} + DisproveTestVariant::CorruptedAssert => { + config.test_params.corrupted_asserts = true; + } + } + + citrea::update_config_with_citrea_e2e_values( + &mut config, + da, + sequencer, + Some(( + lc_prover.config.rollup.rpc.bind_host.as_str(), + lc_prover.config.rollup.rpc.bind_port, + )), + ); + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + let citrea_client = CitreaClient::new( 
+ config.citrea_rpc_url.clone(), + config.citrea_light_client_prover_url.clone(), + config.citrea_chain_id, + Some(SECRET_KEYS[0].to_string().parse().unwrap()), + config.citrea_request_timeout, + ) + .await + .unwrap(); + + let citrea_e2e_data = CitreaE2EData { + sequencer, + full_node, + lc_prover, + batch_prover, + da, + config: config.clone(), + citrea_client: &citrea_client, + rpc: &rpc, + }; + + let (actors, kickoff_txid, kickoff_tx) = + disprove_tests_common_setup(&citrea_e2e_data).await; + + match self.variant { + DisproveTestVariant::HealthyState => { + let disprove_timeout_outpoint = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Disprove.get_vout(), + }; + + tracing::info!( + "Disprove timeout outpoint: {:?}, txid: {:?}", + disprove_timeout_outpoint, + kickoff_txid + ); + + let txid = get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync( + &rpc, + disprove_timeout_outpoint, + &actors, + ) + .await + .unwrap(); + + tracing::info!("Disprove timeout txid: {:?}", txid); + + let kickoff_finalizer_out = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::KickoffFinalizer.get_vout(), + }; + + let disprove_timeout_tx = rpc.get_raw_transaction(&txid, None).await?; + + assert!( + disprove_timeout_tx.input[1].previous_output == kickoff_finalizer_out, + "Disprove timeout tx input does not match kickoff finalizer outpoint. Disprove tx is sent instead." 
+ ); + + tracing::info!("Disprove timeout transaction is onchain"); + Ok(()) + } + DisproveTestVariant::CorruptedAssert => { + let disprove_outpoint = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Disprove.get_vout(), + }; + + tracing::info!( + "Disprove outpoint: {:?}, txid: {:?}", + disprove_outpoint, + kickoff_txid + ); + + let txid = get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync( + &rpc, + disprove_outpoint, + &actors, + ) + .await + .unwrap(); + + tracing::info!("Disprove txid: {:?}", txid); + + let round_txid = kickoff_tx.input[0].previous_output.txid; + + let burn_connector = OutPoint { + txid: round_txid, + vout: UtxoVout::CollateralInRound.get_vout(), + }; + + let disprove_tx = rpc.get_raw_transaction(&txid, None).await?; + + assert!( + disprove_tx.input[1].previous_output == burn_connector, + "Disprove tx input does not match burn connector outpoint" + ); + + const CONTROL_BLOCK_LENGTH_DEPTH_11: usize = 1 + 32 + 32 * 11; // 385 - Length of the control block in the disprove script + + const CONTROL_BLOCK_LENGTH_DEPTH_10: usize = 1 + 32 + 32 * 10; // 353 - Length of the control block in the disprove script + + let witness = &disprove_tx.input[0].witness; + let control_block = &witness[witness.len() - 1]; + + // Check if the control block length matches either depth 10 or 11 which are the only valid depths for disprove transactions + // This differs from additional disprove tx, which has a smaller control block length + assert!( + control_block.len() == CONTROL_BLOCK_LENGTH_DEPTH_10 + || control_block.len() == CONTROL_BLOCK_LENGTH_DEPTH_11, + "Control block length does not match expected depth 10 or 11 (got {})", + control_block.len() + ); + + tracing::info!("Disprove transaction is onchain"); + Ok(()) + } + } + } +} + +/// Tests the disprove timeout mechanism in a healthy, non-disrupted protocol state. +/// +/// # Arrange +/// * Sets up full Citrea stack with sequencer, DA node, batch prover, and light client prover. 
+/// * Uses default bridge configuration without any intentional disruption. +/// +/// # Act +/// * Executes deposit and withdrawal flows. +/// * Processes the payout and kickoff transactions. +/// * Waits for the disprove timeout to activate. +/// +/// # Assert +/// * Confirms that a disprove timeout transaction is created and included on Bitcoin. +/// * Verifies that the transaction correctly spends the `KickoffFinalizer` output. +#[tokio::test(flavor = "multi_thread")] +#[ignore = "This test is too slow, run separately"] +async fn disprove_script_test_healthy() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let additional_disprove_test = DisproveTest { + variant: DisproveTestVariant::HealthyState, + }; + TestCaseRunner::new(additional_disprove_test).run().await +} + +/// Tests the disprove mechanism in the presence of a corrupted assert commitment. +/// +/// # Arrange +/// - Sets up the full Citrea stack: sequencer, DA node, batch prover, and light client prover. +/// - Sets `corrupted_asserts = true` in the configuration to simulate a corrupted assert scenario. +/// +/// # Act +/// - Executes deposit and withdrawal flows. +/// - Processes payout and kickoff transactions. +/// - Waits for the disprove transaction to be triggered due to the corrupted assert. +/// +/// # Assert +/// - Confirms a disprove transaction is created and included on Bitcoin. +/// - Validates that the disprove transaction consumes the correct input (the `BurnConnector` outpoint). 
+#[tokio::test(flavor = "multi_thread")] +#[ignore = "This test is too slow, run separately"] +async fn disprove_script_test_corrupted_assert() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let additional_disprove_test = DisproveTest { + variant: DisproveTestVariant::CorruptedAssert, + }; + TestCaseRunner::new(additional_disprove_test).run().await +} diff --git a/core/src/test/bitvm_script.rs b/core/src/test/bitvm_script.rs new file mode 100644 index 000000000..9909e0f71 --- /dev/null +++ b/core/src/test/bitvm_script.rs @@ -0,0 +1,461 @@ +mod tests { + use bitcoin::hashes::hash160; + use bitcoin::hashes::Hash; + use bitvm::signatures::signing_winternitz::WINTERNITZ_MESSAGE_VERIFIER; + use bitvm::{ + clementine::additional_disprove::{ + create_additional_replacable_disprove_script, validate_assertions_for_additional_script, + }, + signatures::winternitz::{generate_public_key, Parameters}, + }; + use bridge_circuit_host::structs::BridgeCircuitBitvmInputs; + + pub const BRIDGE_CIRCUIT_BITVM_TEST_INPUTS: BridgeCircuitBitvmInputs = + BridgeCircuitBitvmInputs { + payout_tx_block_hash: [ + 171, 145, 219, 174, 239, 44, 95, 81, 182, 77, 233, 148, 175, 177, 146, 161, 119, + 61, 44, 98, + ], + latest_block_hash: [ + 18, 6, 170, 190, 86, 52, 47, 93, 55, 8, 204, 59, 237, 40, 246, 254, 168, 183, 8, + 111, + ], + challenge_sending_watchtowers: [ + 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + deposit_constant: [ + 33, 89, 238, 181, 40, 137, 174, 16, 33, 60, 154, 141, 145, 173, 28, 218, 8, 235, + 65, 88, 190, 165, 233, 68, 142, 1, 26, 31, 141, 101, 180, 40, + ], + combined_method_id: [ + 161, 224, 123, 224, 161, 79, 5, 157, 211, 176, 198, 123, 128, 173, 148, 114, 197, + 152, 64, 188, 185, 37, 45, 158, 225, 162, 241, 192, 225, 240, 16, 113, + ], + }; + + pub const TEST_GROTH16_PUBLIC_INPUT: [u8; 
32] = [ + 0, 203, 5, 31, 138, 117, 119, 62, 52, 255, 223, 38, 213, 32, 143, 9, 191, 212, 207, 152, + 21, 182, 225, 177, 179, 58, 105, 29, 64, 114, 229, 184, + ]; + + type BitvmTestEnv = ( + Vec, + Parameters, + Parameters, + Parameters, + Parameters, + Vec, + Vec, + Vec, + Vec, + ); + + fn setup_bitvm_test_environment(num_dummy_challenges: usize) -> BitvmTestEnv { + let groth16_public_input_wsk = vec![1u8; 20]; + let payout_tx_block_hash_wsk = vec![2u8; 20]; + let latest_block_hash_wsk = vec![3u8; 20]; + let challenge_sending_watchtowers_wsk = vec![4u8; 20]; + + let groth16_public_input_params = Parameters::new(64, 4); + let payout_tx_block_hash_params = Parameters::new(40, 4); + let latest_block_hash_params = Parameters::new(40, 4); + let challenge_sending_watchtowers_params = Parameters::new(40, 4); + + let groth16_public_input_pk = + generate_public_key(&groth16_public_input_params, &groth16_public_input_wsk); + let payout_tx_block_hash_pk = + generate_public_key(&payout_tx_block_hash_params, &payout_tx_block_hash_wsk); + let latest_block_hash_pk = + generate_public_key(&latest_block_hash_params, &latest_block_hash_wsk); + let challenge_sending_watchtowers_pk = generate_public_key( + &challenge_sending_watchtowers_params, + &challenge_sending_watchtowers_wsk, + ); + + let dummy_challenge_preimages = vec![[31u8; 20]; num_dummy_challenges]; + let mut dummy_challenge_hashes: [[u8; 20]; 160] = [[0u8; 20]; 160]; + for (idx, preimage) in dummy_challenge_preimages.iter().enumerate() { + dummy_challenge_hashes[idx] = *hash160::Hash::hash(preimage.as_ref()).as_byte_array(); + } + + let script = create_additional_replacable_disprove_script( + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.combined_method_id, + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.deposit_constant, + groth16_public_input_pk, + payout_tx_block_hash_pk, + latest_block_hash_pk, + challenge_sending_watchtowers_pk, + dummy_challenge_hashes.to_vec(), + ); + + ( + script, + groth16_public_input_params, + 
payout_tx_block_hash_params, + latest_block_hash_params, + challenge_sending_watchtowers_params, + groth16_public_input_wsk, + payout_tx_block_hash_wsk, + latest_block_hash_wsk, + challenge_sending_watchtowers_wsk, + ) + } + + #[test] + fn test_bitvm_script() { + let ( + script, + groth16_public_input_params, + payout_tx_block_hash_params, + latest_block_hash_params, + challenge_sending_watchtowers_params, + groth16_public_input_wsk, + payout_tx_block_hash_wsk, + latest_block_hash_wsk, + challenge_sending_watchtowers_wsk, + ) = setup_bitvm_test_environment(1); + + // Sign the winternitz messages + let groth16_public_input_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &groth16_public_input_params, + &groth16_public_input_wsk, + TEST_GROTH16_PUBLIC_INPUT.as_ref(), + ); + + let payout_tx_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &payout_tx_block_hash_params, + &payout_tx_block_hash_wsk, + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS + .payout_tx_block_hash + .as_ref(), + ); + + let latest_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &latest_block_hash_params, + &latest_block_hash_wsk, + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.latest_block_hash.as_ref(), + ); + + let challenge_sending_watchtowers_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &challenge_sending_watchtowers_params, + &challenge_sending_watchtowers_wsk, + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS + .challenge_sending_watchtowers + .as_ref(), + ); + + let dummy_challenge_preimages_final: [Option<[u8; 20]>; 160] = [None; 160]; + + let resulting_witness = validate_assertions_for_additional_script( + script, + groth16_public_input_witness, + payout_tx_block_hash_witness, + latest_block_hash_witness, + challenge_sending_watchtowers_witness, + dummy_challenge_preimages_final.to_vec(), + ); + + assert!(resulting_witness.is_none(), "Witness is invalid"); + } + + #[test] + fn spendable_by_pre_image() { + let ( + script, + groth16_public_input_params, + payout_tx_block_hash_params, + latest_block_hash_params, + 
challenge_sending_watchtowers_params, + groth16_public_input_wsk, + payout_tx_block_hash_wsk, + latest_block_hash_wsk, + challenge_sending_watchtowers_wsk, + ) = setup_bitvm_test_environment(160); + + // Sign the winternitz messages + let groth16_public_input_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &groth16_public_input_params, + &groth16_public_input_wsk, + TEST_GROTH16_PUBLIC_INPUT.as_ref(), + ); + + let payout_tx_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &payout_tx_block_hash_params, + &payout_tx_block_hash_wsk, + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS + .payout_tx_block_hash + .as_ref(), + ); + + let latest_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &latest_block_hash_params, + &latest_block_hash_wsk, + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.latest_block_hash.as_ref(), + ); + + let challenge_sending_watchtowers_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &challenge_sending_watchtowers_params, + &challenge_sending_watchtowers_wsk, + BRIDGE_CIRCUIT_BITVM_TEST_INPUTS + .challenge_sending_watchtowers + .as_ref(), + ); + + let mut dummy_challenge_preimages_final: [Option<[u8; 20]>; 160] = [None; 160]; + dummy_challenge_preimages_final[5] = [31u8; 20].into(); + + let resulting_witness = validate_assertions_for_additional_script( + script, + groth16_public_input_witness, + payout_tx_block_hash_witness, + latest_block_hash_witness, + challenge_sending_watchtowers_witness, + dummy_challenge_preimages_final.to_vec(), + ); + + assert!( + resulting_witness.is_some(), + "The script should be spendable by revealed preimage" + ); + } + + #[test] + fn spendable_by_invalid_latest_block_hash() { + let ( + script, + groth16_public_input_params, + payout_tx_block_hash_params, + latest_block_hash_params, + challenge_sending_watchtowers_params, + groth16_public_input_wsk, + payout_tx_block_hash_wsk, + latest_block_hash_wsk, + challenge_sending_watchtowers_wsk, + ) = setup_bitvm_test_environment(160); + + // Sign the winternitz messages + let 
groth16_public_input_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &groth16_public_input_params, + &groth16_public_input_wsk, + &TEST_GROTH16_PUBLIC_INPUT, + ); + + let payout_tx_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &payout_tx_block_hash_params, + &payout_tx_block_hash_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.payout_tx_block_hash, + ); + + let mut latest_block_hash = BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.latest_block_hash.to_vec(); + latest_block_hash[0] = 0; + + let latest_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &latest_block_hash_params, + &latest_block_hash_wsk, + &latest_block_hash, + ); + + let challenge_sending_watchtowers_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &challenge_sending_watchtowers_params, + &challenge_sending_watchtowers_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.challenge_sending_watchtowers, + ); + + let dummy_challenge_preimages_final: [Option<[u8; 20]>; 160] = [None; 160]; + + let resulting_witness = validate_assertions_for_additional_script( + script, + groth16_public_input_witness, + payout_tx_block_hash_witness, + latest_block_hash_witness, + challenge_sending_watchtowers_witness, + dummy_challenge_preimages_final.to_vec(), + ); + + assert!( + resulting_witness.is_some(), + "The script should be spendable by invalid latest block hash" + ); + } + + #[test] + fn spendable_by_invalid_payout_block_hash() { + let ( + script, + groth16_public_input_params, + payout_tx_block_hash_params, + latest_block_hash_params, + challenge_sending_watchtowers_params, + groth16_public_input_wsk, + payout_tx_block_hash_wsk, + latest_block_hash_wsk, + challenge_sending_watchtowers_wsk, + ) = setup_bitvm_test_environment(160); + + // Sign the winternitz messages + let groth16_public_input_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &groth16_public_input_params, + &groth16_public_input_wsk, + &TEST_GROTH16_PUBLIC_INPUT, + ); + + let mut payout_tx_block_hash = BRIDGE_CIRCUIT_BITVM_TEST_INPUTS + .payout_tx_block_hash + .to_vec(); 
+ payout_tx_block_hash[0] = 0; + + let payout_tx_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &payout_tx_block_hash_params, + &payout_tx_block_hash_wsk, + &payout_tx_block_hash, + ); + + let latest_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &latest_block_hash_params, + &latest_block_hash_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.latest_block_hash, + ); + + let challenge_sending_watchtowers_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &challenge_sending_watchtowers_params, + &challenge_sending_watchtowers_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.challenge_sending_watchtowers, + ); + + let dummy_challenge_preimages_final: [Option<[u8; 20]>; 160] = [None; 160]; + + let resulting_witness = validate_assertions_for_additional_script( + script, + groth16_public_input_witness, + payout_tx_block_hash_witness, + latest_block_hash_witness, + challenge_sending_watchtowers_witness, + dummy_challenge_preimages_final.to_vec(), + ); + + assert!( + resulting_witness.is_some(), + "The script should be spendable by invalid payout tx block hash" + ); + } + + #[test] + fn spendable_by_invalid_g16_public_input() { + let ( + script, + groth16_public_input_params, + payout_tx_block_hash_params, + latest_block_hash_params, + challenge_sending_watchtowers_params, + groth16_public_input_wsk, + payout_tx_block_hash_wsk, + latest_block_hash_wsk, + challenge_sending_watchtowers_wsk, + ) = setup_bitvm_test_environment(160); + + let mut g16_public_input = BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.latest_block_hash.to_vec(); + g16_public_input[1] = 0; + + // Sign the winternitz messages + let groth16_public_input_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &groth16_public_input_params, + &groth16_public_input_wsk, + &g16_public_input, + ); + + let payout_tx_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &payout_tx_block_hash_params, + &payout_tx_block_hash_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.payout_tx_block_hash, + ); + + let latest_block_hash_witness = 
WINTERNITZ_MESSAGE_VERIFIER.sign( + &latest_block_hash_params, + &latest_block_hash_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.latest_block_hash, + ); + + let challenge_sending_watchtowers_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &challenge_sending_watchtowers_params, + &challenge_sending_watchtowers_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.challenge_sending_watchtowers, + ); + + let dummy_challenge_preimages_final: [Option<[u8; 20]>; 160] = [None; 160]; + + let resulting_witness = validate_assertions_for_additional_script( + script, + groth16_public_input_witness, + payout_tx_block_hash_witness, + latest_block_hash_witness, + challenge_sending_watchtowers_witness, + dummy_challenge_preimages_final.to_vec(), + ); + + assert!( + resulting_witness.is_some(), + "The script should be spendable by invalid g16 public input" + ); + } + + #[test] + fn spendable_by_invalid_challenge_sending_watchtowers() { + let ( + script, + groth16_public_input_params, + payout_tx_block_hash_params, + latest_block_hash_params, + challenge_sending_watchtowers_params, + groth16_public_input_wsk, + payout_tx_block_hash_wsk, + latest_block_hash_wsk, + challenge_sending_watchtowers_wsk, + ) = setup_bitvm_test_environment(160); + + // Sign the winternitz messages + let groth16_public_input_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &groth16_public_input_params, + &groth16_public_input_wsk, + &TEST_GROTH16_PUBLIC_INPUT, + ); + + let payout_tx_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &payout_tx_block_hash_params, + &payout_tx_block_hash_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.payout_tx_block_hash, + ); + + let latest_block_hash_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &latest_block_hash_params, + &latest_block_hash_wsk, + &BRIDGE_CIRCUIT_BITVM_TEST_INPUTS.latest_block_hash, + ); + + let mut challenge_sending_watchtowers = BRIDGE_CIRCUIT_BITVM_TEST_INPUTS + .challenge_sending_watchtowers + .to_vec(); + challenge_sending_watchtowers[0] = 0; + + let 
challenge_sending_watchtowers_witness = WINTERNITZ_MESSAGE_VERIFIER.sign( + &challenge_sending_watchtowers_params, + &challenge_sending_watchtowers_wsk, + &challenge_sending_watchtowers, + ); + + let dummy_challenge_preimages_final: [Option<[u8; 20]>; 160] = [None; 160]; + + let resulting_witness = validate_assertions_for_additional_script( + script, + groth16_public_input_witness, + payout_tx_block_hash_witness, + latest_block_hash_witness, + challenge_sending_watchtowers_witness, + dummy_challenge_preimages_final.to_vec(), + ); + + assert!( + resulting_witness.is_some(), + "The script should be spendable by invalid challenge sending watchtowers" + ); + } +} diff --git a/core/src/test/bridge_circuit_test_data.rs b/core/src/test/bridge_circuit_test_data.rs new file mode 100644 index 000000000..6d3173460 --- /dev/null +++ b/core/src/test/bridge_circuit_test_data.rs @@ -0,0 +1,263 @@ +//! This module contains integration tests for generating data used in bridge circuit tests. +//! +//! The tests in this file are intended for data generation purposes only and are not meant to be run as part of the standard test suite. +//! They are ignored by default and should be executed manually when bridge-related code changes, to ensure that the generated test data remains up-to-date and consistent with the current implementation. 
+use super::common::citrea::get_bridge_params; +use crate::citrea::{CitreaClient, CitreaClientT}; +use crate::test::common::citrea::{CitreaE2EData, SECRET_KEYS}; +use crate::test::common::clementine_utils::disprove_tests_common_setup; +use crate::utils::initialize_logger; +use crate::{ + extended_bitcoin_rpc::ExtendedBitcoinRpc, + test::common::{ + citrea::{self}, + create_test_config_with_thread_name, + }, +}; +use async_trait::async_trait; +use citrea_e2e::config::{BatchProverConfig, LightClientProverConfig}; +use citrea_e2e::{ + config::{BitcoinConfig, SequencerConfig, TestCaseConfig, TestCaseDockerConfig}, + framework::TestFramework, + test_case::{TestCase, TestCaseRunner}, + Result, +}; + +#[derive(PartialEq)] +pub enum BridgeCircuitTestDataVariant { + WithAnnex, + LargeInput, + LargeOutput, + LargeInputAndOutput, + InsufficientTotalWork, + Valid, + FirstTwoValid, +} + +struct BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant, +} + +#[async_trait] +impl TestCase for BridgeCircuitTestData { + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-txindex=1", + "-fallbackfee=0.000001", + "-rpcallowip=0.0.0.0/0", + "-dustrelayfee=0", + "-acceptnonstdtxn=1", + ], + ..Default::default() + } + } + + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_sequencer: true, + with_batch_prover: true, + with_light_client_prover: true, + with_full_node: true, + docker: TestCaseDockerConfig { + bitcoin: true, + citrea: true, + }, + ..Default::default() + } + } + + fn sequencer_config() -> SequencerConfig { + SequencerConfig { + bridge_initialize_params: get_bridge_params(), + ..Default::default() + } + } + + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + enable_recovery: false, + ..Default::default() + } + } + + fn light_client_prover_config() -> LightClientProverConfig { + LightClientProverConfig { + enable_recovery: false, + initial_da_height: 60, + ..Default::default() + } + } + + async fn 
run_test(&mut self, f: &mut TestFramework) -> Result<()> { + tracing::info!("Starting Citrea"); + let (sequencer, full_node, lc_prover, batch_prover, da) = + citrea::start_citrea(Self::sequencer_config(), f) + .await + .unwrap(); + + let lc_prover = lc_prover.unwrap(); + let batch_prover = batch_prover.unwrap(); + + let mut config = create_test_config_with_thread_name().await; + + match self.variant { + BridgeCircuitTestDataVariant::InsufficientTotalWork => { + config + .test_params + .generate_varying_total_works_insufficient_total_work = true; + } + BridgeCircuitTestDataVariant::Valid => { + config.test_params.generate_varying_total_works = true; + } + BridgeCircuitTestDataVariant::WithAnnex => { + config.test_params.use_small_annex = true; + } + BridgeCircuitTestDataVariant::LargeInput => { + config.test_params.use_large_annex = true; + } + BridgeCircuitTestDataVariant::LargeOutput => { + config.test_params.use_large_output = true; + } + BridgeCircuitTestDataVariant::LargeInputAndOutput => { + config.test_params.use_large_annex_and_output = true; + } + BridgeCircuitTestDataVariant::FirstTwoValid => { + config + .test_params + .generate_varying_total_works_first_two_valid = true; + } + } + + citrea::update_config_with_citrea_e2e_values( + &mut config, + da, + sequencer, + Some(( + lc_prover.config.rollup.rpc.bind_host.as_str(), + lc_prover.config.rollup.rpc.bind_port, + )), + ); + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + let citrea_client = CitreaClient::new( + config.citrea_rpc_url.clone(), + config.citrea_light_client_prover_url.clone(), + config.citrea_chain_id, + Some(SECRET_KEYS[0].to_string().parse().unwrap()), + config.citrea_request_timeout, + ) + .await + .unwrap(); + + let citrea_e2e_data = CitreaE2EData { + sequencer, + full_node, + lc_prover, + batch_prover, + da, + config: config.clone(), + citrea_client: 
&citrea_client, + rpc: &rpc, + }; + + let (_actors, _kickoff_txid, _kickoff_tx) = + disprove_tests_common_setup(&citrea_e2e_data).await; + + Ok(()) + } +} + +#[tokio::test] +#[ignore = "Only run this test manually, it's for data generation purposes"] +async fn bridge_circuit_test_data_diverse_hcp_lengths() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let bridge_circuit_test_data = BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant::Valid, + }; + TestCaseRunner::new(bridge_circuit_test_data).run().await +} + +#[tokio::test] +#[ignore = "Only run this test manually, it's for data generation purposes"] +async fn bridge_circuit_test_data_insuff_total_work_diverse_hcp_lens() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + + let bridge_circuit_test_data = BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant::InsufficientTotalWork, + }; + TestCaseRunner::new(bridge_circuit_test_data).run().await +} + +#[tokio::test] +#[ignore = "Only run this test manually, it's for data generation purposes"] +async fn bridge_circuit_test_data_diverse_hcp_lens_first_two_valid() -> Result<()> { + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + + let bridge_circuit_test_data = BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant::FirstTwoValid, + }; + + TestCaseRunner::new(bridge_circuit_test_data).run().await +} + +#[tokio::test] +#[ignore = "Only run this test manually, it's for data generation purposes"] +async fn challenge_tx_with_annex() -> Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + 
std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let watchtower_challenge_tx_variant = BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant::WithAnnex, + }; + TestCaseRunner::new(watchtower_challenge_tx_variant) + .run() + .await +} + +#[tokio::test] +#[ignore = "Only run this test manually, it's for data generation purposes"] +async fn challenge_tx_with_large_input() -> Result<()> { + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let watchtower_challenge_tx_variant = BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant::LargeInput, + }; + TestCaseRunner::new(watchtower_challenge_tx_variant) + .run() + .await +} + +#[tokio::test] +#[ignore = "Only run this test manually, it's for data generation purposes"] +async fn challenge_tx_with_large_output() -> Result<()> { + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let watchtower_challenge_tx_variant = BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant::LargeOutput, + }; + TestCaseRunner::new(watchtower_challenge_tx_variant) + .run() + .await +} + +#[tokio::test] +#[ignore = "Only run this test manually, it's for data generation purposes"] +async fn challenge_tx_with_both_large_input_and_output() -> Result<()> { + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let watchtower_challenge_tx_variant = BridgeCircuitTestData { + variant: BridgeCircuitTestDataVariant::LargeInputAndOutput, + }; + TestCaseRunner::new(watchtower_challenge_tx_variant) + .run() + .await +} diff --git a/core/src/test/common/citrea/bitcoin_merkle.rs b/core/src/test/common/citrea/bitcoin_merkle.rs new file mode 100644 index 000000000..bef4d09d2 --- /dev/null +++ b/core/src/test/common/citrea/bitcoin_merkle.rs @@ -0,0 +1,174 @@ +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BitcoinMerkleTree { + 
depth: u32, + nodes: Vec>, +} + +pub fn calculate_double_sha256(input: &[u8]) -> [u8; 32] { + let mut hasher = Sha256::default(); + hasher.update(input); + let result = hasher.finalize_reset(); + hasher.update(result); + hasher.finalize().into() +} + +impl BitcoinMerkleTree { + pub fn new(transactions: Vec<[u8; 32]>) -> Self { + // assert!(depth > 0, "Depth must be greater than 0"); + // assert!(depth <= 254, "Depth must be less than or equal to 254"); + // assert!( + // u32::pow(2, (depth) as u32) >= transactions.len() as u32, + // "Too many transactions for this depth" + // ); + let depth = (transactions.len() - 1).ilog(2) + 1; + let mut tree = BitcoinMerkleTree { + depth, + nodes: vec![], + }; + + // Populate leaf nodes + tree.nodes.push(vec![]); + for tx in transactions.iter() { + tree.nodes[0].push(*tx); + } + + // Construct the tree + let mut curr_level_offset: usize = 1; + let mut prev_level_size = transactions.len(); + let mut prev_level_index_offset = 0; + let mut preimage: [u8; 64] = [0; 64]; + while prev_level_size > 1 { + // println!("curr_level_offset: {}", curr_level_offset); + // println!("prev_level_size: {}", prev_level_size); + // println!("prev_level_index_offset: {}", prev_level_index_offset); + tree.nodes.push(vec![]); + for i in 0..(prev_level_size / 2) { + preimage[..32].copy_from_slice( + &tree.nodes[curr_level_offset - 1_usize][prev_level_index_offset + i * 2], + ); + preimage[32..].copy_from_slice( + &tree.nodes[curr_level_offset - 1][prev_level_index_offset + i * 2 + 1], + ); + let combined_hash = calculate_double_sha256(&preimage); + tree.nodes[curr_level_offset].push(combined_hash); + } + if prev_level_size % 2 == 1 { + let mut preimage: [u8; 64] = [0; 64]; + preimage[..32].copy_from_slice( + &tree.nodes[curr_level_offset - 1] + [prev_level_index_offset + prev_level_size - 1], + ); + preimage[32..].copy_from_slice( + &tree.nodes[curr_level_offset - 1] + [prev_level_index_offset + prev_level_size - 1], + ); + let combined_hash = 
calculate_double_sha256(&preimage); + tree.nodes[curr_level_offset].push(combined_hash); + } + curr_level_offset += 1; + prev_level_size = prev_level_size.div_ceil(2); + prev_level_index_offset = 0; + } + tree + } + + // Returns the Merkle root + pub fn root(&self) -> [u8; 32] { + self.nodes[self.nodes.len() - 1][0] + } + + pub fn _get_element(&self, level: u32, index: u32) -> [u8; 32] { + self.nodes[level as usize][index as usize] + } + + pub fn get_idx_path(&self, index: u32) -> Vec<[u8; 32]> { + assert!(index < self.nodes[0].len() as u32, "Index out of bounds"); + let mut path = vec![]; + let mut level = 0; + let mut i = index; + while level < self.nodes.len() as u32 - 1 { + if i % 2 == 1 { + path.push(self.nodes[level as usize][i as usize - 1]); + } else if (self.nodes[level as usize].len() - 1) as u32 == i { + path.push(self.nodes[level as usize][i as usize]); + } else { + path.push(self.nodes[level as usize][(i + 1) as usize]); + } + + level += 1; + i /= 2; + } + + path + } + + // pub fn verify_tx_merkle_proof(&self, idx: u32, merkle_proof: Vec<[u8; 32]>) { + // let tx_id = self.nodes[0][idx as usize]; + // let mut preimage: [u8; 64] = [0; 64]; + // let mut combined_hash: [u8; 32] = tx_id.clone(); + // let mut index = idx; + // let mut level: u32 = 0; + // while level < self.depth { + // if index % 2 == 0 { + // preimage[..32].copy_from_slice(&combined_hash); + // preimage[32..].copy_from_slice(&merkle_proof[level as usize]); + // combined_hash = calculate_double_sha256(&preimage); + // } else { + // preimage[..32].copy_from_slice(&merkle_proof[level as usize]); + // preimage[32..].copy_from_slice(&combined_hash); + // combined_hash = calculate_double_sha256(&preimage); + // } + // level += 1; + // index = index / 2; + // } + // assert_eq!(combined_hash, self.root()); + // } + + pub fn calculate_root_with_merkle_proof( + &self, + txid: [u8; 32], + idx: u32, + merkle_proof: Vec<[u8; 32]>, + ) -> [u8; 32] { + let mut preimage: [u8; 64] = [0; 64]; + let mut 
combined_hash: [u8; 32] = txid; + let mut index = idx; + let mut level: u32 = 0; + while level < self.depth { + if index % 2 == 0 { + preimage[..32].copy_from_slice(&combined_hash); + preimage[32..].copy_from_slice(&merkle_proof[level as usize]); + combined_hash = calculate_double_sha256(&preimage); + } else { + preimage[..32].copy_from_slice(&merkle_proof[level as usize]); + preimage[32..].copy_from_slice(&combined_hash); + combined_hash = calculate_double_sha256(&preimage); + } + level += 1; + index /= 2; + } + combined_hash + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_merkle_tree() { + let mut transactions: Vec<[u8; 32]> = vec![]; + for i in 0u8..10u8 { + let tx = [i; 32]; + transactions.push(tx); + } + let tree = BitcoinMerkleTree::new(transactions.clone()); + let root = tree.root(); + let idx_path = tree.get_idx_path(0); + let calculated_root = tree.calculate_root_with_merkle_proof(transactions[0], 0, idx_path); + assert_eq!(root, calculated_root); + } +} diff --git a/core/src/test/common/citrea/client_mock.rs b/core/src/test/common/citrea/client_mock.rs new file mode 100644 index 000000000..083b3b3b8 --- /dev/null +++ b/core/src/test/common/citrea/client_mock.rs @@ -0,0 +1,348 @@ +use crate::{ + citrea::CitreaClientT, + config::protocol::ProtocolParamset, + database::{Database, DatabaseTransaction}, + errors::BridgeError, +}; +use alloy::signers::local::PrivateKeySigner; +use bitcoin::{OutPoint, Txid}; +use circuits_lib::bridge_circuit::structs::{LightClientProof, StorageProof}; +use eyre::Context; +use risc0_zkvm::Receipt; +use std::{ + collections::HashMap, + fmt, + sync::{Arc, LazyLock, Weak}, + time::Duration, +}; +use tokio::sync::{Mutex, MutexGuard}; +use tonic::async_trait; + +pub struct Deposit { + idx: u32, + height: u64, + move_txid: Txid, +} + +pub struct Withdrawal { + idx: u32, + height: u64, + utxo: OutPoint, +} + +pub struct MockCitreaStorage { + #[allow(dead_code)] + name: String, + deposits: Vec, + withdrawals: 
Vec, +} + +impl MockCitreaStorage { + pub fn new(name: String) -> Self { + Self { + name, + deposits: vec![], + withdrawals: vec![], + } + } +} + +#[allow(clippy::type_complexity)] +pub static MOCK_CITREA_GLOBAL: LazyLock< + Arc>>>>, +> = LazyLock::new(|| Arc::new(Mutex::new(HashMap::new()))); + +/// A mock implementation of the CitreaClientTrait. This implementation is used +/// for testing purposes and will generate dummy values. Don't use this in +/// citrea-e2e tests, use the real client. +#[derive(Clone)] +pub struct MockCitreaClient { + storage: Arc>, +} + +impl std::fmt::Debug for MockCitreaClient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "MockCitreaClient") + } +} + +impl MockCitreaClient { + pub async fn get_storage(&self) -> MutexGuard<'_, MockCitreaStorage> { + self.storage.lock().await + } +} + +#[async_trait] +impl CitreaClientT for MockCitreaClient { + async fn get_storage_proof( + &self, + _l2_height: u64, + deposit_index: u32, + ) -> Result { + Ok(StorageProof { + storage_proof_utxo: "".to_string(), + storage_proof_vout: "".to_string(), + storage_proof_deposit_txid: "".to_string(), + index: deposit_index, + }) + } + + async fn fetch_validate_and_store_lcp( + &self, + _payout_block_height: u64, + _deposit_index: u32, + _db: &Database, + _dbtx: Option>, + _paramset: &'static ProtocolParamset, + ) -> Result { + Ok(borsh::from_slice(include_bytes!( + "../../../../../circuits-lib/test_data/lcp_receipt.bin" + )) + .wrap_err("Couldn't create mock receipt")?) + } + /// Connects a database with the given URL which is stored in + /// `citrea_rpc_url`. Other parameters are dumped. 
+ async fn new( + citrea_rpc_url: String, + _light_client_prover_url: String, + _chain_id: u32, + _secret_key: Option, + _timeout: Option, + ) -> Result { + tracing::info!( + "Using the mock Citrea client ({citrea_rpc_url}), beware that data returned from this client is not real" + ); + if citrea_rpc_url.is_empty() { + return Err(eyre::eyre!( + "citrea_rpc_url is empty, please use create_mock_citrea_database to create a mock citrea client" + ) + .into()); + } + + let mut global = MOCK_CITREA_GLOBAL.lock().await; + if global.contains_key(&citrea_rpc_url) { + let storage = global + .get(&citrea_rpc_url) + .unwrap() + .upgrade() + .expect("Storage dropped during test"); + Ok(MockCitreaClient { storage }) + } else { + let storage = Arc::new(Mutex::new(MockCitreaStorage::new(citrea_rpc_url.clone()))); + global.insert(citrea_rpc_url.clone(), Arc::downgrade(&storage)); + Ok(MockCitreaClient { storage }) + } + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::DEBUG))] + async fn collect_deposit_move_txids( + &self, + last_deposit_idx: Option, + to_height: u64, + ) -> Result, BridgeError> { + let storage = self.storage.lock().await; + let start_idx = match last_deposit_idx { + Some(idx) => idx + 1, + None => 0, + }; + + let results: Vec<(u64, Txid)> = storage + .deposits + .iter() + .filter(|deposit| deposit.height <= to_height && deposit.idx >= start_idx) + .map(|deposit| (deposit.idx as u64, deposit.move_txid)) + .collect(); + + Ok(results) + } + + #[tracing::instrument(skip(self), err(level = tracing::Level::ERROR), ret(level = tracing::Level::DEBUG))] + async fn collect_withdrawal_utxos( + &self, + last_withdrawal_idx: Option, + to_height: u64, + ) -> Result, BridgeError> { + let storage = self.storage.lock().await; + let start_idx = match last_withdrawal_idx { + Some(idx) => idx + 1, + None => 0, + }; + + let results: Vec<(u64, OutPoint)> = storage + .withdrawals + .iter() + .filter(|withdrawal| withdrawal.height <= 
to_height && withdrawal.idx >= start_idx) + .map(|withdrawal| (withdrawal.idx as u64, withdrawal.utxo)) + .collect(); + + Ok(results) + } + + async fn get_light_client_proof( + &self, + l1_height: u64, + _paramset: &'static ProtocolParamset, + ) -> Result, BridgeError> { + Ok(Some(( + LightClientProof { + lc_journal: vec![], + l2_height: l1_height.to_string(), + }, + borsh::from_slice(include_bytes!( + "../../../../../circuits-lib/test_data/lcp_receipt.bin" + )) + .wrap_err("Couldn't create mock receipt")?, + l1_height, + ))) + } + + async fn get_citrea_l2_height_range( + &self, + block_height: u64, + _timeout: Duration, + _paramset: &'static ProtocolParamset, + ) -> Result<(u64, u64), BridgeError> { + Ok(( + if block_height == 0 { + 0 + } else { + block_height - 1 + }, + block_height, + )) + } + + async fn get_replacement_deposit_move_txids( + &self, + _from_height: u64, + _to_height: u64, + ) -> Result, BridgeError> { + Ok(vec![]) + } + + async fn check_nofn_correctness( + &self, + _nofn_xonly_pk: bitcoin::XOnlyPublicKey, + ) -> Result<(), BridgeError> { + Ok(()) + } +} + +impl MockCitreaClient { + /// Pushes a deposit move txid to the given height. + pub async fn insert_deposit_move_txid(&mut self, height: u64, txid: Txid) { + let mut storage = self.storage.lock().await; + let idx = storage.deposits.len() as u32; + + tracing::debug!("Inserting deposit move txid {txid:?} at height {height} with index {idx}"); + storage.deposits.push(Deposit { + idx, + height, + move_txid: txid, + }); + } + + /// Pushes a withdrawal utxo and its index to the given height. 
+ pub async fn insert_withdrawal_utxo(&mut self, height: u64, utxo: OutPoint) { + let mut storage = self.storage.lock().await; + let idx = storage.withdrawals.len() as u32; + + tracing::debug!("Inserting withdrawal utxo {utxo:?} at height {height} with index {idx}"); + storage.withdrawals.push(Withdrawal { idx, height, utxo }); + } +} + +#[cfg(all(test, feature = "integration-tests"))] +mod tests { + use crate::{citrea::CitreaClientT, test::common::create_test_config_with_thread_name}; + use bitcoin::hashes::Hash; + + #[tokio::test] + async fn deposit_move_txid() { + let config = create_test_config_with_thread_name().await; + let mut client = super::MockCitreaClient::new( + config.citrea_rpc_url, + "".to_string(), + config.citrea_chain_id, + None, + None, + ) + .await + .unwrap(); + + assert!(client + .collect_deposit_move_txids(None, 2) + .await + .unwrap() + .is_empty()); + + client + .insert_deposit_move_txid(1, bitcoin::Txid::from_slice(&[1; 32]).unwrap()) + .await; + client + .insert_deposit_move_txid(1, bitcoin::Txid::from_slice(&[2; 32]).unwrap()) + .await; + + let txids = client.collect_deposit_move_txids(None, 1).await.unwrap(); + assert_eq!(txids.len(), 2); + assert_eq!(txids[0].1, bitcoin::Txid::from_slice(&[1; 32]).unwrap()); + + let txids = client.collect_deposit_move_txids(Some(0), 2).await.unwrap(); + assert_eq!(txids.len(), 1); + assert_eq!(txids[0].1, bitcoin::Txid::from_slice(&[2; 32]).unwrap()); + + // Idx 1 is not available till height 2 (0 indexed). 
+ assert!(client + .collect_deposit_move_txids(Some(0), 0) + .await + .unwrap() + .is_empty()); + } + + #[tokio::test] + async fn withdrawal_utxos() { + let config = create_test_config_with_thread_name().await; + let mut client = super::MockCitreaClient::new( + config.citrea_rpc_url, + "".to_string(), + config.citrea_chain_id, + None, + None, + ) + .await + .unwrap(); + + assert!(client + .collect_withdrawal_utxos(None, 2) + .await + .unwrap() + .is_empty()); + + client + .insert_withdrawal_utxo( + 1, + bitcoin::OutPoint::new(bitcoin::Txid::from_slice(&[1; 32]).unwrap(), 0), + ) + .await; + client + .insert_withdrawal_utxo( + 1, + bitcoin::OutPoint::new(bitcoin::Txid::from_slice(&[2; 32]).unwrap(), 1), + ) + .await; + + let utxos = client.collect_withdrawal_utxos(None, 2).await.unwrap(); + assert_eq!(utxos.len(), 2); + assert_eq!( + utxos[0].1, + bitcoin::OutPoint::new(bitcoin::Txid::from_slice(&[1; 32]).unwrap(), 0) + ); + + let utxos = client.collect_withdrawal_utxos(Some(0), 2).await.unwrap(); + assert_eq!(utxos.len(), 1); + assert_eq!( + utxos[0].1, + bitcoin::OutPoint::new(bitcoin::Txid::from_slice(&[2; 32]).unwrap(), 1) + ); + } +} diff --git a/core/src/test/common/citrea/mod.rs b/core/src/test/common/citrea/mod.rs new file mode 100644 index 000000000..87eafe9e7 --- /dev/null +++ b/core/src/test/common/citrea/mod.rs @@ -0,0 +1,591 @@ +//! 
# Citrea Related Utilities + +use crate::bitvm_client::SECP; +use crate::citrea::{CitreaClient, SATS_TO_WEI_MULTIPLIER}; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::musig2::AggregateFromPublicKeys; +use crate::test::common::generate_withdrawal_transaction_and_signature; +use crate::{config::BridgeConfig, errors::BridgeError}; +use alloy::primitives::U256; +use bitcoin::consensus::Encodable; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::{PublicKey, SecretKey}; +use bitcoin::{Address, Amount, Block, OutPoint, Transaction, TxOut, Txid, VarInt, XOnlyPublicKey}; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::bitcoin::DEFAULT_FINALITY_DEPTH; +use citrea_e2e::{ + bitcoin::BitcoinNode, + config::{BatchProverConfig, EmptyConfig, LightClientProverConfig, SequencerConfig}, + framework::TestFramework, + node::{Node, NodeKind}, +}; +pub use client_mock::*; +use eyre::Context; +use jsonrpsee::http_client::HttpClient; +pub use parameters::*; +pub use requests::*; + +use super::test_actors::TestActors; + +mod bitcoin_merkle; +mod client_mock; +mod parameters; +mod requests; + +/// Calculates bridge params dynamically with the N-of-N public key which +/// calculated from the verifier secret keys in `BridgeConfig::default`. 
+pub fn get_bridge_params() -> String { + let config = BridgeConfig::default(); + + let verifiers_secret_keys = config.test_params.all_verifiers_secret_keys; + let secp = bitcoin::secp256k1::Secp256k1::new(); + let verifiers_public_keys: Vec = verifiers_secret_keys + .iter() + .map(|sk| PublicKey::from_secret_key(&secp, sk)) + .collect(); + + let nofn_xonly_pk = + bitcoin::XOnlyPublicKey::from_musig2_pks(verifiers_public_keys.clone(), None) + .unwrap() + .to_string(); + + let bridge_params = format!( + "000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000008ac7230489e80000000000000000000000000000000000000000000000000000000000000000002d4120{}ac006306636974726561140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016800000000000000000000000000000000000000000000000000000000000000", nofn_xonly_pk + ); + + tracing::info!("Bridge params: {}", bridge_params); + + bridge_params +} + +/// Citrea e2e hardcoded EVM secret keys. +pub const SECRET_KEYS: [&str; 10] = [ + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", + "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", + "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a", + "0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6", + "0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a", + "0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba", + "0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e", + "0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356", + "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97", + "0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6", +]; + +/// Citrea e2e hardcoded EVM addresses. 
+pub const EVM_ADDRESSES: [&str; 10] = [ + "f39Fd6e51aad88F6F4ce6aB8827279cffFb92266", + "70997970C51812dc3A010C7d01b50e0d17dc79C8", + "3C44CdDdB6a900fa2b585dd299e03d12FA4293BC", + "90F79bf6EB2c4f870365E785982E1f101E93b906", + "15d34AAf54267DB7D7c367839AAf71A00a2C6A65", + "9965507D1a55bcC2695C58ba16FB37d819B0A4dc", + "976EA74026E726554dB657fA54763abd0C3a0aa9", + "14dC79964da2C08b23698B3D3cc7Ca32193d9955", + "23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f", + "a0Ee7A142d267C1f36714E4a8F75612F20a79720", +]; + +/// Starts typical nodes with typical configs for a test that needs Citrea. +pub async fn start_citrea( + sequencer_config: SequencerConfig, + f: &mut TestFramework, +) -> citrea_e2e::Result<( + &Node, + &mut Node, + Option<&Node>, + Option<&Node>, + &BitcoinNode, +)> { + let sequencer = f.sequencer.as_ref().expect("Sequencer is present"); + let full_node = f.full_node.as_mut().expect("Full node is present"); + let batch_prover = f.batch_prover.as_ref(); + let light_client_prover = f.light_client_prover.as_ref(); + let da = f.bitcoin_nodes.get(0).expect("There is a bitcoin node"); + + let min_soft_confirmations_per_commitment = sequencer_config.max_l2_blocks_per_commitment; + + if sequencer_config.test_mode { + for _ in 0..min_soft_confirmations_per_commitment { + sequencer.client.send_publish_batch_request().await?; + } + } + sequencer + .wait_for_l2_height(min_soft_confirmations_per_commitment, None) + .await?; + println!("Sequencer is ready"); + + Ok((sequencer, full_node, light_client_prover, batch_prover, da)) +} + +/// Updates given config with the values set by the Citrea e2e. 
+pub fn update_config_with_citrea_e2e_values( + config: &mut BridgeConfig, + da: &citrea_e2e::bitcoin::BitcoinNode, + sequencer: &citrea_e2e::node::Node, + light_client_prover: Option<(&str, u16)>, +) { + config.bitcoin_rpc_user = da.config.rpc_user.clone().into(); + config.bitcoin_rpc_password = da.config.rpc_password.clone().into(); + config.bitcoin_rpc_url = format!( + "http://127.0.0.1:{}/wallet/{}", + da.config.rpc_port, + NodeKind::Bitcoin // citrea-e2e internal. + ); + + let citrea_url = format!( + "http://{}:{}", + sequencer.config.rollup.rpc.bind_host, sequencer.config.rollup.rpc.bind_port + ); + config.citrea_rpc_url = citrea_url; + + if let Some(light_client_prover) = light_client_prover { + let citrea_light_client_prover_url = + format!("http://{}:{}", light_client_prover.0, light_client_prover.1); + config.citrea_light_client_prover_url = citrea_light_client_prover_url; + } else { + let citrea_light_client_prover_url = format!("http://{}:{}", "127.0.0.1", 8080); // Dummy value + config.citrea_light_client_prover_url = citrea_light_client_prover_url; + } +} + +/// Wait until the light client contract is updated to the given block height +pub async fn wait_until_lc_contract_updated( + client: &HttpClient, + block_height: u64, +) -> Result<(), BridgeError> { + let mut attempts = 0; + let max_attempts = 600; + + while attempts < max_attempts { + let block_number = block_number(client).await?; + if block_number >= block_height as u32 { + break; + } + attempts += 1; + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + } + Ok(()) +} + +/// Convert scriptbuf into how it would look like as a tapscript in a witness +/// (basically adds a VarInt to the beginning of the script) +/// Then search the script bytes, find the location where the next bytes exactly matches cut_bytes +/// cut it from the script and return the resulting prefix and suffix +pub fn extract_suffix_and_prefix_from_witness_script( + script: bitcoin::ScriptBuf, + cut_bytes: 
&[u8], +) -> eyre::Result<(Vec, Vec)> { + // In the witness, the length of the script is appended as VarInt first + // contract expects this VarInt in the script prefix so we add it manually here + let mut script_bytes = script.into_bytes(); + let varint = VarInt::from(script_bytes.len()); + let mut varint_vec: Vec = Vec::with_capacity(varint.size()); + varint.consensus_encode(&mut varint_vec)?; + + // Combine varint and script_bytes back to back + varint_vec.append(&mut script_bytes); + let script_bytes = varint_vec; + + // Find the first occurrence of cut_bytes in the script + if let Some(pos) = script_bytes + .windows(cut_bytes.len()) + .position(|window| window == cut_bytes) + { + let prefix = script_bytes[..pos].to_vec(); + let suffix = script_bytes[pos + cut_bytes.len()..].to_vec(); + Ok((prefix, suffix)) + } else { + // If cut_bytes is not found, return an error + Err(eyre::eyre!("The requested bytes not found in script")) + } +} + +/// helper struct to hold e2e nodes and relevant clients/configs +pub struct CitreaE2EData<'a> { + pub sequencer: &'a Node, + pub full_node: &'a Node, + pub lc_prover: &'a Node, + pub batch_prover: &'a Node, + pub da: &'a BitcoinNode, + pub config: BridgeConfig, + pub citrea_client: &'a CitreaClient, + pub rpc: &'a ExtendedBitcoinRpc, +} + +/// Creates a new withdrawal utxo and register to citrea using safeWithdraw +/// First it registers the deposit to Citrea. +/// After it is registered a new utxo is created and mined, it is registered to citrea +/// using safeWithdraw. Afterwards, this utxo is saved on contract and operators can use this +/// utxo to fulfill withdrawals. +/// +/// # Parameters +/// +/// - `move_txid`: Move txid of the deposit. +/// - `e2e`: Citrea e2e data. +/// - `actors`: Test actors. +/// +/// # Returns +/// +/// A tuple of: +/// +/// - [`OutPoint`]: UTXO for the given withdrawal. +/// - [`TxOut`]: Output corresponding to the withdrawal. +/// - [`schnorr::Signature`]: Signature for the withdrawal utxo. 
+pub async fn get_new_withdrawal_utxo_and_register_to_citrea( + move_txid: Txid, + e2e: &CitreaE2EData<'_>, + actors: &TestActors, +) -> (OutPoint, TxOut, bitcoin::secp256k1::schnorr::Signature) { + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH + 2, actors) + .await + .unwrap(); + force_sequencer_to_commit(e2e.sequencer).await.unwrap(); + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH + 2, actors) + .await + .unwrap(); + // Send deposit to Citrea + let (tx, block, block_height) = get_tx_information_for_citrea(e2e, move_txid).await.unwrap(); + + tracing::info!("Depositing to Citrea..."); + + deposit( + e2e.rpc, + e2e.sequencer.client.http_client().clone(), + block, + block_height.try_into().unwrap(), + tx, + ) + .await + .unwrap(); + + force_sequencer_to_commit(e2e.sequencer).await.unwrap(); + + e2e.rpc.mine_blocks_while_synced(1, actors).await.unwrap(); + + // Wait for the deposit to be processed. + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + // After the deposit, the balance should be non-zero. + assert_ne!( + eth_get_balance( + e2e.sequencer.client.http_client().clone(), + crate::EVMAddress([1; 20]), + ) + .await + .unwrap(), + 0 + ); + + tracing::info!("Deposit operations are successful."); + + // Prepare withdrawal transaction. 
+ let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + e2e.config.protocol_paramset().network, + ); + let (withdrawal_utxo_with_txout, payout_txout, sig) = + generate_withdrawal_transaction_and_signature( + &e2e.config, + e2e.rpc, + &withdrawal_address, + e2e.config.protocol_paramset().bridge_amount + - e2e + .config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH + 2, actors) + .await + .unwrap(); + force_sequencer_to_commit(e2e.sequencer).await.unwrap(); + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH + 2, actors) + .await + .unwrap(); + + let params = get_citrea_safe_withdraw_params( + e2e.rpc, + withdrawal_utxo_with_txout.clone(), + payout_txout.clone(), + sig, + ) + .await + .unwrap(); + + tracing::info!("Params: {:?}", params); + + let withdrawal_utxo = withdrawal_utxo_with_txout.outpoint; + tracing::info!("Created withdrawal UTXO: {:?}", withdrawal_utxo); + + let citrea_withdrawal_tx = e2e + .citrea_client + .contract + .safeWithdraw(params.0, params.1, params.2, params.3, params.4) + .value(U256::from( + e2e.config.protocol_paramset().bridge_amount.to_sat() * SATS_TO_WEI_MULTIPLIER, + )) + .send() + .await + .unwrap(); + tracing::info!("Withdrawal TX sent in Citrea"); + + // 1. 
force sequencer to commit + force_sequencer_to_commit(e2e.sequencer).await.unwrap(); + tracing::info!("Publish batch request sent"); + + let receipt = citrea_withdrawal_tx.get_receipt().await.unwrap(); + tracing::info!("Citrea withdrawal tx receipt: {:?}", receipt); + + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH + 2, actors) + .await + .unwrap(); + + (withdrawal_utxo, payout_txout, sig) +} + +/// call citrea_testPublishBlock max_l2_blocks_per_commitment times +pub async fn force_sequencer_to_commit(sequencer: &Node) -> eyre::Result<()> { + for _ in 0..sequencer.config.node.max_l2_blocks_per_commitment { + sequencer + .client + .send_publish_batch_request() + .await + .map_err(|e| eyre::eyre!("Failed to publish block: {:?}", e))?; + } + Ok(()) +} + +/// For a given txid, get the full tx, block that includes it and height of the block +pub async fn get_tx_information_for_citrea( + e2e: &CitreaE2EData<'_>, + txid: Txid, +) -> eyre::Result<(Transaction, Block, u64)> { + let tx = e2e.rpc.get_raw_transaction(&txid, None).await?; + let tx_info = e2e.rpc.get_raw_transaction_info(&txid, None).await?; + let block = e2e.rpc.get_block(&tx_info.blockhash.unwrap()).await?; + let block_height = e2e.rpc.get_block_info(&block.block_hash()).await?.height as u64; + Ok((tx, block, block_height)) +} + +/// After a replacement deposit is done, register this replacement on citrea +/// The move_txid for the corresponding deposit_id will be updated to replacement_move_txid +pub async fn register_replacement_deposit_to_citrea( + e2e: &CitreaE2EData<'_>, + replacement_move_txid: Txid, + deposit_id: u32, + actors: &TestActors, +) -> eyre::Result<()> { + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH, actors) + .await + .unwrap(); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + force_sequencer_to_commit(e2e.sequencer).await.unwrap(); + tracing::info!("Setting operator to our address"); + // first set our address as operator + let set_operator_tx = 
e2e + .citrea_client + .contract + .setOperator(e2e.citrea_client.wallet_address) + .send() + .await?; + force_sequencer_to_commit(e2e.sequencer).await?; + let receipt = set_operator_tx.get_receipt().await?; + tracing::info!("Set operator tx receipt: {:?}", receipt); + + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH, actors) + .await + .unwrap(); + + let (replace_tx, block, block_height) = + get_tx_information_for_citrea(e2e, replacement_move_txid).await?; + + tracing::warn!("Replace transaction: {:?}", replace_tx); + tracing::warn!("Replace transaction block: {:?}", block); + + // wait for light client to sync until replacement deposit tx + e2e.lc_prover + .wait_for_l1_height(block_height, None) + .await + .map_err(|e| eyre::eyre!("Failed to wait for light client to sync: {:?}", e))?; + + wait_until_lc_contract_updated(e2e.sequencer.client.http_client(), block_height) + .await + .unwrap(); + + let (replace_tx, tx_proof, sha_script_pubkeys) = get_citrea_deposit_params( + e2e.rpc, + replace_tx, + block, + block_height as u32, + replacement_move_txid, + ) + .await?; + + tracing::warn!("Replace transaction block height: {:?}", block_height); + tracing::warn!( + "Current chain height: {:?}", + e2e.rpc.get_current_chain_height().await.unwrap() + ); + tracing::warn!("Replace transaction tx proof : {:?}", tx_proof); + + let replace_deposit_tx = e2e + .citrea_client + .contract + .replaceDeposit( + replace_tx, + tx_proof, + U256::from(deposit_id), + sha_script_pubkeys, + ) + .from(e2e.citrea_client.wallet_address) + .send() + .await?; + + force_sequencer_to_commit(e2e.sequencer).await?; + + let receipt = replace_deposit_tx.get_receipt().await?; + tracing::info!("Replace deposit tx receipt: {:?}", receipt); + + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH, actors) + .await + .unwrap(); + let finalized_height = e2e.da.get_finalized_height(None).await.unwrap(); + e2e.batch_prover + .wait_for_l1_height(finalized_height, None) + .await + .unwrap(); + 
e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH + 2, actors) + .await + .unwrap(); + + Ok(()) +} + +impl CitreaClient { + /// Update the nofn aggregated key to the given xonly pk + /// It updates both deposit and replacement scripts on Citrea side. + /// To do this it creates a dummy deposit and replacement deposit. + /// For the deposit script, it cuts the EVM address (only dynamic part) from the script and + /// sends the prefix and suffix of the remaining script to citrea. + /// For the replacement script, it cuts the old move txid from the script (again the only dynamic part) instead. + pub async fn update_nofn_aggregated_key( + &self, + nofn_xonly_pk: XOnlyPublicKey, + paramset: &'static crate::config::protocol::ProtocolParamset, + sequencer: &citrea_e2e::node::Node, + ) -> eyre::Result<()> { + use std::str::FromStr; + + use crate::deposit::{ + Actors, BaseDepositData, DepositData, DepositInfo, DepositType, ReplacementDepositData, + SecurityCouncil, + }; + use crate::test::common::citrea::force_sequencer_to_commit; + use crate::EVMAddress; + + // create a dummy script with nofn xonly pk + let dummy_evm_address: EVMAddress = EVMAddress(std::array::from_fn(|i| i as u8)); + let mut dummy_base_deposit_data = DepositData { + nofn_xonly_pk: Some(nofn_xonly_pk), + deposit: DepositInfo { + deposit_outpoint: OutPoint::default(), + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address: dummy_evm_address, + recovery_taproot_address: bitcoin::Address::from_str( + "bcrt1p65yp9q9fxtf7dyvthyrx26xxm2czanvrnh9rtvphmlsjvhdt4k6qw4pkss", // dummy address + ) + .unwrap(), + }), + }, + actors: Actors { + verifiers: vec![], + watchtowers: vec![], + operators: vec![], + }, + security_council: SecurityCouncil { + pks: vec![], + threshold: 0, + }, + }; + + let base_deposit_script = + dummy_base_deposit_data.get_deposit_scripts(paramset)?[0].to_script_buf(); + + let (deposit_prefix, deposit_suffix) = + 
crate::test::common::citrea::extract_suffix_and_prefix_from_witness_script( + base_deposit_script, + &dummy_evm_address.0, + )?; + + // Make the transaction more explicit + let dep_script_tx = self + .contract + .setDepositScript(deposit_prefix.into(), deposit_suffix.into()) + .from(self.wallet_address) + .send() + .await + .wrap_err("Failed to update nofn aggregated key")?; + + force_sequencer_to_commit(sequencer).await?; + + dep_script_tx.get_receipt().await?; + + // now update the replacement script + let dummy_old_move_txid = Txid::from_byte_array(std::array::from_fn(|i| i as u8)); + let mut dummy_replacement_deposit_data = DepositData { + nofn_xonly_pk: Some(nofn_xonly_pk), + deposit: DepositInfo { + deposit_outpoint: OutPoint::default(), + deposit_type: DepositType::ReplacementDeposit(ReplacementDepositData { + old_move_txid: dummy_old_move_txid, + }), + }, + actors: Actors { + verifiers: vec![], + watchtowers: vec![], + operators: vec![], + }, + security_council: SecurityCouncil { + pks: vec![], + threshold: 0, + }, + }; + + let replacement_deposit_script = + dummy_replacement_deposit_data.get_deposit_scripts(paramset)?[0].to_script_buf(); + + let (replacement_prefix, replacement_suffix) = + crate::test::common::citrea::extract_suffix_and_prefix_from_witness_script( + replacement_deposit_script, + dummy_old_move_txid.as_byte_array(), + )?; + + let rep_deposit_tx = self + .contract + .setReplaceScript(replacement_prefix.into(), replacement_suffix.into()) + .from(self.wallet_address) + .send() + .await + .wrap_err("Failed to update nofn aggregated key")?; + + force_sequencer_to_commit(sequencer).await?; + + rep_deposit_tx.get_receipt().await?; + + Ok(()) + } +} diff --git a/core/src/test/common/citrea/parameters.rs b/core/src/test/common/citrea/parameters.rs new file mode 100644 index 000000000..3023e735d --- /dev/null +++ b/core/src/test/common/citrea/parameters.rs @@ -0,0 +1,315 @@ +//! 
# Parameter Builder For Citrea Requests + +use crate::builder; +use crate::builder::script::SpendPath; +use crate::builder::transaction::TransactionType; +use crate::citrea::Bridge::MerkleProof as CitreaMerkleProof; +use crate::citrea::Bridge::Transaction as CitreaTransaction; +use crate::constants::NON_STANDARD_V3; +use crate::errors::BridgeError; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::rpc::clementine::NormalSignatureKind; +use crate::test::common::citrea::bitcoin_merkle::BitcoinMerkleTree; +use crate::UTXO; +use alloy::primitives::{Bytes, FixedBytes, Uint}; +use bitcoin::consensus::Encodable; +use bitcoin::hashes::sha256; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::schnorr; +use bitcoin::{Block, Transaction, Txid}; +use bitcoincore_rpc::RpcApi; +use eyre::Context; + +/// Returns merkle proof for a given transaction (via txid) in a block. +fn get_block_merkle_proof( + block: &Block, + target_txid: Txid, + is_witness_merkle_proof: bool, +) -> Result<(usize, Vec), BridgeError> { + let mut txid_index = 0; + let txids = block + .txdata + .iter() + .enumerate() + .map(|(i, tx)| { + let txid = tx.compute_txid(); + if txid == target_txid { + txid_index = i; + } + + if is_witness_merkle_proof { + if i == 0 { + [0; 32] + } else { + let wtxid = tx.compute_wtxid(); + wtxid.as_byte_array().to_owned() + } + } else { + txid.as_byte_array().to_owned() + } + }) + .collect::>(); + + let merkle_tree = BitcoinMerkleTree::new(txids.clone()); + let witness_idx_path = merkle_tree.get_idx_path(txid_index.try_into().unwrap()); + + let _root = merkle_tree.calculate_root_with_merkle_proof( + txids[txid_index], + txid_index.try_into().unwrap(), + witness_idx_path.clone(), + ); + + Ok((txid_index, witness_idx_path.into_iter().flatten().collect())) +} + +fn get_transaction_details_for_citrea( + transaction: &Transaction, +) -> Result { + let version = (transaction.version.0 as u32).to_le_bytes(); + let flag: u16 = 1; + + let vin = [ + 
vec![transaction.input.len() as u8], + transaction + .input + .iter() + .map(|x| bitcoin::consensus::serialize(&x)) + .collect::>() + .into_iter() + .flatten() + .collect::>(), + ] + .concat(); + + let vout = [ + vec![transaction.output.len() as u8], + transaction + .output + .iter() + .map(|x| bitcoin::consensus::serialize(&x)) + .collect::>() + .into_iter() + .flatten() + .collect::>(), + ] + .concat(); + + let witness: Vec = transaction + .input + .iter() + .map(|param| { + let mut raw = Vec::new(); + param + .witness + .consensus_encode(&mut raw) + .map_err(|e| eyre::eyre!("Can't encode param: {}", e))?; + + Ok::, BridgeError>(raw) + }) + .collect::, _>>()? + .into_iter() + .flatten() + .collect::>(); + + let locktime = bitcoin::consensus::serialize(&transaction.lock_time); + let locktime: [u8; 4] = locktime.try_into().unwrap(); + Ok(CitreaTransaction { + version: FixedBytes::from(version), + flag: FixedBytes::from(flag), + vin: Bytes::copy_from_slice(&vin), + vout: Bytes::copy_from_slice(&vout), + witness: Bytes::copy_from_slice(&witness), + locktime: FixedBytes::from(locktime), + }) +} + +fn get_transaction_merkle_proof_for_citrea( + block_height: u32, + block: &Block, + txid: Txid, + is_witness_merkle_proof: bool, +) -> Result { + let (index, merkle_proof) = get_block_merkle_proof(block, txid, is_witness_merkle_proof)?; + + Ok(CitreaMerkleProof { + intermediateNodes: Bytes::copy_from_slice(&merkle_proof), + blockHeight: Uint::from(block_height), + index: Uint::from(index), + }) +} + +async fn get_transaction_sha_script_pubkeys_for_citrea( + rpc: &ExtendedBitcoinRpc, + transaction: Transaction, +) -> Result, BridgeError> { + let mut enc_script_pubkeys = sha256::Hash::engine(); + for input in transaction.input { + let prevout = rpc.get_txout_from_outpoint(&input.previous_output).await?; + prevout + .script_pubkey + .consensus_encode(&mut enc_script_pubkeys) + .unwrap(); + } + let sha_script_pubkeys = sha256::Hash::from_engine(enc_script_pubkeys); + + let 
sha_script_pks: [u8; 32] = sha_script_pubkeys + .as_byte_array() + .to_vec() + .try_into() + .unwrap(); + + let sha_script_pubkeys = FixedBytes::from(sha_script_pks); + + Ok(sha_script_pubkeys) +} + +/// Returns [`CitreaTransaction`] for a given transaction, which can be later +/// used for deposit and withdrawal operations. +pub async fn get_citrea_deposit_params( + rpc: &ExtendedBitcoinRpc, + transaction: Transaction, + block: Block, + block_height: u32, + txid: Txid, +) -> Result<(CitreaTransaction, CitreaMerkleProof, FixedBytes<32>), BridgeError> { + let tp = get_transaction_details_for_citrea(&transaction)?; + let mp = get_transaction_merkle_proof_for_citrea(block_height, &block, txid, true)?; + let sha_script_pubkeys = + get_transaction_sha_script_pubkeys_for_citrea(rpc, transaction).await?; + Ok((tp, mp, sha_script_pubkeys)) +} + +pub async fn get_citrea_safe_withdraw_params( + rpc: &ExtendedBitcoinRpc, + withdrawal_dust_utxo: UTXO, + payout_output: bitcoin::TxOut, + sig: schnorr::Signature, +) -> Result< + ( + CitreaTransaction, + CitreaMerkleProof, + CitreaTransaction, + Bytes, + Bytes, + ), + BridgeError, +> { + let prepare_tx = rpc + .get_tx_of_txid(&withdrawal_dust_utxo.outpoint.txid) + .await?; + + let prepare_tx_struct = get_transaction_details_for_citrea(&prepare_tx)?; + + let prepare_tx_blockhash = rpc + .get_blockhash_of_tx(&withdrawal_dust_utxo.outpoint.txid) + .await?; + let prepare_tx_block_height = rpc + .get_block_info(&prepare_tx_blockhash) + .await + .wrap_err("Failed to get prepare tx block height")? 
+ .height; + let prepare_tx_block_header = rpc + .get_block_header(&prepare_tx_blockhash) + .await + .wrap_err("Failed to get prepare tx block header")?; + let prepare_tx_block = rpc + .get_block(&prepare_tx_blockhash) + .await + .wrap_err("Failed to get prepare tx block")?; + + let prepare_tx_mp = get_transaction_merkle_proof_for_citrea( + prepare_tx_block_height as u32, + &prepare_tx_block, + withdrawal_dust_utxo.outpoint.txid, + false, + )?; + + let txin = builder::transaction::input::SpendableTxIn::new( + withdrawal_dust_utxo.outpoint, + withdrawal_dust_utxo.txout.clone(), + vec![], + None, + ); + + let unspent_txout = + builder::transaction::output::UnspentTxOut::from_partial(payout_output.clone()); + + let mut tx = builder::transaction::TxHandlerBuilder::new(TransactionType::Payout) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::NotStored, + txin, + SpendPath::KeySpend, + builder::transaction::DEFAULT_SEQUENCE, + ) + .add_output(unspent_txout.clone()) + .finalize(); + + let taproot_signature = bitcoin::taproot::Signature { + signature: sig, + sighash_type: bitcoin::sighash::TapSighashType::SinglePlusAnyoneCanPay, + }; + + tx.set_p2tr_key_spend_witness(&taproot_signature, 0)?; + + let payout_transaction = tx.get_cached_tx(); + + let payout_tx_params = get_transaction_details_for_citrea(payout_transaction)?; + + let block_header_bytes = + Bytes::copy_from_slice(&bitcoin::consensus::serialize(&prepare_tx_block_header)); + + let output_script_pk_bytes = Bytes::copy_from_slice( + &bitcoin::consensus::serialize(&payout_transaction.output[0].script_pubkey) + .iter() + .skip(1) + .copied() + .collect::>(), + ); + + Ok(( + prepare_tx_struct, + prepare_tx_mp, + payout_tx_params, + block_header_bytes, + output_script_pk_bytes, + )) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; + use bitcoincore_rpc::RpcApi; + use std::str::FromStr; + + #[ignore = "Manual testing utility"] + #[tokio::test] + 
async fn test_get_citrea_deposit_params() { + let rpc = ExtendedBitcoinRpc::connect( + "http://127.0.0.1:38332".to_string(), + "bitcoin".to_string().into(), + "bitcoin".to_string().into(), + None, + ) + .await + .unwrap(); + + let txid_str = "95fe701dd1fab6677d23e550dd7b7af12c9288ec209acb84bcc06708b8181d6a"; + let txid = Txid::from_str(txid_str).unwrap(); + let get_raw_transaction_result = rpc.get_raw_transaction_info(&txid, None).await.unwrap(); + let block_hash = get_raw_transaction_result.blockhash.unwrap(); + let block = rpc.get_block(&block_hash).await.unwrap(); + let block_info = rpc.get_block_info(&block_hash).await.unwrap(); + let tx = rpc.get_raw_transaction(&txid, None).await.unwrap(); + println!( + "Raw tx: {:?}", + hex::encode(bitcoin::consensus::serialize(&tx)) + ); + let transaction_params = + get_citrea_deposit_params(&rpc, tx, block, block_info.height as u32, txid) + .await + .unwrap(); + println!("{:?}", transaction_params); + } +} diff --git a/core/src/test/common/citrea/requests.rs b/core/src/test/common/citrea/requests.rs new file mode 100644 index 000000000..05bf194eb --- /dev/null +++ b/core/src/test/common/citrea/requests.rs @@ -0,0 +1,78 @@ +use crate::citrea::LIGHT_CLIENT_ADDRESS; +use crate::errors::BridgeError; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::test::common::citrea::parameters::get_citrea_deposit_params; +use crate::EVMAddress; +use alloy::sol_types::SolValue; +use bitcoin::{Block, Transaction}; +use eyre::Context; +use jsonrpsee::core::client::ClientT; +use jsonrpsee::http_client::HttpClient; +use jsonrpsee::rpc_params; +use serde_json::json; + +pub async fn block_number(client: &HttpClient) -> Result { + let params = rpc_params![ + json!({ + "to": LIGHT_CLIENT_ADDRESS, + "data": "0x57e871e7" + }), + "latest" + ]; + + let response: String = client + .request("eth_call", params) + .await + .wrap_err("Failed to get block number")?; + + let decoded_hex = hex::decode(&response[2..]).map_err(|e| 
eyre::eyre!(e.to_string()))?; + let block_number = decoded_hex + .iter() + .rev() + .take(4) + .rev() + .fold(0u32, |acc, &byte| (acc << 8) | byte as u32); + + Ok(block_number) +} + +pub async fn eth_get_balance( + client: HttpClient, + evm_address: EVMAddress, +) -> Result { + let params = rpc_params![evm_address.0, "latest"]; + + let response: String = client + .request("eth_getBalance", params) + .await + .wrap_err("Failed to get balance")?; + let ret = u128::from_str_radix(&response[2..], 16) + .map_err(|e| eyre::eyre!("Can't convert hex to int: {}", e))?; + + Ok(ret) +} + +/// Deposits a transaction to Citrea. This function is different from `contract.deposit` because it +/// won't directly talk with EVM but with Citrea. So that authorization can be done (Citrea will +/// block this call if it isn't an operator). +pub async fn deposit( + rpc: &ExtendedBitcoinRpc, + client: HttpClient, + block: Block, + block_height: u32, + transaction: Transaction, +) -> Result<(), BridgeError> { + let txid = transaction.compute_txid(); + + let params = get_citrea_deposit_params(rpc, transaction, block, block_height, txid).await?; + + let _response: () = client + .request( + "citrea_sendRawDepositTransaction", + rpc_params!(hex::encode(params.abi_encode_params())), + ) + .await + .wrap_err("Failed to send deposit transaction")?; + + Ok(()) +} diff --git a/core/src/test/common/clementine_utils.rs b/core/src/test/common/clementine_utils.rs new file mode 100644 index 000000000..9958bfa4b --- /dev/null +++ b/core/src/test/common/clementine_utils.rs @@ -0,0 +1,300 @@ +//! 
# Clementine related functions to do common operations + +use crate::bitvm_client::ClementineBitVMPublicKeys; +use crate::builder::transaction::input::UtxoVout; +use crate::builder::transaction::TransactionType; +use crate::citrea::CitreaClient; +use crate::database::Database; +use crate::deposit::KickoffData; +use crate::rpc::clementine::clementine_operator_client::ClementineOperatorClient; +use crate::rpc::clementine::{ + OptimisticWithdrawParams, TransactionRequest, WithdrawParams, WithdrawParamsWithSig, +}; +use crate::rpc::ecdsa_verification_sig::{OperatorWithdrawalMessage, OptimisticPayoutMessage}; +use crate::test::common::citrea::CitreaE2EData; +use crate::test::common::mine_once_after_in_mempool; +use crate::test::common::tx_utils::get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync; +use crate::test::sign::sign_withdrawal_verification_signature; +use crate::utils::FeePayingType; +use bitcoin::{OutPoint, Transaction, TxOut, Txid, XOnlyPublicKey}; +use citrea_e2e::bitcoin::DEFAULT_FINALITY_DEPTH; + +use super::test_actors::TestActors; +use super::tx_utils::{ + ensure_outpoint_spent_while_waiting_for_state_mngr_sync, + mine_once_after_outpoint_spent_in_mempool, +}; + +/// Sends a payout tx with given operator for the given withdrawal, starts a kickoff then returns +/// the reimburse connector of the kickoff. 
+/// operator_xonly_pk and operator_db should match the operator client ClementineOperatorClient +#[allow(clippy::too_many_arguments)] +pub async fn payout_and_start_kickoff( + mut operator: ClementineOperatorClient, + operator_xonly_pk: XOnlyPublicKey, + operator_db: &Database, + withdrawal_id: u32, + withdrawal_utxo: &OutPoint, + payout_txout: &TxOut, + sig: &bitcoin::secp256k1::schnorr::Signature, + e2e: &CitreaE2EData<'_>, + actors: &TestActors, +) -> OutPoint { + let withdrawal_params = WithdrawParams { + withdrawal_id, + input_signature: sig.serialize().to_vec(), + input_outpoint: Some((*withdrawal_utxo).into()), + output_script_pubkey: payout_txout.script_pubkey.to_bytes(), + output_amount: payout_txout.value.to_sat(), + }; + let verification_signature = sign_withdrawal_verification_signature::( + &e2e.config, + withdrawal_params.clone(), + ); + + let verification_signature_str = verification_signature.to_string(); + + loop { + let withdrawal_response = operator + .withdraw(WithdrawParamsWithSig { + withdrawal: Some(withdrawal_params.clone()), + verification_signature: Some(verification_signature_str.clone()), + }) + .await; + + tracing::info!("Withdrawal response: {:?}", withdrawal_response); + + match withdrawal_response { + Ok(_) => break, + Err(e) => tracing::info!("Withdrawal error: {:?}", e), + }; + e2e.rpc.mine_blocks_while_synced(1, actors).await.unwrap(); + } + + let payout_txid = get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync( + e2e.rpc, + *withdrawal_utxo, + actors, + ) + .await + .unwrap(); + + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH, actors) + .await + .unwrap(); + + tracing::info!( + "Waiting until getting first unhandled payout for operator {:?}", + operator_xonly_pk + ); + + // wait until payout is handled + tracing::info!("Waiting until payout is handled"); + while operator_db + .get_handled_payout_kickoff_txid(None, payout_txid) + .await + .unwrap() + .is_none() + { + 
tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + let kickoff_txid = operator_db + .get_handled_payout_kickoff_txid(None, payout_txid) + .await + .unwrap() + .expect("Payout must be handled"); + + let reimburse_connector = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::ReimburseInKickoff.get_vout(), + }; + + let kickoff_block_height = + mine_once_after_in_mempool(e2e.rpc, kickoff_txid, Some("Kickoff tx"), Some(300)) + .await + .unwrap(); + + tracing::info!( + "Kickoff height: {:?}, txid: {:?} operator: {:?}", + kickoff_block_height, + kickoff_txid, + operator_xonly_pk + ); + + reimburse_connector +} + +/// Reimburse a withdrawal with an optimistic payout +/// First it sends an optimistic payout tx request to aggregator, then it ensures the btc in vault is spent. +#[allow(clippy::too_many_arguments)] +pub async fn reimburse_with_optimistic_payout( + actors: &TestActors, + withdrawal_id: u32, + withdrawal_utxo: &OutPoint, + payout_txout: &TxOut, + sig: &bitcoin::secp256k1::schnorr::Signature, + e2e: &CitreaE2EData<'_>, + move_txid: Txid, +) -> eyre::Result<()> { + let mut aggregator = actors.get_aggregator(); + + let withdrawal_params = WithdrawParams { + withdrawal_id, + input_signature: sig.serialize().to_vec(), + input_outpoint: Some(withdrawal_utxo.to_owned().into()), + output_script_pubkey: payout_txout.script_pubkey.to_bytes(), + output_amount: payout_txout.value.to_sat(), + }; + + let verification_signature = sign_withdrawal_verification_signature::( + &e2e.config, + withdrawal_params.clone(), + ); + + let verification_signature_str = verification_signature.to_string(); + + aggregator + .optimistic_payout(OptimisticWithdrawParams { + withdrawal: Some(withdrawal_params), + verification_signature: Some(verification_signature_str), + }) + .await?; + + // ensure the btc in vault is spent + ensure_outpoint_spent_while_waiting_for_state_mngr_sync( + e2e.rpc, + OutPoint { + txid: move_txid, + vout: (UtxoVout::DepositInMove).get_vout(), + }, 
+ actors, + ) + .await?; + + Ok(()) +} + +/// Helper fn for common setup for disprove tests +/// Does a single deposit, registers a withdrawal, starts a kickoff from operator 0 and then challenges the kickoff +/// Afterwards it waits until all asserts are sent by operator. +/// Returns the actors, the kickoff txid and the kickoff tx +#[cfg(feature = "automation")] +pub async fn disprove_tests_common_setup( + e2e: &CitreaE2EData<'_>, +) -> (TestActors, Txid, Transaction) { + use crate::test::common::citrea::get_new_withdrawal_utxo_and_register_to_citrea; + + use super::run_single_deposit; + use super::tx_utils::create_tx_sender; + let mut config = e2e.config.clone(); + let (actors, deposit_info, move_txid, _deposit_blockhash, _) = + run_single_deposit::(&mut config, e2e.rpc.clone(), None, None, None) + .await + .unwrap(); + + // generate a withdrawal + let (withdrawal_utxo, payout_txout, sig) = + get_new_withdrawal_utxo_and_register_to_citrea(move_txid, e2e, &actors).await; + + // withdraw one with a kickoff with operator 0 + let (op0_db, op0_xonly_pk) = actors.get_operator_db_and_xonly_pk_by_index(0).await; + let mut operator0 = actors.get_operator_client_by_index(0); + + let reimburse_connector = payout_and_start_kickoff( + operator0.clone(), + op0_xonly_pk, + &op0_db, + 0, + &withdrawal_utxo, + &payout_txout, + &sig, + e2e, + &actors, + ) + .await; + + let kickoff_txid = reimburse_connector.txid; + + // send a challenge + let kickoff_tx = e2e.rpc.get_tx_of_txid(&kickoff_txid).await.unwrap(); + + // get kickoff utxo index + let kickoff_idx = kickoff_tx.input[0].previous_output.vout - 1; + let base_tx_req = TransactionRequest { + kickoff_id: Some( + KickoffData { + operator_xonly_pk: op0_xonly_pk, + round_idx: crate::operator::RoundIndex::Round(0), + kickoff_idx: kickoff_idx as u32, + } + .into(), + ), + deposit_outpoint: Some(deposit_info.deposit_outpoint.into()), + }; + + let all_txs = operator0 + .internal_create_signed_txs(base_tx_req.clone()) + .await + 
.unwrap() + .into_inner(); + + let challenge_tx = bitcoin::consensus::deserialize( + &all_txs + .signed_txs + .iter() + .find(|tx| tx.transaction_type == Some(TransactionType::Challenge.into())) + .unwrap() + .raw_tx, + ) + .unwrap(); + + let (tx_sender, tx_sender_db) = create_tx_sender(&config, 0).await.unwrap(); + let mut db_commit = tx_sender_db.begin_transaction().await.unwrap(); + tx_sender + .insert_try_to_send( + &mut db_commit, + None, + &challenge_tx, + FeePayingType::RBF, + None, + &[], + &[], + &[], + &[], + ) + .await + .unwrap(); + db_commit.commit().await.unwrap(); + + e2e.rpc + .mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH, &actors) + .await + .unwrap(); + + let challenge_outpoint = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Challenge.get_vout(), + }; + // wait until challenge tx is in mempool and mine + mine_once_after_outpoint_spent_in_mempool(e2e.rpc, challenge_outpoint) + .await + .unwrap(); + + // wait until all asserts are mined + for i in 0..ClementineBitVMPublicKeys::number_of_assert_txs() { + ensure_outpoint_spent_while_waiting_for_state_mngr_sync( + e2e.rpc, + OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Assert(i).get_vout(), + }, + &actors, + ) + .await + .unwrap(); + } + + (actors, kickoff_txid, kickoff_tx) +} diff --git a/core/src/test/common/mod.rs b/core/src/test/common/mod.rs new file mode 100644 index 000000000..0e396617b --- /dev/null +++ b/core/src/test/common/mod.rs @@ -0,0 +1,852 @@ +//! # Common Utilities for Tests +//! +//! This module provides all the common utilities needed in unit and integration +//! tests, including: +//! +//! - Setting up databases, servers +//! - Creating test configurations +//! - Making common operations like deposits +//! 
- Communicating with Citrea + +use crate::actor::Actor; +use crate::bitvm_client::SECP; +use crate::builder::address::create_taproot_address; +use crate::builder::script::{CheckSig, Multisig, SpendableScript}; +use crate::builder::sighash::TapTweakData; +use crate::builder::transaction::input::UtxoVout; +use crate::builder::transaction::{create_replacement_deposit_txhandler, TxHandler}; +use crate::citrea::CitreaClientT; +use crate::config::BridgeConfig; +use crate::database::Database; +use crate::deposit::{BaseDepositData, DepositInfo, DepositType, ReplacementDepositData}; +use crate::errors::BridgeError; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::rpc::clementine::{ + entity_status_with_id, Deposit, Empty, EntityStatuses, GetEntityStatusesRequest, + SendMoveTxRequest, +}; +use crate::utils::FeePayingType; +use crate::EVMAddress; +use bitcoin::secp256k1::rand; +use bitcoin::secp256k1::PublicKey; +use bitcoin::XOnlyPublicKey; +use bitcoin::{taproot, BlockHash, OutPoint, Transaction, Txid}; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::bitcoin::DEFAULT_FINALITY_DEPTH; +use eyre::Context; +pub use setup_utils::*; +use std::path::Path; +use std::process::Command; +use std::sync::Mutex; +use std::time::Duration; +use test_actors::TestActors; +use tonic::Request; + +pub mod citrea; +#[cfg(feature = "automation")] +pub mod clementine_utils; +mod setup_utils; +pub mod test_actors; +pub mod tx_utils; + +#[cfg(feature = "automation")] +use crate::test::common::tx_utils::wait_for_fee_payer_utxos_to_be_in_mempool; +#[cfg(feature = "automation")] +use tx_utils::create_tx_sender; + +/// Generate a random XOnlyPublicKey +pub fn generate_random_xonly_pk() -> XOnlyPublicKey { + let (pubkey, _parity) = SECP + .generate_keypair(&mut rand::thread_rng()) + .1 + .x_only_public_key(); + + pubkey +} + +/// Polls a closure until it returns true, or the timeout is reached. Exits +/// early if the closure throws an error. 
+/// +/// Default timeout is 60 seconds, default poll interval is 500 milliseconds. +/// +/// # Parameters +/// +/// - `func`: The closure to poll. +/// - `timeout`: The timeout duration. +/// - `poll_interval`: The poll interval. +/// +/// # Returns +/// +/// - `Ok(())`: If the condition is met. +/// - `Err(eyre::eyre!("Timeout reached"))`: If the timeout is reached. +/// - `Err(e)`: If the closure returns an error. +pub async fn poll_until_condition( + mut func: impl AsyncFnMut() -> Result, + timeout: Option, + poll_interval: Option, +) -> Result<(), BridgeError> { + poll_get( + async move || { + if func().await? { + Ok(Some(())) + } else { + Ok(None) + } + }, + timeout, + poll_interval, + ) + .await +} + +/// Polls a closure until it returns a value, or the timeout is reached. Exits +/// early if the closure throws an error. +/// +/// Default timeout is 60 seconds, default poll interval is 500 milliseconds. +/// +/// # Parameters +/// +/// - `func`: The closure to poll. +/// - `timeout`: The timeout duration. +/// - `poll_interval`: The poll interval. +pub async fn poll_get( + mut func: impl AsyncFnMut() -> Result, eyre::Error>, + timeout: Option, + poll_interval: Option, +) -> Result { + let timeout = timeout.unwrap_or(Duration::from_secs(90)); + let poll_interval = poll_interval.unwrap_or(Duration::from_millis(500)); + + let start = std::time::Instant::now(); + + loop { + if start.elapsed() > timeout { + return Err(eyre::eyre!( + "Timeout of {:?} seconds reached. Poll interval was {:?} seconds", + timeout.as_secs_f32(), + poll_interval.as_secs_f32() + ) + .into()); + } + + if let Some(result) = func().await? { + return Ok(result); + } + + tokio::time::sleep(poll_interval).await; + } +} + +/// Get the minimum next state manager height from all the state managers +/// If automation is off for any entity, their state manager is assumed to be synced +/// (by setting their next height to u32::MAX). 
+pub async fn get_next_sync_heights(entity_statuses: EntityStatuses) -> eyre::Result> { + entity_statuses + .entity_statuses + .into_iter() + .map(|entity| { + if let Some(entity_status_with_id::StatusResult::Status(status)) = entity.status_result + { + if status.automation { + Ok(status.state_manager_next_height.unwrap_or(0)) + } else { + // assume synced if automation is off + Ok(u32::MAX) + } + } else { + Err(eyre::eyre!( + "Couldn't retrieve sync status from entity {:?}", + entity.entity_id + )) + } + }) + .collect::, _>>() +} + +/// Calls get_entity_statuses and returns the minimum next state manager height +pub async fn get_min_next_state_manager_height( + actors: &TestActors, +) -> eyre::Result { + let mut aggregator = actors.get_aggregator(); + let l1_sync_status = aggregator + .get_entity_statuses(Request::new(GetEntityStatusesRequest { + restart_tasks: false, + })) + .await? + .into_inner(); + let min_next_sync_height = get_next_sync_heights(l1_sync_status) + .await? + .into_iter() + .min() + .ok_or_else(|| eyre::eyre!("No entities found"))?; + Ok(min_next_sync_height) +} + +/// Checks if all the state managers are synced to the latest finalized block +pub async fn are_all_state_managers_synced( + rpc: &ExtendedBitcoinRpc, + actors: &TestActors, +) -> eyre::Result { + let min_next_sync_height = get_min_next_state_manager_height(actors).await?; + let current_chain_height = rpc.get_current_chain_height().await?; + let finality_depth = actors.aggregator.config.protocol_paramset().finality_depth; + // get the current finalized chain height + let current_finalized_chain_height = current_chain_height.saturating_sub(finality_depth); + // assume synced if state manager is not running + let state_manager_running = actors + .aggregator + .config + .test_params + .should_run_state_manager; + Ok(!state_manager_running || min_next_sync_height > current_finalized_chain_height) +} + +/// Wait for a transaction to be in the mempool and than mines a block to make +/// 
sure that it is included in the next block. +/// +/// # Parameters +/// +/// - `rpc`: The RPC client to use. +/// - `txid`: The txid to wait for. +/// - `tx_name`: The name of the transaction to wait for. +/// - `timeout`: The timeout in seconds. +pub async fn mine_once_after_in_mempool( + rpc: &ExtendedBitcoinRpc, + txid: Txid, + tx_name: Option<&str>, + timeout: Option, +) -> Result { + let timeout = timeout.unwrap_or(60); + let start = std::time::Instant::now(); + let tx_name = tx_name.unwrap_or("Unnamed tx"); + + if rpc + .get_transaction(&txid, None) + .await + .is_ok_and(|tx| tx.info.blockhash.is_some()) + { + return Err(eyre::eyre!("{} is already mined", tx_name).into()); + } + + loop { + if start.elapsed() > std::time::Duration::from_secs(timeout) { + return Err( + eyre::eyre!("{} didn't hit mempool within {} seconds", tx_name, timeout).into(), + ); + } + + if rpc.get_mempool_entry(&txid).await.is_ok() { + break; + }; + + // mine if there are some txs in mempool + if rpc.mempool_size().await? 
> 0 { + rpc.mine_blocks(1).await?; + } + + tracing::info!("Waiting for {} transaction to hit mempool...", tx_name); + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + } + + rpc.mine_blocks(1).await?; + + let tx: bitcoincore_rpc::json::GetRawTransactionResult = rpc + .get_raw_transaction_info(&txid, None) + .await + .map_err(|e| eyre::eyre!("Failed to get raw transaction {}: {}", tx_name, e))?; + + if tx.blockhash.is_none() { + return Err(eyre::eyre!("{} did not get mined", tx_name).into()); + } + + let tx_block_height = rpc + .get_block_info(&tx.blockhash.unwrap()) + .await + .wrap_err("Failed to get block info")?; + + Ok(tx_block_height.height) +} + +pub async fn run_multiple_deposits( + config: &mut BridgeConfig, + rpc: ExtendedBitcoinRpc, + count: usize, + test_actors: Option>, +) -> Result< + ( + TestActors, + Vec, + Vec, + Vec, + Vec, + ), + BridgeError, +> { + let actors = match test_actors { + Some(actors) => actors, + None => create_actors(config).await, + }; + let mut aggregator = actors.get_aggregator(); + + let evm_address = EVMAddress([1u8; 20]); + let actor = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let verifiers_public_keys: Vec = aggregator + .setup(Request::new(Empty {})) + .await + .wrap_err("Can't setup aggregator")? 
+ .into_inner() + .try_into()?; + + let (deposit_address, _) = + get_deposit_address(config, evm_address, verifiers_public_keys.clone())?; + let mut move_txids = Vec::new(); + let mut deposit_blockhashes = Vec::new(); + let mut deposit_infos = Vec::new(); + + for _ in 0..count { + let deposit_outpoint: OutPoint = rpc + .send_to_address(&deposit_address, config.protocol_paramset().bridge_amount) + .await?; + rpc.mine_blocks_while_synced(DEFAULT_FINALITY_DEPTH + 1, &actors) + .await?; + + let deposit_info = DepositInfo { + deposit_outpoint, + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address, + recovery_taproot_address: actor.address.as_unchecked().to_owned(), + }), + }; + + deposit_infos.push(deposit_info.clone()); + + let deposit: Deposit = deposit_info.into(); + + let movetx = aggregator + .new_deposit(deposit) + .await + .wrap_err("Error while making a deposit")? + .into_inner(); + let move_txid = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + raw_tx: Some(movetx), + }) + .await + .expect("failed to send movetx") + .into_inner() + .try_into()?; + + if !rpc.is_tx_on_chain(&move_txid).await? { + mine_once_after_in_mempool(&rpc, move_txid, Some("Move tx"), Some(180)).await?; + } + + let deposit_blockhash = rpc.get_blockhash_of_tx(&deposit_outpoint.txid).await?; + deposit_blockhashes.push(deposit_blockhash); + move_txids.push(move_txid); + } + + Ok(( + actors, + deposit_infos, + move_txids, + deposit_blockhashes, + verifiers_public_keys, + )) +} + +/// Creates a user deposit transaction and makes a new deposit call to +/// Clementine via aggregator. +/// +/// # Parameters +/// +/// - `config` [`BridgeConfig`]: The bridge configuration. +/// - `rpc` [`ExtendedBitcoinRpc`]: The RPC client to use. +/// - `evm_address` [`EVMAddress`]: Optional EVM address to use for the +/// deposit. If not provided, a default address is used. 
+/// - `actors` [`TestActors`]: Optional actors to use for the deposit. If not +/// provided, a new actors will be created. +/// - `deposit_outpoint` [`OutPoint`]: Optional deposit outpoint to use for the +/// deposit. If not provided, a new deposit outpoint will be created. +/// +/// # Returns +/// +/// A big tuple, containing: +/// +/// - Server clients: +/// - [`TestActors`]: A helper struct holding all the verifiers, operators, and the aggregator. +/// - [`DepositInfo`]: Information about the deposit. +/// - [`Txid`]: TXID of the move transaction. +/// - [`BlockHash`]: Block hash of the block where the user deposit was mined. +/// - [`Vec`]: Public keys of the verifiers used in the deposit. +pub async fn run_single_deposit( + config: &mut BridgeConfig, + rpc: ExtendedBitcoinRpc, + evm_address: Option, + actors: Option>, + deposit_outpoint: Option, // if a deposit outpoint is provided, it will be used instead of creating a new one +) -> Result<(TestActors, DepositInfo, Txid, BlockHash, Vec), BridgeError> { + let actors = match actors { + Some(actors) => actors, + None => create_actors(config).await, + }; + + let evm_address = evm_address.unwrap_or(EVMAddress([1u8; 20])); + let actor = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let setup_start = std::time::Instant::now(); + let mut aggregator = actors.get_aggregator(); + let verifiers_public_keys: Vec = aggregator + .setup(Request::new(Empty {})) + .await + .wrap_err("Failed to setup aggregator")? 
+ .into_inner() + .try_into()?; + let setup_elapsed = setup_start.elapsed(); + tracing::info!("Setup completed in: {:?}", setup_elapsed); + + let deposit_outpoint = match deposit_outpoint { + Some(outpoint) => outpoint, + None => { + let (deposit_address, _) = + get_deposit_address(config, evm_address, verifiers_public_keys.clone())?; + let outpoint = rpc + .send_to_address(&deposit_address, config.protocol_paramset().bridge_amount) + .await?; + match config.protocol_paramset().network { + bitcoin::Network::Regtest => { + mine_once_after_in_mempool(&rpc, outpoint.txid, Some("Deposit outpoint"), None) + .await?; + } + bitcoin::Network::Testnet4 => loop { + tracing::info!("Deposit outpoint: {:?}", outpoint); + if rpc.is_tx_on_chain(&outpoint.txid).await? { + break; + } + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + }, + _ => { + return Err(eyre::eyre!( + "Unsupported network: {:?}", + config.protocol_paramset().network + ) + .into()) + } + } + outpoint + } + }; + + let deposit_blockhash = rpc.get_blockhash_of_tx(&deposit_outpoint.txid).await?; + + let deposit_info = DepositInfo { + deposit_outpoint, + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address, + recovery_taproot_address: actor.address.as_unchecked().to_owned(), + }), + }; + + let deposit: Deposit = deposit_info.clone().into(); + + let movetx = aggregator + .new_deposit(deposit) + .await + .wrap_err("Error while making a deposit")? + .into_inner(); + let move_txid; + #[cfg(feature = "automation")] + { + move_txid = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + raw_tx: Some(movetx), + }) + .await + .expect("failed to send movetx") + .into_inner() + .try_into()?; + + match config.protocol_paramset().network { + bitcoin::Network::Regtest => { + if !rpc.is_tx_on_chain(&move_txid).await? 
{ + let aggregator_db = Database::new(&actors.aggregator.config).await?; + // check if deposit outpoint is spent + let deposit_outpoint_spent = rpc.is_utxo_spent(&deposit_outpoint).await?; + if deposit_outpoint_spent { + return Err(eyre::eyre!( + "Deposit outpoint is spent but move tx is not in chain. In test_bridge_contract_change + this means move tx does not match the one in saved state" + ) + .into()); + } + wait_for_fee_payer_utxos_to_be_in_mempool(&rpc, aggregator_db, move_txid) + .await?; + rpc.mine_blocks_while_synced(1, &actors).await?; + mine_once_after_in_mempool(&rpc, move_txid, Some("Move tx"), Some(180)).await?; + } + } + bitcoin::Network::Testnet4 => { + tracing::info!("Move txid: {:?}", move_txid); + loop { + if rpc.is_tx_on_chain(&move_txid).await? { + break; + } + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + } + } + _ => { + return Err(eyre::eyre!( + "Unsupported network: {:?}", + config.protocol_paramset().network + ) + .into()) + } + } + + // Uncomment below to debug the move tx. 
+ // let transaction = rpc + // .get_raw_transaction(&move_txid, None) + // .await + // .expect("a"); + // let tx_info: bitcoincore_rpc::json::GetRawTransactionResult = rpc + // .get_raw_transaction_info(&move_txid, None) + // .await + // .expect("a"); + // let block: bitcoincore_rpc::json::GetBlockResult = rpc + // .get_block_info(&tx_info.blockhash.unwrap()) + // .await + // .expect("a"); + // let block_height = block.height; + // let block = rpc + // .get_block(&tx_info.blockhash.unwrap()) + // .await + // .expect("a"); + // let transaction_params = get_citrea_deposit_params( + // &rpc, + // transaction.clone(), + // block, + // block_height as u32, + // move_txid, + // ).await?; + // println!("Move tx Transaction params: {:?}", transaction_params); + // println!( + // "Move tx: {:?}", + // hex::encode(bitcoin::consensus::serialize(&transaction)) + // ); + } + + #[cfg(not(feature = "automation"))] + { + let movetx: Transaction = bitcoin::consensus::deserialize(&movetx.raw_tx) + .wrap_err("Failed to deserialize movetx")?; + move_txid = rpc + .send_raw_transaction(&movetx) + .await + .wrap_err("Failed to send movetx")?; + + while !rpc.is_tx_on_chain(&move_txid).await? { + rpc.mine_blocks(1).await?; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + } + + Ok(( + actors, + deposit_info, + move_txid, + deposit_blockhash, + verifiers_public_keys, + )) +} + +/// Runs a single replacement deposit transaction. It will replace the old movetx using the nofn path, so it needs +/// the nofn xonly public key and secret keys of the old signer set that signed the previous movetx. +/// +/// # Parameters +/// +/// - `config` [`BridgeConfig`]: The bridge configuration. +/// - `rpc` [`ExtendedBitcoinRpc`]: The RPC client to use. +/// - `old_move_txid` [`Txid`]: The TXID of the old move transaction. +/// - `current_actors` [`TestActors`]: The actors to use for the replacement deposit. 
+/// - `old_nofn_xonly_pk` [`XOnlyPublicKey`]: The nofn xonly public key of the old signer set that signed previous movetx. +/// - `old_secret_keys` [`Vec`]: The secret keys of the old signer set that signed previous movetx. +/// +/// # Returns +/// +/// A big tuple, containing: +/// +/// - Server clients: +/// - [`TestActors`]: A helper struct holding all the verifiers, operators, and the aggregator. +/// - [`DepositInfo`]: Information about the deposit. +/// - [`Txid`]: TXID of the move transaction. +/// - [`BlockHash`]: Block hash of the block where the user deposit was mined. +#[cfg(feature = "automation")] +pub async fn run_single_replacement_deposit( + config: &mut BridgeConfig, + rpc: &ExtendedBitcoinRpc, + old_move_txid: Txid, + current_actors: TestActors, + old_nofn_xonly_pk: XOnlyPublicKey, +) -> Result<(TestActors, DepositInfo, Txid, BlockHash), BridgeError> { + let aggregator_db = Database::new(&BridgeConfig { + db_name: config.db_name.clone() + "0", + ..config.clone() + }) + .await?; + + // create a replacement deposit tx, we will sign it using nofn + let replacement_deposit_txid = send_replacement_deposit_tx( + config, + rpc, + old_move_txid, + ¤t_actors, + old_nofn_xonly_pk, + ) + .await?; + + let deposit_outpoint = OutPoint { + txid: replacement_deposit_txid, + vout: 0, + }; + + let setup_start = std::time::Instant::now(); + let mut aggregator = current_actors.get_aggregator(); + tracing::info!( + "Current chain height before aggregator setup: {:?}", + rpc.get_current_chain_height().await? 
+ ); + aggregator + .setup(Request::new(Empty {})) + .await + .wrap_err("Failed to setup aggregator")?; + + let setup_elapsed = setup_start.elapsed(); + tracing::info!("Setup completed in: {:?}", setup_elapsed); + + let deposit_blockhash = rpc.get_blockhash_of_tx(&deposit_outpoint.txid).await?; + + let deposit_info = DepositInfo { + deposit_outpoint, + deposit_type: DepositType::ReplacementDeposit(ReplacementDepositData { old_move_txid }), + }; + + let deposit: Deposit = deposit_info.clone().into(); + + let movetx = aggregator + .new_deposit(deposit) + .await + .wrap_err("Error while making a replacement deposit")? + .into_inner(); + let move_txid = aggregator + .send_move_to_vault_tx(SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoint.into()), + raw_tx: Some(movetx), + }) + .await + .expect("failed to send movetx") + .into_inner() + .try_into()?; + + if !rpc.is_tx_on_chain(&move_txid).await? { + wait_for_fee_payer_utxos_to_be_in_mempool(rpc, aggregator_db, move_txid).await?; + rpc.mine_blocks_while_synced(1, ¤t_actors).await?; + mine_once_after_in_mempool(rpc, move_txid, Some("Move tx"), Some(180)).await?; + } + + Ok((current_actors, deposit_info, move_txid, deposit_blockhash)) +} + +/// Signs a replacement deposit transaction using the security council +fn sign_replacement_deposit_tx_with_sec_council( + replacement_deposit: &TxHandler, + config: &BridgeConfig, + old_nofn_xonly_pk: XOnlyPublicKey, +) -> Result { + let security_council = config.security_council.clone(); + let multisig_script = Multisig::from_security_council(security_council.clone()).to_script_buf(); + let sighash = replacement_deposit.calculate_script_spend_sighash( + 0, + &multisig_script, + bitcoin::TapSighashType::SinglePlusAnyoneCanPay, + )?; + + // sign using first threshold security council members, for rest do not sign + let signatures = config + .test_params + .sec_council_secret_keys + .iter() + .enumerate() + .map(|(idx, sk)| { + if idx < security_council.threshold as usize { + 
let actor = Actor::new(*sk, None, config.protocol_paramset().network); + let sig = actor + .sign_with_tweak_data(sighash, TapTweakData::ScriptPath, None) + .unwrap(); + Some(taproot::Signature { + signature: sig, + sighash_type: bitcoin::TapSighashType::SinglePlusAnyoneCanPay, + }) + } else { + None + } + }) + .collect::>(); + + let mut witness = + Multisig::from_security_council(security_council).generate_script_inputs(&signatures)?; + + // calculate address in movetx vault + let script_buf = CheckSig::new(old_nofn_xonly_pk).to_script_buf(); + let (_, spend_info) = create_taproot_address( + &[script_buf.clone(), multisig_script.clone()], + None, + config.protocol_paramset().network, + ); + // add script path to witness + Actor::add_script_path_to_witness(&mut witness, &multisig_script, &spend_info)?; + let mut tx = replacement_deposit.get_cached_tx().clone(); + // add witness to tx + tx.input[0].witness = witness; + Ok(tx) +} + +#[cfg(feature = "automation")] +async fn send_replacement_deposit_tx( + config: &BridgeConfig, + rpc: &ExtendedBitcoinRpc, + old_move_txid: Txid, + actors: &TestActors, + old_nofn_xonly_pk: XOnlyPublicKey, +) -> Result { + // create a replacement deposit tx, we will sign it using nofn + let replacement_txhandler = create_replacement_deposit_txhandler( + old_move_txid, + OutPoint { + txid: old_move_txid, + vout: UtxoVout::DepositInMove.get_vout(), + }, + old_nofn_xonly_pk, + actors.get_nofn_aggregated_xonly_pk()?, + config.protocol_paramset(), + config.security_council.clone(), + )?; + + let signed_replacement_deposit_tx = sign_replacement_deposit_tx_with_sec_council( + &replacement_txhandler, + config, + old_nofn_xonly_pk, + )?; + + let (tx_sender, tx_sender_db) = create_tx_sender(config, 0).await?; + let mut db_commit = tx_sender_db.begin_transaction().await?; + tx_sender + .insert_try_to_send( + &mut db_commit, + None, + &signed_replacement_deposit_tx, + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &[], + ) + .await + .unwrap(); + 
db_commit.commit().await?; + + let replacement_deposit_txid = signed_replacement_deposit_tx.compute_txid(); + + wait_for_fee_payer_utxos_to_be_in_mempool(rpc, tx_sender_db, replacement_deposit_txid).await?; + + mine_once_after_in_mempool( + rpc, + replacement_deposit_txid, + Some("Replacement deposit"), + Some(180), + ) + .await?; + tracing::info!( + "Replacement deposit sent, txid: {}", + replacement_deposit_txid + ); + + Ok(replacement_deposit_txid) +} + +/// Ensures that TLS certificates exist for tests. +/// This will run the certificate generation script if certificates don't exist. +pub fn ensure_test_certificates() -> Result<(), std::io::Error> { + static GENERATE_LOCK: Mutex<()> = Mutex::new(()); + + while !Path::new("./certs/ca/ca.pem").exists() { + if let Ok(_lock) = GENERATE_LOCK.lock() { + println!("Generating TLS certificates for tests..."); + + let output = Command::new("sh") + .arg("-c") + .arg("cd .. && ./scripts/generate_certs.sh") + .output()?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + eprintln!("Failed to generate certificates: {}", stderr); + return Err(std::io::Error::other(format!( + "Certificate generation failed: {}", + stderr + ))); + } + + println!("TLS certificates generated successfully"); + break; + } + } + + Ok(()) +} + +mod tests { + #[cfg(feature = "integration-tests")] + #[tokio::test] + async fn test_regtest_create_and_connect() { + use crate::{ + extended_bitcoin_rpc::ExtendedBitcoinRpc, + test::common::{create_regtest_rpc, create_test_config_with_thread_name}, + }; + use bitcoincore_rpc::RpcApi; + + let mut config = create_test_config_with_thread_name().await; + + let regtest = create_regtest_rpc(&mut config).await; + + let macro_rpc = regtest.rpc(); + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .unwrap(); + + macro_rpc.mine_blocks(1).await.unwrap(); + let 
height = macro_rpc.get_block_count().await.unwrap(); + let new_rpc_height = rpc.get_block_count().await.unwrap(); + assert_eq!(height, new_rpc_height); + + rpc.mine_blocks(1).await.unwrap(); + let new_rpc_height = rpc.get_block_count().await.unwrap(); + let height = macro_rpc.get_block_count().await.unwrap(); + assert_eq!(height, new_rpc_height); + } +} diff --git a/core/src/test/common/setup_utils.rs b/core/src/test/common/setup_utils.rs new file mode 100644 index 000000000..31be7670b --- /dev/null +++ b/core/src/test/common/setup_utils.rs @@ -0,0 +1,471 @@ +//! # Testing Utilities + +use crate::builder::script::SpendPath; +use crate::builder::transaction::TransactionType; +use crate::citrea::CitreaClientT; +use crate::constants::NON_STANDARD_V3; +use crate::rpc::clementine::NormalSignatureKind; +use crate::utils::initialize_logger; +use crate::utils::NamedEntity; +use crate::{ + actor::Actor, builder, config::BridgeConfig, database::Database, errors::BridgeError, + extended_bitcoin_rpc::ExtendedBitcoinRpc, musig2::AggregateFromPublicKeys, +}; +use crate::{EVMAddress, UTXO}; +use bitcoin::secp256k1::schnorr; +use secrecy::ExposeSecret; +use std::net::TcpListener; + +use super::test_actors::TestActors; + +pub struct WithProcessCleanup( + /// Handle to the bitcoind process + pub Option, + /// RPC client + pub ExtendedBitcoinRpc, + /// Path to the bitcoind debug log file + pub std::path::PathBuf, + /// Whether to wait indefinitely after test finishes before cleanup (for RPC debugging) + pub bool, +); +impl WithProcessCleanup { + pub fn rpc(&self) -> &ExtendedBitcoinRpc { + &self.1 + } +} + +impl Drop for WithProcessCleanup { + fn drop(&mut self) { + tracing::info!( + "Test bitcoin regtest logs can be found at: {}", + self.2.display() + ); + + if self.3 { + tracing::warn!( + "Suspending the test to allow inspection of bitcoind. Ctrl-C to exit. 
{}", + self.2.display() + ); + std::thread::sleep(std::time::Duration::from_secs(u64::MAX)); + } + if let Some(ref mut child) = self.0.take() { + let _ = child.kill(); + } + } +} + +/// Creates a Bitcoin regtest node for testing, waits for it to start and returns an RPC client. +/// +/// # Environment Variables +/// - `BITCOIN_RPC_DEBUG`: If set to a non-empty value, will use port 18443 and connect to an existing +/// bitcoind instance when available. +/// +/// # Returns +/// Returns a `WithProcessCleanup` which contains: +/// - The bitcoind process handle (if a new instance was started) +/// - An RPC client connected to the node +/// - Path to the debug log file +/// - A flag indicating whether to pause before cleanup +/// +/// # Important +/// The returned value MUST NOT be dropped until the test is complete, as dropping it will terminate +/// the bitcoind process and invalidate the RPC connection. The cleanup is handled automatically when +/// the returned value is dropped. +pub async fn create_regtest_rpc(config: &mut BridgeConfig) -> WithProcessCleanup { + use bitcoincore_rpc::RpcApi; + use tempfile::TempDir; + + // Create temporary directory for bitcoin data + let data_dir = TempDir::new() + .expect("Failed to create temporary directory") + .keep(); + let bitcoin_rpc_debug = std::env::var("BITCOIN_RPC_DEBUG").map(|d| !d.is_empty()) == Ok(true); + + // Get available ports for RPC + let rpc_port = if bitcoin_rpc_debug { + 18443 + } else { + get_available_port() + }; + + config.bitcoin_rpc_url = format!("http://127.0.0.1:{}", rpc_port); + + if bitcoin_rpc_debug && TcpListener::bind(format!("127.0.0.1:{}", rpc_port)).is_err() { + // Bitcoind is already running on port 18443, use existing port. 
+ return WithProcessCleanup( + None, + ExtendedBitcoinRpc::connect( + "http://127.0.0.1:18443".into(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .unwrap(), + data_dir.join("debug.log"), + false, // no need to wait after test + ); + } + // Bitcoin node configuration + // Construct args for bitcoind + let mut args = vec![ + "-regtest".to_string(), + format!("-datadir={}", data_dir.display()), + "-listen=0".to_string(), + format!("-rpcport={}", rpc_port), + format!("-rpcuser={}", config.bitcoin_rpc_user.expose_secret()), + format!( + "-rpcpassword={}", + config.bitcoin_rpc_password.expose_secret() + ), + "-wallet=admin".to_string(), + "-txindex=1".to_string(), + "-fallbackfee=0.00001".to_string(), + "-rpcallowip=0.0.0.0/0".to_string(), + "-maxtxfee=5".to_string(), + ]; + + if config.protocol_paramset().bridge_nonstandard { + // allow 0 sat non-ephemeral outputs in regtest by not considering them as dust + // https://github.com/bitcoin/bitcoin/blob/master/src/policy/policy.cpp + args.push("-dustrelayfee=0".to_string()); + } + + if config.test_params.mine_0_fee_txs { + // allow mining of 0-fee transactions + args.push("-minrelaytxfee=0".to_string()); + args.push("-acceptnonstdtxn=1".to_string()); + args.push("-blockmintxfee=0".to_string()); + } + + // Create log file in temp directory + let log_file = data_dir.join("debug.log"); + let log_file_path = log_file + .to_str() + .expect("Failed to convert log file path to string"); + + // Start bitcoind process with log redirection + let process = std::process::Command::new("bitcoind") + .args(&args) + .arg(format!("-debuglogfile={}", log_file_path)) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .spawn() + .expect("Failed to start bitcoind"); + + if bitcoin_rpc_debug { + tracing::warn!("Bitcoind logs are available at {}", log_file_path); + } + + // Create RPC client + let rpc_url = format!("http://127.0.0.1:{}", rpc_port); + + // Wait for 
node to be ready + let mut attempts = 0; + let retry_count = 30; + let client = loop { + match ExtendedBitcoinRpc::connect( + rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + { + Ok(client) => break client, + Err(_) => { + attempts += 1; + if attempts >= retry_count { + panic!("Bitcoin node failed to start in {} seconds", retry_count); + } + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + } + }; + + // Get and print bitcoind version + let network_info = client + .get_network_info() + .await + .expect("Failed to get network info"); + tracing::info!("Using bitcoind version: {}", network_info.version); + + // // Create wallet + client + .create_wallet("admin", None, None, None, None) + .await + .expect("Failed to create wallet"); + + // Generate blocks + let address = client + .get_new_address(None, None) + .await + .expect("Failed to get new address"); + + if config.test_params.generate_to_address { + client + .generate_to_address(201, address.assume_checked_ref()) + .await + .expect("Failed to generate blocks"); + } + + WithProcessCleanup(Some(process), client.clone(), log_file, bitcoin_rpc_debug) +} + +/// Creates a temporary database for testing, using current thread's name as the +/// database name. +/// +/// # Parameters +/// +/// - `suffix`: Optional suffix added to the thread handle in `Option` +/// type. +/// +/// # Returns +/// +/// - [`BridgeConfig`]: Modified configuration struct +pub async fn create_test_config_with_thread_name() -> BridgeConfig { + let _ = rustls::crypto::ring::default_provider().install_default(); + + let handle = std::thread::current() + .name() + .expect("Failed to get thread name") + .split(':') + .next_back() + .expect("Failed to get thread name") + .to_owned(); + + // Use maximum log level for tests. 
+ initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + + let config = BridgeConfig { + db_name: handle.to_string(), + citrea_rpc_url: handle.to_string(), + ..Default::default() + }; + + initialize_database(&config).await; + + config +} + +/// Initializes a new database with given configuration. If the database is +/// already initialized, it will be dropped before initialization. Meaning, +/// a clean state is guaranteed. +/// +/// [`Database::new`] must be called after this to connect to the +/// initialized database. +/// +/// # Parameters +/// +/// - `config`: Configuration options in `BridgeConfig` type. +pub async fn initialize_database(config: &BridgeConfig) { + let url = Database::get_postgresql_url(config); + let conn = sqlx::PgPool::connect(url.as_str()).await.unwrap_or_else(|_| panic!("Failed to connect to database, please make sure a test Postgres DB is running at {}", + url)); + + sqlx::query(&format!("DROP DATABASE IF EXISTS {}", &config.db_name)) + .execute(&conn) + .await + .expect("Failed to drop database"); + + sqlx::query(&format!( + "CREATE DATABASE {} WITH OWNER {}", + config.db_name, + config.db_user.expose_secret() + )) + .execute(&conn) + .await + .expect("Failed to create database"); + + conn.close().await; + + Database::run_schema_script(config, true) + .await + .expect("Failed to run schema script"); +} + +/// Starts operators, verifiers, aggregator and watchtower servers. +/// +/// Uses Unix sockets with temporary files for communication between services. +/// +/// # Returns +/// +/// Returns a tuple of vectors of clients, handles, and socket paths for the +/// verifiers, operators, aggregator and watchtowers, along with shutdown channels. +pub async fn create_actors(config: &BridgeConfig) -> TestActors { + TestActors::new(config) + .await + .expect("Failed to create actors") +} + +/// Gets the the deposit address for the user. 
+/// +/// # Returns +/// +/// - [`Address`]: Deposit address of the user +pub fn get_deposit_address( + config: &BridgeConfig, + evm_address: EVMAddress, + verifiers_public_keys: Vec, +) -> Result<(bitcoin::Address, bitcoin::taproot::TaprootSpendInfo), BridgeError> { + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let nofn_xonly_pk = bitcoin::XOnlyPublicKey::from_musig2_pks(verifiers_public_keys, None) + .expect("Failed to create xonly pk"); + + builder::address::generate_deposit_address( + nofn_xonly_pk, + signer.address.as_unchecked(), + evm_address, + config.protocol_paramset().network, + config.protocol_paramset().user_takes_after, + ) +} + +/// Generates withdrawal transaction and signs it with `SinglePlusAnyoneCanPay`. +/// +/// # Returns +/// +/// A tuple of: +/// +/// - [`UTXO`]: Dust UTXO used as the input of the withdrawal transaction +/// - [`TxOut`]: Txout of the withdrawal transaction +/// - [`Signature`]: Signature of the withdrawal transaction +pub async fn generate_withdrawal_transaction_and_signature( + config: &BridgeConfig, + rpc: &ExtendedBitcoinRpc, + withdrawal_address: &bitcoin::Address, + withdrawal_amount: bitcoin::Amount, +) -> (UTXO, bitcoin::TxOut, schnorr::Signature) { + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + const WITHDRAWAL_EMPTY_UTXO_SATS: bitcoin::Amount = bitcoin::Amount::from_sat(550); + + let dust_outpoint = rpc + .send_to_address(&signer.address, WITHDRAWAL_EMPTY_UTXO_SATS) + .await + .expect("Failed to send to address"); + + let dust_utxo = UTXO { + outpoint: dust_outpoint, + txout: bitcoin::TxOut { + value: WITHDRAWAL_EMPTY_UTXO_SATS, + script_pubkey: signer.address.script_pubkey(), + }, + }; + + let txin = builder::transaction::input::SpendableTxIn::new( + dust_utxo.outpoint, + dust_utxo.txout.clone(), + vec![], + None, + ); + let txout = bitcoin::TxOut { + value: 
withdrawal_amount, + script_pubkey: withdrawal_address.script_pubkey(), + }; + let unspent_txout = builder::transaction::output::UnspentTxOut::from_partial(txout.clone()); + + let tx = builder::transaction::TxHandlerBuilder::new(TransactionType::Payout) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::NotStored, + txin, + SpendPath::KeySpend, + builder::transaction::DEFAULT_SEQUENCE, + ) + .add_output(unspent_txout.clone()) + .finalize(); + + let sighash = tx + .calculate_sighash_txin(0, bitcoin::sighash::TapSighashType::SinglePlusAnyoneCanPay) + .expect("Failed to calculate sighash"); + + let sig = signer + .sign_with_tweak_data(sighash, builder::sighash::TapTweakData::KeyPath(None), None) + .expect("Failed to sign"); + + (dust_utxo, txout, sig) +} + +/// Helper to get a dynamically assigned free port. +pub fn get_available_port() -> u16 { + use std::net::TcpListener; + TcpListener::bind("127.0.0.1:0") + .expect("Could not bind to an available port") + .local_addr() + .expect("Could not get local address") + .port() +} + +// Mock implementation of the Owner trait for testing +#[derive(Debug, Clone, Default)] +pub struct MockOwner { + #[cfg(feature = "automation")] + cached_duties: std::sync::Arc>>, +} + +#[allow(unused_variables)] +impl PartialEq for MockOwner { + fn eq(&self, other: &Self) -> bool { + true // all mock owners are equal + } +} + +impl NamedEntity for MockOwner { + const ENTITY_NAME: &'static str = "test_owner"; + const TX_SENDER_CONSUMER_ID: &'static str = "test_tx_sender"; + const FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION: &'static str = + "test_finalized_block_no_automation"; + const FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION: &'static str = "test_finalized_block_automation"; +} + +#[cfg(feature = "automation")] +mod states { + use super::*; + use crate::builder::block_cache; + use crate::builder::transaction::{ContractContext, TransactionType, TxHandler}; + use crate::database::DatabaseTransaction; + use 
crate::states::context::DutyResult; + use crate::states::{Duty, Owner}; + use std::collections::BTreeMap; + use std::sync::Arc; + use tonic::async_trait; + + // Implement the Owner trait for MockOwner + #[async_trait] + impl Owner for MockOwner { + async fn handle_duty(&self, duty: Duty) -> Result { + self.cached_duties.lock().await.push(duty); + Ok(DutyResult::Handled) + } + + async fn create_txhandlers( + &self, + _tx_type: TransactionType, + _contract_context: ContractContext, + ) -> Result, BridgeError> { + Ok(BTreeMap::new()) + } + + async fn handle_finalized_block( + &self, + _dbtx: DatabaseTransaction<'_, '_>, + _block_id: u32, + _block_height: u32, + _block_cache: Arc, + _light_client_proof_wait_interval_secs: Option, + ) -> Result<(), BridgeError> { + Ok(()) + } + } +} diff --git a/core/src/test/common/test_actors.rs b/core/src/test/common/test_actors.rs new file mode 100644 index 000000000..4944b3844 --- /dev/null +++ b/core/src/test/common/test_actors.rs @@ -0,0 +1,496 @@ +//! 
Utility struct to control the actors in the test + +use crate::bitvm_client::SECP; +use crate::citrea::CitreaClientT; +use crate::config::{protocol::ProtocolParamset, BridgeConfig}; +use crate::database::Database; +use crate::musig2::AggregateFromPublicKeys; +use crate::rpc::clementine::clementine_aggregator_client::ClementineAggregatorClient; +use crate::rpc::clementine::clementine_operator_client::ClementineOperatorClient; +use crate::rpc::clementine::clementine_verifier_client::ClementineVerifierClient; +use crate::rpc::get_clients; +use crate::servers::{ + create_aggregator_unix_server, create_operator_unix_server, create_verifier_unix_server, +}; +use std::collections::BTreeMap; +use std::marker::PhantomData; + +use bitcoin::XOnlyPublicKey; +use tokio::sync::oneshot; +use tonic::transport::Channel; + +use super::initialize_database; + +#[derive(Debug)] +pub struct TestVerifier { + pub verifier: ClementineVerifierClient, + pub config: BridgeConfig, + pub shutdown_tx: oneshot::Sender<()>, + pub socket_path: std::path::PathBuf, + pub client_type: PhantomData, + pub secret_key: bitcoin::secp256k1::SecretKey, +} + +#[derive(Debug)] +pub struct TestOperator { + pub operator: ClementineOperatorClient, + pub config: BridgeConfig, + pub shutdown_tx: oneshot::Sender<()>, + pub socket_path: std::path::PathBuf, + pub client_type: PhantomData, + pub secret_key: bitcoin::secp256k1::SecretKey, + pub verifier_index: usize, // index of the verifier that this operator is associated with +} + +#[derive(Debug)] +pub struct TestAggregator { + pub aggregator: ClementineAggregatorClient, + pub config: BridgeConfig, + pub shutdown_tx: oneshot::Sender<()>, + pub socket_path: std::path::PathBuf, +} + +/// This struct is used to control the actors in the test. +/// It contains the verifiers, operators, and aggregator. +/// It stores various information on each actor, can add and remove actors. 
+/// After each actor set change, the aggregator is restarted with the current actor set in it's config. +/// Each verifier and operator is indexes starting from 0, according to their creation order. +/// All gRPC servers are closed if the TestActors instance is dropped. +#[derive(Debug)] +pub struct TestActors { + verifiers: BTreeMap>, + operators: BTreeMap>, + pub aggregator: TestAggregator, + /// The total number of verifiers, including deleted ones, to ensure unique numbering + pub num_total_verifiers: usize, + /// The total number of operators, including deleted ones, to ensure unique numbering + pub num_total_operators: usize, + /// The total number of aggregators, including deleted ones, to ensure unique numbering + pub num_total_aggregators: usize, + socket_dir: tempfile::TempDir, + base_config: BridgeConfig, +} + +impl TestVerifier { + /// Create a new `TestVerifier` instance. + /// + /// # Parameters + /// - `base_config`: The base configuration for all actors. For verifiers, the database name is appended with the index. + /// - `socket_dir`: The directory to store the Unix sockets. + /// - `index`: The index of the verifier (its position in `TestActors`). + /// - `secret_key`: The secret key of the verifier. + /// + /// # Returns + /// Returns a [`Result`](eyre::Result) containing the new [`TestVerifier`] instance on success, or an error if creation fails. 
+ pub async fn new( + base_config: &BridgeConfig, + socket_dir: &std::path::Path, + index: usize, + secret_key: bitcoin::secp256k1::SecretKey, + ) -> eyre::Result { + let socket_path = socket_dir.join(format!("verifier_{}.sock", index)); + let mut config_with_new_db = base_config.clone(); + config_with_new_db.db_name += &index.to_string(); + config_with_new_db.secret_key = secret_key; + initialize_database(&config_with_new_db).await; + + if config_with_new_db + .test_params + .generate_varying_total_works_insufficient_total_work + || config_with_new_db.test_params.generate_varying_total_works + || config_with_new_db + .test_params + .generate_varying_total_works_first_two_valid + { + // Generate a new protocol paramset for each verifier + // to ensure diverse total works. + let mut paramset = config_with_new_db.protocol_paramset().clone(); + paramset.time_to_send_watchtower_challenge = paramset + .time_to_send_watchtower_challenge + .checked_add(index as u16) + .expect("Failed to add time to send watchtower challenge"); + let paramset_ref: &'static ProtocolParamset = Box::leak(Box::new(paramset)); + + config_with_new_db.protocol_paramset = paramset_ref; + } + + let (socket_path, shutdown_tx) = + create_verifier_unix_server::(config_with_new_db.clone(), socket_path).await?; + + let verifier_client = get_clients( + vec![format!("unix://{}", socket_path.display())], + ClementineVerifierClient::new, + &config_with_new_db, + false, + ) + .await? + .pop() + .ok_or_else(|| eyre::eyre!("Failed to connect to verifier"))?; + + Ok(TestVerifier { + verifier: verifier_client, + config: config_with_new_db, + shutdown_tx, + socket_path, + client_type: PhantomData, + secret_key, + }) + } +} + +impl TestOperator { + /// Create a new `TestOperator` instance. + /// + /// # Parameters + /// - `verifier_config`: The configuration of the verifier that this operator belongs to (only the secret key can be changed). + /// - `socket_dir`: The directory to store the Unix sockets. 
+ /// - `index`: The index of the operator (its position in `TestActors`). + /// - `verifier_index`: The index of the verifier that this operator belongs to (index in `TestActors`). + /// - `secret_key`: The secret key of the operator. + /// + /// # Returns + /// Returns a [`Result`](eyre::Result) containing the new [`TestOperator`] instance on success, or an error if creation fails. + pub async fn new( + verifier_config: &BridgeConfig, + socket_dir: &std::path::Path, + index: usize, + verifier_index: usize, + secret_key: bitcoin::secp256k1::SecretKey, + ) -> eyre::Result { + let socket_path = socket_dir.join(format!("operator_{}.sock", index)); + let mut operator_config = verifier_config.clone(); + operator_config.secret_key = secret_key; + + let (socket_path, shutdown_tx) = + create_operator_unix_server::(operator_config.clone(), socket_path).await?; + + let operator_client = get_clients( + vec![format!("unix://{}", socket_path.display())], + ClementineOperatorClient::new, + &operator_config, + false, + ) + .await? + .pop() + .ok_or_else(|| eyre::eyre!("Failed to connect to operator"))?; + + Ok(TestOperator { + operator: operator_client, + config: operator_config, + shutdown_tx, + socket_path, + client_type: PhantomData, + secret_key, + verifier_index, + }) + } +} + +impl TestAggregator { + /// Create a new `TestAggregator` instance, using the base_config except the verifier and operator endpoints. + /// + /// # Parameters + /// - `base_config`: The base configuration for the aggregator. + /// - `socket_dir`: The directory to store the Unix sockets. + /// - `verifier_paths`: The list of Unix socket paths for verifiers. + /// - `operator_paths`: The list of Unix socket paths for operators. + /// - `socket_suffix`: Suffix for the aggregator socket filename. + /// + /// # Returns + /// Returns a [`Result`](eyre::Result) containing the new [`TestAggregator`] instance on success, or an error if creation fails. 
+ pub async fn new( + base_config: &BridgeConfig, + socket_dir: &std::path::Path, + verifier_paths: &[std::path::PathBuf], + operator_paths: &[std::path::PathBuf], + socket_suffix: Option<&str>, + ) -> eyre::Result { + let socket_name = match socket_suffix { + Some(suffix) => format!("aggregator_{}.sock", suffix), + None => "aggregator.sock".to_string(), + }; + let aggregator_socket_path = socket_dir.join(socket_name); + + let aggregator_config = BridgeConfig { + verifier_endpoints: Some( + verifier_paths + .iter() + .map(|path| format!("unix://{}", path.display())) + .collect(), + ), + operator_endpoints: Some( + operator_paths + .iter() + .map(|path| format!("unix://{}", path.display())) + .collect(), + ), + ..base_config.clone() + }; + + let (aggregator_path, aggregator_shutdown_tx) = + create_aggregator_unix_server(aggregator_config.clone(), aggregator_socket_path) + .await?; + + let aggregator_client = get_clients( + vec![format!("unix://{}", aggregator_path.display())], + ClementineAggregatorClient::new, + &aggregator_config, + false, + ) + .await? + .pop() + .ok_or_else(|| eyre::eyre!("Failed to connect to aggregator"))?; + + Ok(TestAggregator { + aggregator: aggregator_client, + config: aggregator_config, + shutdown_tx: aggregator_shutdown_tx, + socket_path: aggregator_path, + }) + } +} + +impl TestActors { + /// Create a new `TestActors` instance. + /// The verifiers and operators are created according to the secret keys in the config. + /// + /// # Parameters + /// - `config`: The base configuration for all actors. + /// + /// # Returns + /// Returns a [`Result`](eyre::Result) containing the new [`TestActors`] instance on success, or an error if creation fails. 
+ pub async fn new(config: &BridgeConfig) -> eyre::Result { + let all_verifiers_secret_keys = &config.test_params.all_verifiers_secret_keys; + let all_operators_secret_keys = &config.test_params.all_operators_secret_keys; + + // Create temporary directory for Unix sockets + let socket_dir = tempfile::tempdir()?; + + // Create verifiers + let mut verifiers = BTreeMap::new(); + for (i, &secret_key) in all_verifiers_secret_keys.iter().enumerate() { + let verifier = TestVerifier::new(config, socket_dir.path(), i, secret_key).await?; + verifiers.insert(i, verifier); + } + + // Create operators + let mut operators = BTreeMap::new(); + for (i, &secret_key) in all_operators_secret_keys.iter().enumerate() { + let base_config = &verifiers[&i].config; + let operator = + TestOperator::new(base_config, socket_dir.path(), i, i, secret_key).await?; + operators.insert(i, operator); + } + + // Collect paths for aggregator + let verifier_paths: Vec<_> = verifiers.values().map(|v| v.socket_path.clone()).collect(); + let operator_paths: Vec<_> = operators.values().map(|o| o.socket_path.clone()).collect(); + + // Create aggregator + let aggregator = TestAggregator::new( + &verifiers[&0].config, + socket_dir.path(), + &verifier_paths, + &operator_paths, + None, + ) + .await?; + + let num_total_verifiers = all_verifiers_secret_keys.len(); + let num_total_operators = all_operators_secret_keys.len(); + let num_total_aggregators = 1; + + Ok(TestActors { + verifiers, + operators, + aggregator, + num_total_verifiers, + num_total_operators, + num_total_aggregators, + socket_dir, + base_config: config.clone(), + }) + } + + pub fn get_operator_client_by_index(&self, index: usize) -> ClementineOperatorClient { + self.operators[&index].operator.clone() + } + + pub fn get_verifier_client_by_index(&self, index: usize) -> ClementineVerifierClient { + self.verifiers[&index].verifier.clone() + } + + pub async fn get_operator_db_and_xonly_pk_by_index( + &self, + index: usize, + ) -> (Database, 
XOnlyPublicKey) { + let operator = &self.operators[&index]; + let db = Database::new(&operator.config).await.unwrap(); + let xonly_pk = operator.secret_key.x_only_public_key(&SECP).0; + (db, xonly_pk) + } + + pub fn get_aggregator(&self) -> ClementineAggregatorClient { + self.aggregator.aggregator.clone() + } + + pub fn get_num_verifiers(&self) -> usize { + self.verifiers.len() + } + pub fn get_num_operators(&self) -> usize { + self.operators.len() + } + + pub fn get_verifiers(&self) -> Vec> { + self.verifiers + .values() + .map(|v| v.verifier.clone()) + .collect() + } + + pub fn get_operators(&self) -> Vec> { + self.operators + .values() + .map(|o| o.operator.clone()) + .collect() + } + + /// Restart the aggregator by creating a new one with the current verifier and operator endpoints + pub async fn restart_aggregator(&mut self) -> eyre::Result<()> { + // Collect current paths for aggregator + let verifier_paths: Vec<_> = self + .verifiers + .values() + .map(|v| v.socket_path.clone()) + .collect(); + let operator_paths: Vec<_> = self + .operators + .values() + .map(|o| o.socket_path.clone()) + .collect(); + + // Create new aggregator + self.num_total_aggregators += 1; + let suffix = self.num_total_aggregators.to_string(); + let new_aggregator = TestAggregator::new( + &self.aggregator.config, + self.socket_dir.path(), + &verifier_paths, + &operator_paths, + Some(&suffix), + ) + .await?; + + // Update the aggregator field + self.aggregator = new_aggregator; + Ok(()) + } + + /// Remove a verifier with the given index and restarts the aggregator with the current actor set. + /// Returns an error if the verifier is the first verifier (which is used by the aggregator) + /// or if there is an operator associated with the verifier. If there is an operator associated + /// with the verifier, the operator needs to be removed first. 
+ pub async fn remove_verifier(&mut self, index: usize) -> eyre::Result<()> { + if index == 0 { + // can't remove the first verifier as first verifier is used by aggregator + return Err(eyre::eyre!( + "Cannot remove the first verifier, its aggregator's verifier" + )); + } + if let Some((operator_index, _)) = self + .operators + .iter() + .find(|(_, o)| o.verifier_index == index) + { + return Err(eyre::eyre!( + "Cannot remove verifier, verifier's operator {} is still active", + operator_index + )); + } + self.verifiers.remove(&index); + self.restart_aggregator().await?; + Ok(()) + } + + /// Remove an operator with the given index and restarts the aggregator with the current actor set. + pub async fn remove_operator(&mut self, index: usize) -> eyre::Result<()> { + self.operators.remove(&index); + self.restart_aggregator().await?; + Ok(()) + } + + /// Add a verifier with the given secret key and restarts the aggregator with the current actor set. + pub async fn add_verifier( + &mut self, + secret_key: bitcoin::secp256k1::SecretKey, + ) -> eyre::Result<()> { + let verifier = TestVerifier::new( + &self.base_config, + self.socket_dir.path(), + self.num_total_verifiers, + secret_key, + ) + .await?; + self.verifiers.insert(self.num_total_verifiers, verifier); + self.num_total_verifiers += 1; + self.restart_aggregator().await?; + Ok(()) + } + + /// Add an operator with the given secret key and verifier index and restarts the aggregator with the current actor set. 
+ pub async fn add_operator( + &mut self, + secret_key: bitcoin::secp256k1::SecretKey, + verifier_index: usize, + ) -> eyre::Result<()> { + if !self.verifiers.contains_key(&verifier_index) { + return Err(eyre::eyre!( + "Cannot add operator with verifier index {}, verifier {} does not exist", + verifier_index, + verifier_index + )); + } + let base_config = &self.verifiers[&verifier_index].config; + let operator = TestOperator::new( + base_config, + self.socket_dir.path(), + self.num_total_operators, + verifier_index, + secret_key, + ) + .await?; + self.operators.insert(self.num_total_operators, operator); + self.num_total_operators += 1; + self.restart_aggregator().await?; + Ok(()) + } + + /// Get the aggregated x-only public key of all current verifiers. + pub fn get_nofn_aggregated_xonly_pk(&self) -> eyre::Result { + let verifier_public_keys = self + .verifiers + .values() + .map(|v| v.config.secret_key.public_key(&SECP)) + .collect::>(); + let aggregated_pk = bitcoin::XOnlyPublicKey::from_musig2_pks(verifier_public_keys, None)?; + Ok(aggregated_pk) + } + + /// Get the secret keys of all current verifiers. + pub fn get_verifiers_secret_keys(&self) -> Vec { + self.verifiers.values().map(|v| v.secret_key).collect() + } + + /// Get the secret keys of all current operators. + pub fn get_operators_secret_keys(&self) -> Vec { + self.operators.values().map(|o| o.secret_key).collect() + } + + /// Get the x-only public keys of all current operators. 
+ pub fn get_operators_xonly_pks(&self) -> Vec { + self.get_operators_secret_keys() + .into_iter() + .map(|o| o.x_only_public_key(&SECP).0) + .collect() + } +} diff --git a/core/src/test/common/tx_utils.rs b/core/src/test/common/tx_utils.rs new file mode 100644 index 000000000..c56e6495f --- /dev/null +++ b/core/src/test/common/tx_utils.rs @@ -0,0 +1,373 @@ +use super::test_actors::TestActors; +use super::{mine_once_after_in_mempool, poll_until_condition}; +use crate::builder::transaction::TransactionType as TxType; +use crate::citrea::CitreaClientT; +use crate::config::BridgeConfig; +use crate::database::Database; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::rpc::clementine::SignedTxsWithType; +use crate::utils::{FeePayingType, RbfSigningInfo, TxMetadata}; +use bitcoin::consensus::{self}; +use bitcoin::{block, OutPoint, Transaction, Txid}; +use bitcoincore_rpc::RpcApi; +use eyre::{bail, Context, Result}; +use std::time::Duration; +use tokio::time::sleep; + +pub fn get_tx_from_signed_txs_with_type( + txs: &SignedTxsWithType, + tx_type: TxType, +) -> Result { + let tx = txs + .signed_txs + .iter() + .find(|tx| tx.transaction_type == Some(tx_type.into())) + .to_owned() + .unwrap_or_else(|| panic!("expected tx of type: {:?} not found", tx_type)) + .to_owned() + .raw_tx; + bitcoin::consensus::deserialize(&tx).context("expected valid tx") +} +// Cannot use ensure_async due to `Send` requirement being broken upstream +pub async fn ensure_outpoint_spent_while_waiting_for_state_mngr_sync( + rpc: &ExtendedBitcoinRpc, + outpoint: OutPoint, + actors: &TestActors, +) -> Result<(), eyre::Error> { + let mut max_blocks_to_mine = 1000; + while match rpc + .get_tx_out(&outpoint.txid, outpoint.vout, Some(false)) + .await + { + Err(_) => true, + Ok(val) => val.is_some(), + } { + rpc.mine_blocks_while_synced(1, actors).await?; + max_blocks_to_mine -= 1; + + if max_blocks_to_mine == 0 { + bail!( + "timeout while waiting for outpoint {:?} to be spent", + outpoint + 
); + } + } + rpc.get_tx_out(&outpoint.txid, outpoint.vout, Some(false)) + .await?; + + Ok(()) +} + +/// Attempts to retrieve the current block count with retry logic. +/// +/// This async function queries the blockchain info from the given RPC client, +/// retrying up to `retries` times with a fixed `delay` between attempts in case of failure. +/// +/// # Parameters +/// - `rpc`: Reference to the `ExtendedBitcoinRpc` containing the RPC client. +/// - `retries`: Maximum number of retry attempts. +/// - `delay`: Duration to wait between retries. +/// +/// # Returns +/// - `Ok(u64)`: The current block count if successful. +/// - `Err`: The final error after exhausting all retries. +/// +/// # Panics +/// This function will panic with `unreachable!()` if the retry loop completes without returning. +/// In practice, this should never happen due to the early return on success or final failure. +pub async fn retry_get_block_count( + rpc: &ExtendedBitcoinRpc, + retries: usize, + delay: Duration, +) -> Result { + for attempt in 0..retries { + match rpc.get_blockchain_info().await { + Ok(info) => return Ok(info.blocks), + Err(e) if attempt + 1 < retries => { + tracing::warn!( + "Retry {}/{} failed to get block count: {}. 
Retrying after {:?}...", + attempt + 1, + retries, + e, + delay + ); + sleep(delay).await; + } + Err(e) => return Err(eyre::Error::new(e).wrap_err("Failed to get block count")), + } + } + + unreachable!("retry loop should either return Ok or Err") +} + +pub async fn get_txid_where_utxo_is_spent_while_waiting_for_state_mngr_sync( + rpc: &ExtendedBitcoinRpc, + utxo: OutPoint, + actors: &TestActors, +) -> Result { + ensure_outpoint_spent_while_waiting_for_state_mngr_sync(rpc, utxo, actors).await?; + let remaining_block_count = 30; + // look for the txid in the last 30 blocks + for i in 0..remaining_block_count { + let current_height = rpc.get_block_count().await?; + if current_height < i { + bail!( + "Not enough blocks mined to look for the utxo in the last {} blocks", + remaining_block_count + ); + } + let hash = rpc.get_block_hash(current_height - i).await?; + let block: block::Block = rpc.get_block(&hash).await?; + if let Some(tx) = block + .txdata + .iter() + .find(|txid| txid.input.iter().any(|input| input.previous_output == utxo)) + { + return Ok(tx.compute_txid()); + } + } + bail!( + "utxo {:?} not found in the last {} blocks", + utxo, + remaining_block_count + ); +} + +// Polls until a tx that spends the outpoint is in the mempool, without mining any blocks +// After outpoint is spent, mine once to spend the utxo on chain +pub async fn mine_once_after_outpoint_spent_in_mempool( + rpc: &ExtendedBitcoinRpc, + outpoint: OutPoint, +) -> Result<(), eyre::Error> { + let mut timeout_counter = 300; + while rpc + .get_tx_out(&outpoint.txid, outpoint.vout, Some(true)) + .await + .unwrap() + .is_some() + { + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + timeout_counter -= 1; + + if timeout_counter == 0 { + bail!( + "timeout while waiting for outpoint {:?} to be spent in mempool", + outpoint + ); + } + } + rpc.mine_blocks(1).await?; + if rpc + .get_tx_out(&outpoint.txid, outpoint.vout, Some(false)) + .await? 
+ .is_some() + { + bail!("Outpoint {:?} was not spent after waiting until it was spent in mempool and mining once", outpoint); + } + + Ok(()) +} + +#[cfg(feature = "automation")] +// Helper function to send a transaction and mine a block +pub async fn send_tx( + tx_sender: &crate::tx_sender::TxSenderClient, + rpc: &ExtendedBitcoinRpc, + raw_tx: &[u8], + tx_type: TxType, + rbf_info: Option, +) -> Result<()> { + let tx: Transaction = consensus::deserialize(raw_tx).context("expected valid tx")?; + let mut dbtx = tx_sender.test_dbtx().await.unwrap(); + + // Try to send the transaction with CPFP first + tx_sender + .insert_try_to_send( + &mut dbtx, + Some(TxMetadata { + tx_type, + deposit_outpoint: None, + kickoff_idx: None, + operator_xonly_pk: None, + round_idx: None, + }), + &tx, + if tx_type == TxType::Challenge || matches!(tx_type, TxType::WatchtowerChallenge(_)) { + FeePayingType::RBF + } else { + FeePayingType::CPFP + }, + rbf_info, + &[], + &[], + &[], + &[], + ) + .await + .expect("failed to send tx"); + + dbtx.commit().await?; + + if matches!(tx_type, TxType::Challenge | TxType::WatchtowerChallenge(_)) { + ensure_outpoint_spent(rpc, tx.input[0].previous_output).await?; + } else { + ensure_tx_onchain(rpc, tx.compute_txid()).await?; + } + + Ok(()) +} + +/// Helper function that ensures that utxo is spent then gets the txid where it was spent +/// Be careful that this function will only work if utxo is not already spent. 
+pub async fn get_txid_where_utxo_is_spent( + rpc: &ExtendedBitcoinRpc, + utxo: OutPoint, +) -> Result { + ensure_outpoint_spent(rpc, utxo).await?; + let current_height = rpc.get_block_count().await?; + let hash = rpc.get_block_hash(current_height).await?; + let block = rpc.get_block(&hash).await?; + let tx = block + .txdata + .iter() + .find(|txid| txid.input.iter().any(|input| input.previous_output == utxo)) + .ok_or(eyre::eyre!( + "utxo not found in block where utxo was supposedly spent" + ))?; + Ok(tx.compute_txid()) +} + +pub async fn ensure_tx_onchain(rpc: &ExtendedBitcoinRpc, tx: Txid) -> Result<(), eyre::Error> { + poll_until_condition( + async || { + if rpc + .get_raw_transaction_info(&tx, None) + .await + .ok() + .and_then(|s| s.blockhash) + .is_some() + { + return Ok(true); + } + + // Mine more blocks and wait longer between checks - wait for fee payer tx to be sent to mempool + rpc.mine_blocks(1).await?; + // mine after tx is sent to mempool - with a timeout + let _ = mine_once_after_in_mempool(rpc, tx, Some("ensure_tx_onchain"), Some(1)).await; + Ok(false) + }, + None, + None, + ) + .await + .wrap_err("Timed out while waiting for tx to land onchain")?; + Ok(()) +} + +pub async fn ensure_outpoint_spent( + rpc: &ExtendedBitcoinRpc, + outpoint: OutPoint, +) -> Result<(), eyre::Error> { + poll_until_condition( + async || { + rpc.mine_blocks(1).await?; + rpc.is_utxo_spent(&outpoint).await.map_err(Into::into) + }, + Some(Duration::from_secs(500)), + None, + ) + .await + .wrap_err_with(|| { + format!( + "Timed out while waiting for outpoint {:?} to be spent", + outpoint + ) + })?; + + rpc.get_tx_out(&outpoint.txid, outpoint.vout, Some(false)) + .await + .wrap_err("Failed to find txout in RPC after outpoint was spent")?; + Ok(()) +} + +#[cfg(feature = "automation")] +pub async fn send_tx_with_type( + rpc: &ExtendedBitcoinRpc, + tx_sender: &crate::tx_sender::TxSenderClient, + all_txs: &SignedTxsWithType, + tx_type: TxType, +) -> Result<(), eyre::Error> { + let 
round_tx = all_txs + .signed_txs + .iter() + .find(|tx| tx.transaction_type == Some(tx_type.into())) + .unwrap(); + send_tx(tx_sender, rpc, round_tx.raw_tx.as_slice(), tx_type, None) + .await + .context(format!("failed to send {:?} transaction", tx_type))?; + Ok(()) +} + +#[cfg(feature = "automation")] +pub async fn create_tx_sender( + config: &BridgeConfig, + verifier_index: u32, +) -> Result<(crate::tx_sender::TxSenderClient, Database)> { + let verifier_config = { + let mut config = config.clone(); + config.db_name += &verifier_index.to_string(); + config + }; + let db = Database::new(&verifier_config).await?; + let tx_sender = crate::tx_sender::TxSenderClient::new( + db.clone(), + format!("tx_sender_test_{}", verifier_index), + ); + Ok((tx_sender, db)) +} + +#[cfg(feature = "automation")] +pub async fn wait_for_fee_payer_utxos_to_be_in_mempool( + rpc: &ExtendedBitcoinRpc, + db: Database, + txid: Txid, +) -> Result<(), eyre::Error> { + let rpc_clone = rpc.clone(); + poll_until_condition( + async move || { + let tx_id = db.get_id_from_txid(None, txid).await?.unwrap(); + tracing::debug!("Waiting for fee payer utxos for tx_id: {:?}", tx_id); + let fee_payer_utxos = db.get_fee_payer_utxos_for_tx(None, tx_id).await?; + tracing::debug!( + "For TXID {:?}, fee payer utxos: {:?}", + txid, + fee_payer_utxos + ); + + if fee_payer_utxos.is_empty() { + tracing::error!("No fee payer utxos found in db for txid {}", txid); + return Ok(false); + } + + for fee_payer in fee_payer_utxos.iter() { + let entry = rpc_clone.get_mempool_entry(&fee_payer.0).await; + + if entry.is_err() { + tracing::error!( + "Fee payer utxo with txid of {} is not in mempool: {:?}", + fee_payer.0, + entry + ); + return Ok(false); + } + } + + Ok(true) + }, + None, + None, + ) + .await?; + + Ok(()) +} diff --git a/core/src/test/data/bridge_config.toml b/core/src/test/data/bridge_config.toml new file mode 100644 index 000000000..ec7e9f2ab --- /dev/null +++ b/core/src/test/data/bridge_config.toml @@ -0,0 +1,88 
@@ +# Host, port and index of the current actor (operator, verifier, or watchtower) +protocol_paramset = "regtest" + +host = "127.0.0.1" +port = 17000 +index = 0 +# Secret key of the current actor (operator or verifier) +secret_key = "2222222222222222222222222222222222222222222222222222222222222222" + +winternitz_secret_key = "2222222222222222222222222222222222222222222222222222222222222222" + +# All of the verifiers public keys +verifiers_public_keys = [ + "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa", + "02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27", + "023c72addb4fdf09af94f0c94d7fe92a386a7e70cf8a1d85916386bb2535c7b1b1", + "032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991", +] +num_verifiers = 4 + +# All of the operators x-only public keys. +operators_xonly_pks = [ + "4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa", + "466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27", +] +num_operators = 2 + +operator_withdrawal_fee_sats = 100000 + +# Bitcoin node configuration options +bitcoin_rpc_url = "http://127.0.0.1:18443" +bitcoin_rpc_user = "admin" +bitcoin_rpc_password = "admin" + +# Fee rate RPC +mempool_api_host = "https://mempool.space/" +mempool_api_endpoint = "api/v1/fees/recommended" + +# PostgreSQL database credentials. +db_host = "127.0.0.1" +db_port = 5432 +db_user = "clementine" +db_password = "clementine" +db_name = "clementine" + +# Citrea RPC URL. +citrea_rpc_url = "http://127.0.0.1:12345" +citrea_light_client_prover_url = "http://127.0.0.1:12346" +citrea_chain_id = 5655 +bridge_contract_address = "3100000000000000000000000000000000000002" + +# Header chain prover's assumption to start with. 
+# header_chain_proof_path = "../core/src/test/data/first_1.bin" + +verifier_endpoints = [ + "http://127.0.0.1:17001", + "http://127.0.0.1:17002", + "http://127.0.0.1:17003", + "http://127.0.0.1:17004", +] + +security_council = "1:50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0" + + +operator_endpoints = ["http://127.0.0.1:17005", "http://127.0.0.1:17006"] + +# TLS certificate and key paths +server_cert_path = "certs/server/server.pem" +server_key_path = "certs/server/server.key" +ca_cert_path = "certs/ca/ca.pem" +client_cert_path = "certs/client/client.pem" +client_key_path = "certs/client/client.key" +aggregator_cert_path = "certs/aggregator/aggregator.pem" +client_verification = true + +# socket_path = "/" + +[telemetry] +host = "0.0.0.0" +port = 8081 + +[grpc] +max_message_size = 4194304 +timeout_secs = 43200 +tcp_keepalive_secs = 60 +req_concurrency_limit = 300 +ratelimit_req_count = 1000 +ratelimit_req_interval_secs = 60 diff --git a/core/src/test/data/deposit_state_debug.bincode b/core/src/test/data/deposit_state_debug.bincode new file mode 100755 index 000000000..229350150 Binary files /dev/null and b/core/src/test/data/deposit_state_debug.bincode differ diff --git a/core/src/test/data/deposit_state_release.bincode b/core/src/test/data/deposit_state_release.bincode new file mode 100755 index 000000000..566a0f5b4 Binary files /dev/null and b/core/src/test/data/deposit_state_release.bincode differ diff --git a/core/src/test/data/first_1.bin b/core/src/test/data/first_1.bin new file mode 100644 index 000000000..9b30a6707 Binary files /dev/null and b/core/src/test/data/first_1.bin differ diff --git a/core/src/test/data/mainnet_block_000000000000000000000c835b2adcaedc20fdf6ee440009c249452c726dafae.raw b/core/src/test/data/mainnet_block_000000000000000000000c835b2adcaedc20fdf6ee440009c249452c726dafae.raw new file mode 100644 index 000000000..f195623c3 Binary files /dev/null and 
b/core/src/test/data/mainnet_block_000000000000000000000c835b2adcaedc20fdf6ee440009c249452c726dafae.raw differ diff --git a/core/src/test/data/mainnet_block_00000000000000000000edfe523d5e2993781d2305f51218ebfc236a250792d6.raw b/core/src/test/data/mainnet_block_00000000000000000000edfe523d5e2993781d2305f51218ebfc236a250792d6.raw new file mode 100644 index 000000000..900cf7bb7 Binary files /dev/null and b/core/src/test/data/mainnet_block_00000000000000000000edfe523d5e2993781d2305f51218ebfc236a250792d6.raw differ diff --git a/core/src/test/data/mainnet_blocks_from_832000_to_833096.raw b/core/src/test/data/mainnet_blocks_from_832000_to_833096.raw new file mode 100644 index 000000000..259d75ef9 Binary files /dev/null and b/core/src/test/data/mainnet_blocks_from_832000_to_833096.raw differ diff --git a/core/src/test/data/mainnet_first_11_blocks.raw b/core/src/test/data/mainnet_first_11_blocks.raw new file mode 100644 index 000000000..5905004ee Binary files /dev/null and b/core/src/test/data/mainnet_first_11_blocks.raw differ diff --git a/core/src/test/data/protocol_paramset.toml b/core/src/test/data/protocol_paramset.toml new file mode 100644 index 000000000..e931f34e8 --- /dev/null +++ b/core/src/test/data/protocol_paramset.toml @@ -0,0 +1,59 @@ +network = "regtest" # "bitcoin", "testnet4", or "regtest" +num_round_txs = 2 +num_kickoffs_per_round = 10 +num_signed_kickoffs = 2 +bridge_amount = 1000000000 # in satoshis +kickoff_amount = 0 # in satoshis +operator_challenge_amount = 200000000 # in satoshis +collateral_funding_amount = 99000000 +kickoff_blockhash_commit_length = 40 +watchtower_challenge_bytes = 144 +winternitz_log_d = 4 +user_takes_after = 200 +operator_challenge_timeout_timelock = 144 # BLOCKS_PER_DAY +operator_challenge_nack_timelock = 432 # BLOCKS_PER_DAY * 3 +disprove_timeout_timelock = 720 # BLOCKS_PER_DAY * 5 +assert_timeout_timelock = 576 # BLOCKS_PER_DAY * 4 +operator_reimburse_timelock = 12 # BLOCKS_PER_HOUR * 2 +watchtower_challenge_timeout_timelock 
= 288 # BLOCKS_PER_DAY * 2 +time_to_send_watchtower_challenge = 216 # BLOCKS_PER_DAY * 3 / 2 +latest_blockhash_timeout_timelock = 360 # BLOCKS_PER_DAY * 5 / 2 +finality_depth = 1 +start_height = 190 +genesis_height = 0 +genesis_chain_state_hash = [ + 95, + 115, + 2, + 173, + 22, + 200, + 189, + 158, + 242, + 243, + 190, + 0, + 200, + 25, + 154, + 134, + 249, + 224, + 186, + 134, + 20, + 132, + 171, + 180, + 175, + 95, + 126, + 69, + 127, + 140, + 34, + 22, +] +header_chain_proof_batch_size = 100 +bridge_nonstandard = false diff --git a/core/src/test/data/testnet_block_000000000000045e0b1660b6445b5e5c5ab63c9a4f956be7e1e69be04fa4497b.raw b/core/src/test/data/testnet_block_000000000000045e0b1660b6445b5e5c5ab63c9a4f956be7e1e69be04fa4497b.raw new file mode 100644 index 000000000..45376bad3 Binary files /dev/null and b/core/src/test/data/testnet_block_000000000000045e0b1660b6445b5e5c5ab63c9a4f956be7e1e69be04fa4497b.raw differ diff --git a/core/src/test/deposit_and_withdraw_e2e.rs b/core/src/test/deposit_and_withdraw_e2e.rs new file mode 100644 index 000000000..cad780e85 --- /dev/null +++ b/core/src/test/deposit_and_withdraw_e2e.rs @@ -0,0 +1,1977 @@ +use super::common::citrea::get_bridge_params; +use crate::actor::Actor; +use crate::bitvm_client::SECP; +use crate::builder::address::create_taproot_address; +use crate::builder::script::SpendPath; +use crate::builder::transaction::input::{SpendableTxIn, UtxoVout}; +use crate::builder::transaction::output::UnspentTxOut; +use crate::builder::transaction::{TransactionType, TxHandlerBuilder, DEFAULT_SEQUENCE}; +use crate::citrea::{CitreaClient, CitreaClientT}; +use crate::config::protocol::{ProtocolParamset, TESTNET4_TEST_PARAMSET}; +use crate::config::BridgeConfig; +use crate::database::Database; +use crate::deposit::{BaseDepositData, DepositInfo, DepositType}; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::header_chain_prover::HeaderChainProver; +use 
crate::rpc::clementine::clementine_aggregator_client::ClementineAggregatorClient; +use crate::rpc::clementine::{ + Deposit, Empty, FeeType, FinalizedPayoutParams, KickoffId, NormalSignatureKind, + OptimisticWithdrawParams, RawSignedTx, SendMoveTxRequest, SendTxRequest, TransactionRequest, + WithdrawParams, WithdrawParamsWithSig, +}; +use crate::rpc::ecdsa_verification_sig::{OperatorWithdrawalMessage, OptimisticPayoutMessage}; +use crate::test::common::citrea::{ + get_new_withdrawal_utxo_and_register_to_citrea, register_replacement_deposit_to_citrea, + start_citrea, update_config_with_citrea_e2e_values, CitreaE2EData, MockCitreaClient, + SECRET_KEYS, +}; +use crate::test::common::clementine_utils::{ + payout_and_start_kickoff, reimburse_with_optimistic_payout, +}; +use crate::test::common::tx_utils::{ + ensure_outpoint_spent, ensure_outpoint_spent_while_waiting_for_state_mngr_sync, + ensure_tx_onchain, get_tx_from_signed_txs_with_type, get_txid_where_utxo_is_spent, + wait_for_fee_payer_utxos_to_be_in_mempool, +}; +use crate::test::common::{ + create_actors, create_regtest_rpc, generate_withdrawal_transaction_and_signature, + get_deposit_address, mine_once_after_in_mempool, poll_get, poll_until_condition, + run_single_deposit, +}; +use crate::test::common::{ + create_test_config_with_thread_name, run_multiple_deposits, run_single_replacement_deposit, +}; +use crate::test::sign::sign_withdrawal_verification_signature; +use crate::utils::initialize_logger; +use crate::{EVMAddress, UTXO}; +use async_trait::async_trait; +use bitcoin::secp256k1::SecretKey; +use bitcoin::{Address, Amount, OutPoint, Transaction, TxOut, Txid}; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::bitcoin::DEFAULT_FINALITY_DEPTH; +use citrea_e2e::config::{BatchProverConfig, LightClientProverConfig}; +use citrea_e2e::{ + config::{BitcoinConfig, SequencerConfig, TestCaseConfig, TestCaseDockerConfig}, + framework::TestFramework, + test_case::{TestCase, TestCaseRunner}, +}; +use eyre::Context; +use 
futures::future::try_join_all; +use secrecy::SecretString; +use std::str::FromStr; +use std::time::Duration; +use tokio::time::sleep; +use tonic::transport::Channel; +use tonic::Request; + +#[derive(PartialEq)] +pub enum CitreaDepositAndWithdrawE2EVariant { + GenesisHeightZero, + GenesisHeightNonZero, +} + +struct CitreaDepositAndWithdrawE2E { + variant: CitreaDepositAndWithdrawE2EVariant, +} + +#[async_trait] +impl TestCase for CitreaDepositAndWithdrawE2E { + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-txindex=1", + "-fallbackfee=0.000001", + "-rpcallowip=0.0.0.0/0", + "-dustrelayfee=0", + ], + ..Default::default() + } + } + + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_sequencer: true, + with_batch_prover: true, + with_light_client_prover: true, + with_full_node: true, + docker: TestCaseDockerConfig { + bitcoin: true, + citrea: true, + }, + ..Default::default() + } + } + + fn sequencer_config() -> SequencerConfig { + SequencerConfig { + bridge_initialize_params: get_bridge_params(), + ..Default::default() + } + } + + fn batch_prover_config() -> BatchProverConfig { + BatchProverConfig { + enable_recovery: false, + ..Default::default() + } + } + + fn light_client_prover_config() -> LightClientProverConfig { + LightClientProverConfig { + enable_recovery: false, + initial_da_height: 60, + ..Default::default() + } + } + + async fn run_test(&mut self, f: &mut TestFramework) -> citrea_e2e::Result<()> { + tracing::info!("Starting Citrea"); + + let (sequencer, full_node, lc_prover, batch_prover, da) = + start_citrea(Self::sequencer_config(), f).await.unwrap(); + + let mut config = create_test_config_with_thread_name().await; + + let lc_prover = lc_prover.unwrap(); + let batch_prover = batch_prover.unwrap(); + + update_config_with_citrea_e2e_values( + &mut config, + da, + sequencer, + Some(( + lc_prover.config.rollup.rpc.bind_host.as_str(), + lc_prover.config.rollup.rpc.bind_port, + )), + ); + + let rpc = 
ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + let citrea_client = CitreaClient::new( + config.citrea_rpc_url.clone(), + config.citrea_light_client_prover_url.clone(), + config.citrea_chain_id, + Some(SECRET_KEYS[0].to_string().parse().unwrap()), + config.citrea_request_timeout, + ) + .await + .unwrap(); + + if self.variant == CitreaDepositAndWithdrawE2EVariant::GenesisHeightNonZero { + let genesis_height: u32 = 10; + + let genesis_chain_state_hash = HeaderChainProver::get_chain_state_from_height( + rpc.clone(), + genesis_height as u64, + config.protocol_paramset().network, + ) + .await + .unwrap() + .to_hash(); + + let paramset = ProtocolParamset { + genesis_height, + genesis_chain_state_hash, + ..ProtocolParamset::default() + }; + + config.protocol_paramset = Box::leak(Box::new(paramset)); + } + + // do 2 deposits + let (mut actors, _deposit_infos, move_txids, _deposit_blockhashs, _) = + run_multiple_deposits::(&mut config, rpc.clone(), 2, None).await?; + + let citrea_e2e_data = CitreaE2EData { + sequencer, + full_node, + lc_prover, + batch_prover, + da, + config: config.clone(), + citrea_client: &citrea_client, + rpc: &rpc, + }; + + let mut withdrawal_index: u32 = 0; + + tracing::info!( + "Deposit ending block_height: {:?}", + rpc.get_block_count().await? 
+ ); + + let mut withdrawal_infos = Vec::new(); + + tracing::info!("Mining withdrawal utxos"); + for move_txid in move_txids.iter() { + let (withdrawal_utxo, payout_txout, sig) = + get_new_withdrawal_utxo_and_register_to_citrea( + *move_txid, + &citrea_e2e_data, + &actors, + ) + .await; + withdrawal_infos.push((withdrawal_index, withdrawal_utxo, payout_txout, sig)); + withdrawal_index += 1; + } + + tracing::info!("Mining withdrawal utxos done"); + + let mut reimburse_connectors = Vec::new(); + + // withdraw one with a kickoff with operator 0 + let (op0_db, op0_xonly_pk) = actors.get_operator_db_and_xonly_pk_by_index(0).await; + + tracing::info!("Paying and challenging withdrawal 0"); + reimburse_connectors.push( + payout_and_start_kickoff( + actors.get_operator_client_by_index(0), + op0_xonly_pk, + &op0_db, + withdrawal_infos[0].0, + &withdrawal_infos[0].1, + &withdrawal_infos[0].2, + &withdrawal_infos[0].3, + &citrea_e2e_data, + &actors, + ) + .await, + ); + + tracing::info!("Adding new verifier and operator"); + // add a new verifier + let new_sk = SecretKey::new(&mut bitcoin::secp256k1::rand::thread_rng()); + actors.add_verifier(new_sk).await.unwrap(); + // add a new operator too that uses the new verifier + let new_op_sk = SecretKey::new(&mut bitcoin::secp256k1::rand::thread_rng()); + let new_verifier_index = actors.num_total_verifiers - 1; + actors + .add_operator(new_op_sk, new_verifier_index) + .await + .unwrap(); + + let new_agg_key = actors.get_nofn_aggregated_xonly_pk().unwrap(); + citrea_client + .update_nofn_aggregated_key(new_agg_key, config.protocol_paramset(), sequencer) + .await + .unwrap(); + + // do 3 more deposits + tracing::info!("Running 3 more deposits"); + let ( + mut actors, + _new_deposit_infos, + new_move_txids, + _deposit_blockhashs, + _verifiers_public_keys, + ) = run_multiple_deposits::(&mut config, rpc.clone(), 3, Some(actors)) + .await?; + + tracing::info!("3 more deposits done, doing 3 more withdrawals"); + // do 3 more withdrawals + 
for move_txid in new_move_txids.iter() { + let (withdrawal_utxo, payout_txout, sig) = + get_new_withdrawal_utxo_and_register_to_citrea( + *move_txid, + &citrea_e2e_data, + &actors, + ) + .await; + withdrawal_infos.push((withdrawal_index, withdrawal_utxo, payout_txout, sig)); + withdrawal_index += 1; + } + + // do 1 kickoff with one of the new deposits using the new operator + let new_operator_index = actors.num_total_operators - 1; + let (new_operator_db, new_operator_xonly_pk) = actors + .get_operator_db_and_xonly_pk_by_index(new_operator_index) + .await; + + reimburse_connectors.push( + payout_and_start_kickoff( + actors.get_operator_client_by_index(new_operator_index), + new_operator_xonly_pk, + &new_operator_db, + withdrawal_infos[2].0, + &withdrawal_infos[2].1, + &withdrawal_infos[2].2, + &withdrawal_infos[2].3, + &citrea_e2e_data, + &actors, + ) + .await, + ); + + // do 2 optimistic payouts, 1 with old 1 with new deposit, they should both work as all verifiers that + // signed them still exist + tracing::info!("Doing optimistic payout with old deposit"); + reimburse_with_optimistic_payout( + &actors, + withdrawal_infos[1].0, + &withdrawal_infos[1].1, + &withdrawal_infos[1].2, + &withdrawal_infos[1].3, + &citrea_e2e_data, + move_txids[1], + ) + .await + .unwrap(); + + tracing::info!("Doing optimistic payout with new deposit"); + reimburse_with_optimistic_payout( + &actors, + withdrawal_infos[3].0, + &withdrawal_infos[3].1, + &withdrawal_infos[3].2, + &withdrawal_infos[3].3, + &citrea_e2e_data, + new_move_txids[1], + ) + .await + .unwrap(); + + // save old nofn, then remove verifier 2 + let old_nofn_xonly_pk = actors.get_nofn_aggregated_xonly_pk().unwrap(); + tracing::info!("Removing verifier 2"); + actors.remove_verifier(2).await.unwrap(); + + // update nofn on citrea + let new_agg_key = actors.get_nofn_aggregated_xonly_pk().unwrap(); + citrea_client + .update_nofn_aggregated_key(new_agg_key, config.protocol_paramset(), sequencer) + .await + .unwrap(); + + // 
try an optimistic payout, should fail because a verifier that signed the withdrawal was removed + tracing::info!("Trying optimistic payout with removed verifier, should fail"); + let _ = reimburse_with_optimistic_payout( + &actors, + withdrawal_infos[4].0, + &withdrawal_infos[4].1, + &withdrawal_infos[4].2, + &withdrawal_infos[4].3, + &citrea_e2e_data, + new_move_txids[2], + ) + .await + .unwrap_err(); + + // replace the deposit + tracing::info!("Replacing deposit"); + let ( + mut actors, + _replacement_deposit_info, + replacement_move_txid, + _replacement_deposit_blockhash, + ) = run_single_replacement_deposit( + &mut config, + &rpc, + new_move_txids[2], + actors, + old_nofn_xonly_pk, + ) + .await + .unwrap(); + + tracing::info!("Registering replacement deposit to Citrea"); + register_replacement_deposit_to_citrea( + &citrea_e2e_data, + replacement_move_txid, + withdrawal_infos[4].0, + &actors, + ) + .await + .unwrap(); + + // do optimistic payout with new replacement deposit, should work now + // mine blocks until the replacement deposit is processed in handle_finalized_block + loop { + tracing::info!( + "Trying to reimburse with optimistic payout for the replacement deposit" + ); + let res = reimburse_with_optimistic_payout( + &actors, + withdrawal_infos[4].0, + &withdrawal_infos[4].1, + &withdrawal_infos[4].2, + &withdrawal_infos[4].3, + &citrea_e2e_data, + replacement_move_txid, + ) + .await; + if res.is_ok() { + break; + } + rpc.mine_blocks_while_synced(1, &actors).await.unwrap(); + } + + // wait for all past kickoff reimburse connectors to be spent + tracing::info!("Waiting for all past kickoff reimburse connectors to be spent"); + for reimburse_connector in reimburse_connectors.iter() { + ensure_outpoint_spent_while_waiting_for_state_mngr_sync( + &rpc, + *reimburse_connector, + &actors, + ) + .await + .unwrap(); + } + + // remove an operator and try a deposit, it should fail because the operator is still in verifiers DB. 
+ // to make it not fail, operator data needs to be removed from verifiers DB. + // if the behavior is changed in the future, the test should be updated. + tracing::info!("Removing operator 1"); + actors.remove_operator(1).await.unwrap(); + // try to do a deposit, it should fail. + assert!(run_single_deposit::( + &mut config, + rpc.clone(), + None, + Some(actors), + None + ) + .await + .is_err()); + + Ok(()) + } +} + +/// Tests the complete deposit and withdrawal flow between Bitcoin and Citrea networks. +/// +/// # Arrange +/// * Sets up Citrea infrastructure (sequencer, prover, DA layer) +/// * Configures bridge parameters and connects to Bitcoin regtest +/// * At first there are 2 operators; 0 and 1, and 4 verifiers; 0, 1, 2, 3 +/// +/// # Act +/// * Executes 2 deposits 0 and 1 from Bitcoin to Citrea +/// * Creates 2 withdrawal utxos and registers them to Citrea, no payout performerd yet +/// * Operator 0 pays and starts the kickoff for deposit 0 +/// * New verifier 4 and new operator 2 that uses verifier 4 are added +/// * 3 new deposits are performed; 2, 3, 4 +/// * Operator 2 pays and starts the kickoff for deposit 2 +/// * Optimistic payout for deposit 1 is performed +/// * Optimistic payout for deposit 3 is performed +/// * Verifier 2 leaves the verifier set +/// * Optimistic payout for deposit 4 is attempted but fails because verifier 2 is not in signer set anymore, +/// but it is one of the nofn in deposit 4 +/// * A replacement deposit is performed for deposit 4 +/// * Optimistic payout for deposit 4 is performed with the new replacement deposit +/// * Remove operator 1, try to do a deposit, it should fail because the operator is still in verifiers DB. 
+/// * A check to see if reimburse connectors for the kickoffs created previously (for deposit 0 and 2) are spent, +/// meaning operators 0 and 2 got their funds back (the kickoff process is independent of actor set changes, they should +/// always work if the collected signatures are correct from start) +/// * Removes one operator and tries to do a deposit, it should fail because the operator is still in verifiers DB. +#[tokio::test(flavor = "multi_thread")] +#[ignore = "Run in standalone VM in CI"] +async fn citrea_deposit_and_withdraw_e2e_non_zero_genesis_height() -> citrea_e2e::Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE); + let citrea_e2e = CitreaDepositAndWithdrawE2E { + variant: CitreaDepositAndWithdrawE2EVariant::GenesisHeightNonZero, + }; + TestCaseRunner::new(citrea_e2e).run().await +} + +#[tokio::test(flavor = "multi_thread")] +#[ignore = "Ignored, currently no specific reason to test with genesis height zero"] +async fn citrea_deposit_and_withdraw_e2e() -> citrea_e2e::Result<()> { + initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG)) + .expect("Failed to initialize logger"); + std::env::set_var( + "CITREA_DOCKER_IMAGE", + "chainwayxyz/citrea-test:ca479a4147be1c3a472e76a3f117124683d81ab5", + ); + let citrea_e2e = CitreaDepositAndWithdrawE2E { + variant: CitreaDepositAndWithdrawE2EVariant::GenesisHeightZero, + }; + TestCaseRunner::new(citrea_e2e).run().await +} + +/// Tests the deposit and withdrawal flow using a mocked Citrea client in a truthful scenario. 
+/// +/// # Arrange +/// * Sets up mock Citrea client +/// * Configures bridge parameters +/// +/// # Act +/// * Executes a deposit from Bitcoin to mock Citrea +/// * Registers the deposit in the mock client +/// * Executes a withdrawal from mock Citrea back to Bitcoin +/// * Processes the payout transaction +/// +/// # Assert +/// * Verifies payout transaction is successfully created and mined +/// * Confirms payout is properly handled in database (added then removed from unhandled list) +/// * Verifies kickoff transaction is created and mined +/// * Confirms challenge output is spent via timeout (no challenge occurred) +/// * Verifies reimburse connector is spent (proper payout handling) +#[tokio::test] +async fn mock_citrea_run_truthful() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let mut citrea_client = MockCitreaClient::new( + config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + tracing::info!("Running deposit"); + + tracing::info!( + "Deposit starting block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + let (actors, _deposit_params, move_txid, _deposit_blockhash, verifiers_public_keys) = + run_single_deposit::(&mut config, rpc.clone(), None, None, None) + .await + .unwrap(); + + // sleep for 1 second + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + tracing::info!( + "Deposit ending block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + + // Send deposit to Citrea + let tx = rpc.get_raw_transaction(&move_txid, None).await.unwrap(); + let tx_info = rpc + .get_raw_transaction_info(&move_txid, None) + .await + .unwrap(); + let block = rpc.get_block(&tx_info.blockhash.unwrap()).await.unwrap(); + let _block_height = rpc + .get_block_info(&block.block_hash()) + .await + .unwrap() + .height as u64; + + 
tracing::info!("Depositing to Citrea"); + let current_block_height = rpc.get_block_count().await.unwrap(); + citrea_client + .insert_deposit_move_txid(current_block_height + 1, tx.compute_txid()) + .await; + rpc.mine_blocks(5).await.unwrap(); + + // rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + // .await + // .unwrap(); + + // Make a withdrawal + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + let (dust_utxo, payout_txout, sig) = generate_withdrawal_transaction_and_signature( + &config, + &rpc, + &withdrawal_address, + config.protocol_paramset().bridge_amount + - config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + let withdrawal_utxo = dust_utxo.outpoint; + + tracing::info!("Created withdrawal UTXO: {:?}", withdrawal_utxo); + + let current_block_height = rpc.get_block_count().await.unwrap(); + + citrea_client + .insert_withdrawal_utxo(current_block_height + 1, withdrawal_utxo) + .await; + // Mine some blocks so that block syncer counts it as finalized + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + + // rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + // .await + // .unwrap(); + + tracing::info!("Withdrawal tx sent"); + let mut operator0 = actors.get_operator_client_by_index(0); + + let withdrawal_params = WithdrawParams { + withdrawal_id: 0, + input_signature: sig.serialize().to_vec(), + input_outpoint: Some(withdrawal_utxo.into()), + output_script_pubkey: payout_txout.script_pubkey.to_bytes(), + output_amount: payout_txout.value.to_sat(), + }; + let verification_signature = sign_withdrawal_verification_signature::( + &config, + withdrawal_params.clone(), + ); + + let verification_signature_str = verification_signature.to_string(); + + loop { + let withdrawal_response = operator0 + 
.withdraw(WithdrawParamsWithSig { + withdrawal: Some(withdrawal_params.clone()), + verification_signature: Some(verification_signature_str.clone()), + }) + .await; + + tracing::info!("Withdrawal response: {:?}", withdrawal_response); + + match withdrawal_response { + Ok(_) => break, + Err(e) => tracing::info!("Withdrawal error: {:?}", e), + }; + + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + + let payout_txid = get_txid_where_utxo_is_spent(&rpc, withdrawal_utxo) + .await + .unwrap(); + tracing::info!("Payout txid: {:?}", payout_txid); + + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + + // Setup tx_sender for sending transactions + let verifier_0_config = { + let mut config = config.clone(); + config.db_name += "0"; + config + }; + + let op0_xonly_pk = verifiers_public_keys[0].x_only_public_key().0; + + let db = Database::new(&verifier_0_config) + .await + .expect("failed to create database"); + + // wait until payout part is not null + poll_until_condition( + async || { + Ok(db + .get_first_unhandled_payout_by_operator_xonly_pk(None, op0_xonly_pk) + .await? + .is_some()) + }, + Some(Duration::from_secs(20 * 60)), + Some(Duration::from_millis(200)), + ) + .await + .wrap_err("Timed out while waiting for payout to be added to unhandled list") + .unwrap(); + + tracing::info!("Waiting until payout is handled"); + // wait until payout is handled + poll_until_condition( + async || { + Ok(db + .get_first_unhandled_payout_by_operator_xonly_pk(None, op0_xonly_pk) + .await? 
+ .is_none()) + }, + Some(Duration::from_secs(20 * 60)), + Some(Duration::from_millis(200)), + ) + .await + .wrap_err("Timed out while waiting for payout to be handled") + .unwrap(); + + let kickoff_txid = db + .get_handled_payout_kickoff_txid(None, payout_txid) + .await + .unwrap() + .expect("Payout must be handled"); + + tracing::info!("Kickoff txid: {:?}", kickoff_txid); + + let reimburse_connector = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::ReimburseInKickoff.get_vout(), + }; + + let _kickoff_block_height = + mine_once_after_in_mempool(&rpc, kickoff_txid, Some("Kickoff tx"), Some(300)) + .await + .unwrap(); + + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + + // wait until the light client prover is synced to the same height + + let challenge_outpoint = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Challenge.get_vout(), + }; + + tracing::warn!("Waiting for challenge"); + let challenge_spent_txid = get_txid_where_utxo_is_spent(&rpc, challenge_outpoint) + .await + .unwrap(); + tracing::warn!("Challenge spent txid: {:?}", challenge_spent_txid); + + // check that challenge utxo was spent on timeout -> meaning challenge was not sent + let tx = rpc.get_tx_of_txid(&challenge_spent_txid).await.unwrap(); + // tx shouldn't have challenge amount sats as output as challenge timeout should be sent + assert!(tx.output[0].value != config.protocol_paramset().operator_challenge_amount); + + tracing::warn!("Ensuring reimburse connector is spent"); + // Ensure the reimburse connector is spent + ensure_outpoint_spent(&rpc, reimburse_connector) + .await + .unwrap(); + tracing::warn!("Reimburse connector spent"); +} + +// This test needs MEMPOOL_SPACE_API_KEY to be set to send nonstandard transactions to testnet4 +#[tokio::test] +#[ignore = "This is a testnet4 test. 
It needs to be run alongside a local testnet4 node with some btc in its wallet"] +async fn testnet4_mock_citrea_run_truthful() { + let mut config = create_test_config_with_thread_name().await; + config.bitcoin_rpc_url = "http://localhost:48443".to_string(); + config.bitcoin_rpc_user = SecretString::from("admin".to_string()); + config.bitcoin_rpc_password = SecretString::from("admin".to_string()); + + config.protocol_paramset = &TESTNET4_TEST_PARAMSET; + + config.test_params.all_operators_secret_keys = + vec![SecretKey::from_slice(&[12u8; 32]).unwrap()]; + + // use previous collateral funding outpoint on testnet4 so that we don't need to fund it again + config.operator_collateral_funding_outpoint = Some(OutPoint { + txid: Txid::from_str("a054cad4f2427f6659d87c11f781930cbdee74535267ebd848c628df2e3e5700") + .unwrap(), + vout: 0, + }); + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await + .unwrap(); + + let mut citrea_client = MockCitreaClient::new( + config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + // use previous withdrawal utxo so that we don't need to create a new one (if payout was already sent before, + // otherwise you need to create a new one) + let withdrawal_utxo = OutPoint { + txid: Txid::from_str("3edf392111b78fc8a90f998ec7553bd2a2afc960473a2d27c83fd8d9db8c2a68") + .unwrap(), + vout: 1, + }; + + tracing::info!("Created withdrawal UTXO: {:?}", withdrawal_utxo); + + citrea_client + .insert_withdrawal_utxo( + config.protocol_paramset().start_height as u64, + withdrawal_utxo, + ) + .await; + + tracing::info!("Running deposit"); + + tracing::info!( + "Deposit starting block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + + // use previous move txid and register it to mock citrea (if a deposit was done before) + let move_txid = + 
Txid::from_str("0176f77ab0c0a25703fc42c59e317594c6d2a2b711c680342166a9eaa02d51f1").unwrap(); + + citrea_client + .insert_deposit_move_txid(config.protocol_paramset().start_height as u64, move_txid) + .await; + + let (actors, _deposit_infos, _move_txid, _deposit_blockhash, _verifiers_public_keys) = + run_single_deposit::( + &mut config, + rpc.clone(), + None, + None, + Some(OutPoint { + // use previous deposit outpoint so that we don't need to create a new one + txid: Txid::from_str( + "93b3527dfcfe957c64a3210c04f19aaf9bfa8f5d8dd55c3e6f0613e631b8b135", + ) + .unwrap(), + vout: 1, + }), + ) + .await + .unwrap(); + + tracing::info!( + "Deposit ending block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + + // // Make a withdrawal + // let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + // let withdrawal_address = Address::p2tr( + // &SECP, + // user_sk.x_only_public_key(&SECP).0, + // None, + // config.protocol_paramset().network, + // ); + // let ( + // UTXO { + // outpoint: withdrawal_utxo, + // .. 
+ // }, + // payout_txout, + // sig, + // ) = generate_withdrawal_transaction_and_signature( + // &config, + // &rpc, + // &withdrawal_address, + // config.protocol_paramset().bridge_amount + // - config + // .operator_withdrawal_fee_sats + // .unwrap_or(Amount::from_sat(0)), + // ) + // .await; + + // tracing::info!("Withdrawal tx sent, withdrawal utxo: {:?}", withdrawal_utxo); + + // // insert withdrawal utxo into next block for mock citrea + // citrea_client + // .insert_withdrawal_utxo( + // (rpc.get_current_chain_height().await.unwrap() - TESTNET4_TEST_PARAMSET.finality_depth + // + 1) as u64, + // withdrawal_utxo, + // ) + // .await; + + // loop { + // let withdrawal_response = _operators[0] + // .withdraw(WithdrawParams { + // withdrawal_id: 0, + // input_signature: sig.serialize().to_vec(), + // input_outpoint: Some(withdrawal_utxo.into()), + // output_script_pubkey: payout_txout.script_pubkey.to_bytes(), + // output_amount: payout_txout.value.to_sat(), + // }) + // .await; + + // tracing::info!("Withdrawal response: {:?}", withdrawal_response); + + // match withdrawal_response { + // Ok(_) => break, + // Err(e) => tracing::info!("Withdrawal error: {:?}", e), + // }; + + // tokio::time::sleep(std::time::Duration::from_secs(60)).await; + // } + + // Setup tx_sender for sending transactions + let (op0_db, _) = actors.get_operator_db_and_xonly_pk_by_index(0).await; + + tracing::info!("Waiting for payout is mined and added to db"); + + // wait until payout tx is added to db + poll_until_condition( + async || { + Ok(op0_db + .get_payout_info_from_move_txid(None, move_txid) + .await + .is_ok()) + }, + Some(Duration::from_secs(300 * 60)), + Some(Duration::from_millis(2000)), + ) + .await + .wrap_err("Timed out while waiting for payout to be added to db") + .unwrap(); + + let payout_txid = op0_db + .get_payout_info_from_move_txid(None, move_txid) + .await + .unwrap() + .unwrap() + .2; + + tracing::info!("Payout txid: {:?}", payout_txid); + + // wait until payout is 
handled + poll_until_condition( + async || { + Ok(op0_db + .get_handled_payout_kickoff_txid(None, payout_txid) + .await? + .is_some()) + }, + Some(Duration::from_secs(300 * 60)), + Some(Duration::from_millis(2000)), + ) + .await + .wrap_err("Timed out while waiting for payout to be handled") + .unwrap(); + + let kickoff_txid = op0_db + .get_handled_payout_kickoff_txid(None, payout_txid) + .await + .unwrap() + .expect("Payout must be handled"); + + tracing::info!("Kickoff txid: {:?}", kickoff_txid); + + let reimburse_connector = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::ReimburseInKickoff.get_vout(), + }; + + // ensure kickoff tx is on chain + loop { + if rpc.is_tx_on_chain(&kickoff_txid).await.unwrap() { + break; + } + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + } + + tracing::warn!("Ensuring reimburse connector is spent"); + // Ensure the reimburse connector is spent + loop { + if rpc.is_utxo_spent(&reimburse_connector).await.unwrap() { + break; + } + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + } + + tracing::warn!("Reimburse connector spent"); +} + +/// Tests protocol challenge mechanism when a malicious action is detected. 
+/// +/// # Arrange +/// * Sets up mock Citrea client +/// * Executes deposit and registers it in mock client +/// +/// # Act +/// * Registers a withdrawal in mock Citrea +/// * Operator attempts malicious action by calling internal_finalized_payout +/// * Operator attempts a second malicious action with another kickoff transaction +/// +/// # Assert +/// * Verifies first kickoff transaction is challenged (challenge output has correct amount) +/// * Confirms second kickoff transaction is not challenged (prevents double-challenge) +/// * Verifies challenge spent transaction has expected challenge amount for first attempt +/// * Confirms challenge spent transaction does not have challenge amount for second attempt +#[tokio::test] +async fn mock_citrea_run_truthful_opt_payout() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let mut citrea_client = MockCitreaClient::new( + config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + tracing::info!("Running deposit"); + + tracing::info!( + "Deposit starting block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + let (actors, _deposit_params, move_txid, _deposit_blockhash, _verifiers_public_keys) = + run_single_deposit::(&mut config, rpc.clone(), None, None, None) + .await + .unwrap(); + + // sleep for 1 second + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + tracing::info!( + "Deposit ending block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + // rpc.mine_blocks(DEFAULT_FINALITY_DEPTH).await.unwrap(); + + // Send deposit to Citrea + let tx = rpc.get_raw_transaction(&move_txid, None).await.unwrap(); + let tx_info = rpc + .get_raw_transaction_info(&move_txid, None) + .await + .unwrap(); + let block = rpc.get_block(&tx_info.blockhash.unwrap()).await.unwrap(); + let _block_height = rpc + 
.get_block_info(&block.block_hash()) + .await + .unwrap() + .height as u64; + + // Make a withdrawal + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + let ( + UTXO { + outpoint: withdrawal_utxo, + .. + }, + payout_txout, + sig, + ) = generate_withdrawal_transaction_and_signature( + &config, + &rpc, + &withdrawal_address, + config.protocol_paramset().bridge_amount + - config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + let withdrawal_params = WithdrawParams { + withdrawal_id: 0, + input_signature: sig.serialize().to_vec(), + input_outpoint: Some(withdrawal_utxo.into()), + output_script_pubkey: payout_txout.script_pubkey.to_bytes(), + output_amount: payout_txout.value.to_sat(), + }; + + let verification_signature = sign_withdrawal_verification_signature::( + &config, + withdrawal_params.clone(), + ); + + let verification_signature_str = verification_signature.to_string(); + + let mut aggregator = actors.get_aggregator(); + // should give err before deposit is confirmed on citrea + assert!(aggregator + .optimistic_payout(OptimisticWithdrawParams { + withdrawal: Some(withdrawal_params.clone()), + verification_signature: Some(verification_signature_str.clone()), + }) + .await + .is_err()); + + tracing::info!("Depositing to Citrea"); + let current_block_height = rpc.get_block_count().await.unwrap(); + citrea_client + .insert_deposit_move_txid(current_block_height + 1, tx.compute_txid()) + .await; + rpc.mine_blocks(5).await.unwrap(); + + tracing::info!("Created withdrawal UTXO: {:?}", withdrawal_utxo); + + tracing::info!("Collecting deposits and withdrawals"); + + // mine 1 block to make sure the withdrawal is in the next block + // rpc.mine_blocks(1).await.unwrap(); + + let current_block_height = rpc.get_block_count().await.unwrap(); + + // should give err before withdrawal is 
confirmed on citrea + assert!(aggregator + .optimistic_payout(OptimisticWithdrawParams { + withdrawal: Some(withdrawal_params.clone()), + verification_signature: Some(verification_signature_str.clone()), + }) + .await + .is_err()); + + citrea_client + .insert_withdrawal_utxo(current_block_height + 1, withdrawal_utxo) + .await; + // Mine some blocks so that block syncer counts it as finalized + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + + tracing::info!("Withdrawal tx sent"); + + let opt_payout_tx = poll_get( + async || { + let payout_resp = aggregator + .optimistic_payout(OptimisticWithdrawParams { + withdrawal: Some(withdrawal_params.clone()), + verification_signature: Some(verification_signature_str.clone()), + }) + .await; + + match payout_resp { + Ok(payout_response) => { + tracing::info!("Withdrawal response: {:?}", payout_response); + let opt_payout_tx: Transaction = payout_response.into_inner().try_into()?; + Ok(Some(opt_payout_tx)) + } + Err(e) => { + tracing::warn!("Optimistic payout error: {:?}", e); + Ok(None) + } + } + }, + Some(std::time::Duration::from_secs(120)), + Some(std::time::Duration::from_millis(1000)), + ) + .await + .wrap_err("Withdrawal took too long") + .unwrap(); + + tracing::info!("Optimistic payout tx: {:?}", opt_payout_tx); + + tracing::info!("Ensuring move txid bridge deposit is spent"); + ensure_outpoint_spent( + &rpc, + OutPoint { + txid: move_txid, + vout: (UtxoVout::DepositInMove).get_vout(), + }, + ) + .await + .unwrap(); + tracing::info!("Bridge deposit spent"); +} + +#[tokio::test] +async fn mock_citrea_run_malicious() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let mut citrea_client = MockCitreaClient::new( + config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + tracing::info!("Running deposit"); + + 
tracing::info!( + "Deposit starting block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + let (actors, deposit_info, move_txid, _deposit_blockhash, _) = + run_single_deposit::(&mut config, rpc.clone(), None, None, None) + .await + .unwrap(); + let db = Database::new(&BridgeConfig { + db_name: config.db_name.clone() + "0", + ..config.clone() + }) + .await + .expect("failed to create database"); + + // sleep for 1 second + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + tracing::info!( + "Deposit ending block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + // rpc.mine_blocks(DEFAULT_FINALITY_DEPTH).await.unwrap(); + + // Send deposit to Citrea + let tx = rpc.get_raw_transaction(&move_txid, None).await.unwrap(); + let tx_info = rpc + .get_raw_transaction_info(&move_txid, None) + .await + .unwrap(); + let block = rpc.get_block(&tx_info.blockhash.unwrap()).await.unwrap(); + let _block_height = rpc + .get_block_info(&block.block_hash()) + .await + .unwrap() + .height as u64; + + tracing::info!("Depositing to Citrea"); + let current_block_height = rpc.get_block_count().await.unwrap(); + citrea_client + .insert_deposit_move_txid(current_block_height + 1, tx.compute_txid()) + .await; + rpc.mine_blocks(5).await.unwrap(); + + // rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + // .await + // .unwrap(); + + // Make a withdrawal + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + let ( + UTXO { + outpoint: withdrawal_utxo, + .. 
+ }, + _payout_txout, + _sig, + ) = generate_withdrawal_transaction_and_signature( + &config, + &rpc, + &withdrawal_address, + config.protocol_paramset().bridge_amount + - config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + citrea_client + .insert_withdrawal_utxo(current_block_height + 1, withdrawal_utxo) + .await; + + // Mine some blocks so that block syncer counts it as finalized + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + + let mut operator0 = actors.get_operator_client_by_index(0); + let kickoff_txid: bitcoin::Txid = operator0 + .internal_finalized_payout(FinalizedPayoutParams { + payout_blockhash: vec![0u8; 32], + deposit_outpoint: Some(deposit_info.deposit_outpoint.into()), + }) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + tracing::info!("Kickoff txid: {:?}", kickoff_txid); + + let _kickoff_block_height = + mine_once_after_in_mempool(&rpc, kickoff_txid, Some("Kickoff tx"), Some(1800)) + .await + .unwrap(); + + let challenge_outpoint = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::Challenge.get_vout(), + }; + + let challenge_spent_txid = get_txid_where_utxo_is_spent(&rpc, challenge_outpoint) + .await + .unwrap(); + + tracing::info!("Challenge outpoint spent txid: {:?}", challenge_spent_txid); + + // check that challenge utxo was not spent on timeout -> meaning challenge was sent + let tx = rpc.get_tx_of_txid(&challenge_spent_txid).await.unwrap(); + + // tx should have challenge amount output + if tx.output.len() == 1 + && tx.output[0].value != config.protocol_paramset().operator_challenge_amount + { + panic!("Challenge amount output is not correct, likely challenge timed out."); + } + assert!(tx.output[0].value == config.protocol_paramset().operator_challenge_amount); + // send second kickoff tx + let kickoff_txid_2: bitcoin::Txid = operator0 + 
.internal_finalized_payout(FinalizedPayoutParams { + payout_blockhash: vec![0u8; 32], + deposit_outpoint: Some(deposit_info.deposit_outpoint.into()), + }) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + wait_for_fee_payer_utxos_to_be_in_mempool(&rpc, db, kickoff_txid_2) + .await + .unwrap(); + rpc.mine_blocks(1).await.unwrap(); + let _kickoff_block_height2 = + mine_once_after_in_mempool(&rpc, kickoff_txid_2, Some("Kickoff tx2"), Some(1800)) + .await + .unwrap(); + + tracing::info!( + "Kickoff txid: {:?}, kickoff txid 2: {:?}", + kickoff_txid, + kickoff_txid_2 + ); + // second kickoff tx should not be challenged as a kickoff of the same round was already challenged + let challenge_outpoint_2 = OutPoint { + txid: kickoff_txid_2, + vout: UtxoVout::Challenge.get_vout(), + }; + let challenge_spent_txid_2 = get_txid_where_utxo_is_spent(&rpc, challenge_outpoint_2) + .await + .unwrap(); + let tx_2 = rpc.get_tx_of_txid(&challenge_spent_txid_2).await.unwrap(); + // tx_2 should not have challenge amount output + assert!(tx_2.output[0].value != config.protocol_paramset().operator_challenge_amount); +} + +/// Tests protocol safety when an operator exits before a challenge can be made. 
+/// +/// # Arrange +/// * Sets up mock Citrea client +/// * Executes deposit and registers it in mock client +/// +/// # Act +/// * Registers a withdrawal in mock Citrea +/// * Operator burns collateral (exits protocol) +/// * Operator attempts malicious action by calling internal_finalized_payout after exit +/// +/// # Assert +/// * Verifies kickoff transaction is created and mined +/// * Confirms challenge output is not spent on a challenge (operator already exited) +/// * Verifies challenge spent transaction does not have challenge amount +/// * Demonstrates protocol safety by preventing challenges after operator exit +#[tokio::test] +async fn mock_citrea_run_malicious_after_exit() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let mut citrea_client = MockCitreaClient::new( + config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + tracing::info!("Running deposit"); + + tracing::info!( + "Deposit starting block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + let (actors, deposit_info, move_txid, _deposit_blockhash, verifier_pks) = + run_single_deposit::(&mut config, rpc.clone(), None, None, None) + .await + .unwrap(); + + // sleep for 1 second + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + tracing::info!( + "Deposit ending block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + + // Send deposit to Citrea + let tx = rpc.get_raw_transaction(&move_txid, None).await.unwrap(); + let tx_info = rpc + .get_raw_transaction_info(&move_txid, None) + .await + .unwrap(); + let block = rpc.get_block(&tx_info.blockhash.unwrap()).await.unwrap(); + let _block_height = rpc + .get_block_info(&block.block_hash()) + .await + .unwrap() + .height as u64; + + tracing::info!("Depositing to Citrea"); + let current_block_height = 
rpc.get_block_count().await.unwrap(); + citrea_client + .insert_deposit_move_txid(current_block_height + 1, tx.compute_txid()) + .await; + + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + + // Make a withdrawal + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + let ( + UTXO { + outpoint: withdrawal_utxo, + .. + }, + _payout_txout, + _sig, + ) = generate_withdrawal_transaction_and_signature( + &config, + &rpc, + &withdrawal_address, + config.protocol_paramset().bridge_amount + - config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + citrea_client + .insert_withdrawal_utxo(current_block_height + 1, withdrawal_utxo) + .await; + + // Mine some blocks so that block syncer counts it as finalized + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + + // operator 0's signer + let actor = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let mut operator0 = actors.get_operator_client_by_index(0); + let first_round_txs = operator0 + .internal_create_signed_txs(TransactionRequest { + deposit_outpoint: Some(deposit_info.deposit_outpoint.into()), + kickoff_id: Some(KickoffId { + round_idx: 1, + operator_xonly_pk: verifier_pks[0].x_only_public_key().0.serialize().to_vec(), + kickoff_idx: 0, + }), + }) + .await + .unwrap() + .into_inner(); + + // get first round's tx + let round_tx = + get_tx_from_signed_txs_with_type(&first_round_txs, TransactionType::Round).unwrap(); + // send first round tx + let mut aggregator = actors.get_aggregator(); + aggregator + .internal_send_tx(SendTxRequest { + raw_tx: Some(RawSignedTx { + raw_tx: bitcoin::consensus::serialize(&round_tx), + }), + fee_type: FeeType::Cpfp as i32, + }) + .await + .unwrap(); + 
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
+    let round_txid = round_tx.compute_txid();
+    ensure_tx_onchain(&rpc, round_txid).await.unwrap();
+    tracing::warn!("Round tx sent");
+
+    let op_xonly_pk = actor.xonly_public_key;
+    let (_op_address, op_spend) =
+        create_taproot_address(&[], Some(op_xonly_pk), config.protocol_paramset().network);
+
+    let mut spend_txhandler = TxHandlerBuilder::new(TransactionType::Dummy)
+        .add_input(
+            NormalSignatureKind::OperatorSighashDefault,
+            SpendableTxIn::new(
+                OutPoint {
+                    txid: round_txid,
+                    vout: 0,
+                },
+                TxOut {
+                    value: round_tx.output[0].value,
+                    script_pubkey: round_tx.output[0].script_pubkey.clone(),
+                },
+                vec![],
+                Some(op_spend),
+            ),
+            SpendPath::KeySpend,
+            DEFAULT_SEQUENCE,
+        )
+        .add_output(UnspentTxOut::from_partial(TxOut {
+            value: round_tx.output[0].value - Amount::from_sat(1000),
+            script_pubkey: round_tx.output[0].script_pubkey.clone(),
+        }))
+        .finalize();
+
+    actor
+        .tx_sign_and_fill_sigs(&mut spend_txhandler, &[], None)
+        .unwrap();
+    let spend_tx = spend_txhandler.promote().unwrap().get_cached_tx().clone();
+
+    rpc.send_raw_transaction(&spend_tx).await.unwrap();
+
+    // mine 1 block to make sure collateral burn tx lands onchain
+    rpc.mine_blocks(1).await.unwrap();
+    let deposit: Deposit = deposit_info.clone().into();
+
+    // because operator collateral was spent outside of the protocol, new deposit with this operator should be rejected
+    assert!(aggregator.new_deposit(deposit).await.is_err());
+
+    let kickoff_txid: bitcoin::Txid = operator0
+        .internal_finalized_payout(FinalizedPayoutParams {
+            payout_blockhash: vec![0u8; 32],
+            deposit_outpoint: Some(deposit_info.deposit_outpoint.into()),
+        })
+        .await
+        .unwrap()
+        .into_inner()
+        .try_into()
+        .unwrap();
+
+    let _kickoff_block_height =
+        mine_once_after_in_mempool(&rpc, kickoff_txid, Some("Kickoff tx"), Some(1800))
+            .await
+            .unwrap();
+
+    let challenge_outpoint = OutPoint {
+        txid: kickoff_txid,
+        vout: UtxoVout::Challenge.get_vout(),
+ }; + + let challenge_spent_txid = get_txid_where_utxo_is_spent(&rpc, challenge_outpoint) + .await + .unwrap(); + + // check that challenge utxo should not be spent on a challenge as operator exited the protocol + let tx = rpc.get_tx_of_txid(&challenge_spent_txid).await.unwrap(); + + assert!(tx.output[0].value != config.protocol_paramset().operator_challenge_amount); +} + +pub async fn make_concurrent_deposits( + count: usize, + rpc: &ExtendedBitcoinRpc, + config: &BridgeConfig, + verifiers_public_keys: Vec, + aggregator: &mut ClementineAggregatorClient, + citrea_client: MockCitreaClient, +) -> eyre::Result> { + let actor = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + let evm_address = EVMAddress([1; 20]); + + // Create move txs. + let mut aggregators = (0..count).map(|_| aggregator.clone()).collect::>(); + let mut move_tx_requests = Vec::new(); + let mut deposit_outpoints = Vec::new(); + for aggregator in aggregators.iter_mut() { + let (deposit_address, _) = + get_deposit_address(config, evm_address, verifiers_public_keys.clone()).unwrap(); + let deposit_outpoint = rpc + .send_to_address(&deposit_address, config.protocol_paramset().bridge_amount) + .await + .unwrap(); + deposit_outpoints.push(deposit_outpoint); + + mine_once_after_in_mempool(rpc, deposit_outpoint.txid, Some("Deposit outpoint"), None) + .await + .unwrap(); + + let deposit_info = DepositInfo { + deposit_outpoint, + deposit_type: DepositType::BaseDeposit(BaseDepositData { + evm_address, + recovery_taproot_address: actor.address.as_unchecked().to_owned(), + }), + }; + tracing::debug!( + "Creating move tx for deposit outpoint: {:?}", + deposit_info.deposit_outpoint + ); + + let deposit: Deposit = deposit_info.clone().into(); + move_tx_requests.push(aggregator.new_deposit(deposit.clone())); + } + let move_txs = try_join_all(move_tx_requests) + .await + .unwrap() + .into_iter() + .map(|encoded_move_tx| encoded_move_tx.into_inner()) + 
.collect::>(); + tracing::debug!("Move txs created: {:?}", move_txs); + + let mut deposit_requests = Vec::new(); + for (i, aggregator) in aggregators.iter_mut().enumerate() { + let request = SendMoveTxRequest { + deposit_outpoint: Some(deposit_outpoints[i].into()), + raw_tx: Some(move_txs[i].clone()), + }; + + deposit_requests.push(aggregator.send_move_to_vault_tx(request.clone())); + } + + // Send deposit requests at the same time. + let move_txids: Vec = try_join_all(deposit_requests) + .await + .unwrap() + .into_iter() + .map(|encoded_move_tx| encoded_move_tx.into_inner().try_into().unwrap()) + .collect::>(); + tracing::debug!("Move txids: {:?}", move_txids); + + sleep(Duration::from_secs(5)).await; + rpc.mine_blocks(1).await.unwrap(); + + for txid in move_txids.iter() { + let rpc = rpc.clone(); + let txid = *txid; + poll_until_condition( + async move || { + let entry = rpc.get_mempool_entry(&txid).await; + tracing::debug!("Mempool entry for txid {:?}: {:?}", txid, entry); + Ok(entry.is_ok()) + }, + Some(Duration::from_secs(120)), + None, + ) + .await + .unwrap(); + } + + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH).await.unwrap(); + + for txid in move_txids.iter() { + let rpc = rpc.clone(); + let txid = *txid; + let mut citrea_client = citrea_client.clone(); + + poll_until_condition( + async move || { + if rpc.get_mempool_entry(&txid).await.is_ok() { + return Err(eyre::eyre!( + "Txid {:?} still in mempool after mining!", + txid + )); + } + + let tx = rpc.get_raw_transaction(&txid, None).await?; + + tracing::debug!("Depositing to Citrea..."); + + let current_block_height = rpc.get_block_count().await.unwrap(); + citrea_client + .insert_deposit_move_txid(current_block_height + 1, tx.compute_txid()) + .await; + + tracing::debug!("Deposit operations are successful."); + + Ok(true) + }, + None, + None, + ) + .await + .unwrap(); + } + + Ok(move_txids) +} + +/// A typical deposit and withdrawal flow. Except each operation are done +/// multiple times and concurrently. 
This is done by creating multiple requests +/// and `await`ing them together after using [`try_join_all`]. +#[tokio::test(flavor = "multi_thread")] +async fn concurrent_deposits_and_withdrawals() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let mut citrea_client = MockCitreaClient::new( + config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + + let verifiers_public_keys: Vec = aggregator + .setup(Request::new(Empty {})) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + let count = 10; + + make_concurrent_deposits( + count, + &rpc, + &config, + verifiers_public_keys.clone(), + &mut aggregator, + citrea_client.clone(), + ) + .await + .unwrap(); + + let mut sigs = Vec::new(); + let mut withdrawal_utxos = Vec::new(); + let mut payout_txouts = Vec::new(); + for _ in 0..count { + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + + let (dust_utxo, payout_txout, sig) = generate_withdrawal_transaction_and_signature( + &config, + &rpc, + &withdrawal_address, + config.protocol_paramset().bridge_amount + - config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + let withdrawal_utxo = dust_utxo.outpoint; + + let current_block_height = rpc.get_block_count().await.unwrap(); + + citrea_client + .insert_withdrawal_utxo(current_block_height + 1, withdrawal_utxo) + .await; + + withdrawal_utxos.push(withdrawal_utxo); + payout_txouts.push(payout_txout); + sigs.push(sig); + } + + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + sleep(Duration::from_secs(10)).await; + + let 
withdrawal_input_outpoints = withdrawal_utxos.clone(); + let actors_ref = &actors; + + poll_get( + async move || { + let mut operator0s = (0..count) + .map(|_| actors_ref.get_operator_client_by_index(0)) + .collect::>(); + let mut withdrawal_requests = Vec::new(); + + for (i, operator) in operator0s.iter_mut().enumerate() { + let withdraw_params = WithdrawParams { + withdrawal_id: i as u32, + input_signature: sigs[i].serialize().to_vec(), + input_outpoint: Some(withdrawal_utxos[i].into()), + output_script_pubkey: payout_txouts[i].script_pubkey.to_bytes(), + output_amount: payout_txouts[i].value.to_sat(), + }; + let verification_signature = sign_withdrawal_verification_signature::< + OperatorWithdrawalMessage, + >(&config, withdraw_params.clone()); + + let verification_signature_str = verification_signature.to_string(); + + withdrawal_requests.push(operator.withdraw(WithdrawParamsWithSig { + withdrawal: Some(withdraw_params.clone()), + verification_signature: Some(verification_signature_str.clone()), + })); + } + + let withdrawal_txids = match try_join_all(withdrawal_requests).await { + Ok(txids) => txids, + Err(e) => { + tracing::error!("Error while processing withdrawals: {:?}", e); + return Err(eyre::eyre!("Error while processing withdrawals: {:?}", e)); + } + }; + + Ok(Some(withdrawal_txids)) + }, + Some(Duration::from_secs(240)), + None, + ) + .await + .unwrap(); + + tracing::info!("Checking if withdrawal input outpoints are spent"); + // check if withdrawal input outpoints are spent + for outpoint in withdrawal_input_outpoints.iter() { + ensure_tx_onchain(&rpc, outpoint.txid).await.unwrap(); + ensure_outpoint_spent(&rpc, *outpoint).await.unwrap(); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn concurrent_deposits_and_optimistic_payouts() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let mut citrea_client = MockCitreaClient::new( + 
config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + let actors = create_actors::(&config).await; + let mut aggregator = actors.get_aggregator(); + let verifiers_public_keys: Vec = aggregator + .setup(Request::new(Empty {})) + .await + .unwrap() + .into_inner() + .try_into() + .unwrap(); + + let count = 10; + + let move_txids = make_concurrent_deposits( + count, + &rpc, + &config, + verifiers_public_keys.clone(), + &mut aggregator, + citrea_client.clone(), + ) + .await + .unwrap(); + + let mut sigs = Vec::new(); + let mut withdrawal_utxos = Vec::new(); + let mut payout_txouts = Vec::new(); + for _ in 0..count { + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + + let (dust_utxo, payout_txout, sig) = generate_withdrawal_transaction_and_signature( + &config, + &rpc, + &withdrawal_address, + config.protocol_paramset().bridge_amount + - config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + let withdrawal_utxo = dust_utxo.outpoint; + + let current_block_height = rpc.get_block_count().await.unwrap(); + + citrea_client + .insert_withdrawal_utxo(current_block_height + 1, withdrawal_utxo) + .await; + + withdrawal_utxos.push(withdrawal_utxo); + payout_txouts.push(payout_txout); + sigs.push(sig); + } + + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + sleep(Duration::from_secs(10)).await; + + poll_until_condition( + async move || { + let mut aggregators = (0..count).map(|_| aggregator.clone()).collect::>(); + let mut withdrawal_requests = Vec::new(); + + for (i, aggregator) in aggregators.iter_mut().enumerate() { + let withdrawal_params = WithdrawParams { + withdrawal_id: i as u32, + input_signature: sigs[i].serialize().to_vec(), + input_outpoint: Some(withdrawal_utxos[i].into()), + 
output_script_pubkey: payout_txouts[i].script_pubkey.to_bytes(), + output_amount: payout_txouts[i].value.to_sat(), + }; + + let verification_signature = sign_withdrawal_verification_signature::< + OptimisticPayoutMessage, + >(&config, withdrawal_params.clone()); + + let verification_signature_str = verification_signature.to_string(); + + withdrawal_requests.push(aggregator.optimistic_payout(OptimisticWithdrawParams { + withdrawal: Some(withdrawal_params.clone()), + verification_signature: Some(verification_signature_str), + })); + } + + let opt_payout_txs = match try_join_all(withdrawal_requests).await { + Ok(txs) => txs, + Err(e) => { + tracing::error!("Error while processing withdrawals: {:?}", e); + return Ok(false); + } + }; + tracing::info!("Optimistic payout txs: {:?}", opt_payout_txs); + + Ok(true) + }, + Some(Duration::from_secs(480)), + None, + ) + .await + .unwrap(); + + poll_until_condition( + async move || { + tracing::info!("Ensuring move txid bridge deposit is spent"); + for move_txid in move_txids.clone().into_iter() { + if ensure_outpoint_spent( + &rpc, + OutPoint { + txid: move_txid, + vout: (UtxoVout::DepositInMove).get_vout(), + }, + ) + .await + .is_err() + { + return Ok(false); + } + } + + Ok(true) + }, + None, + None, + ) + .await + .unwrap(); +} diff --git a/core/src/test/full_flow.rs b/core/src/test/full_flow.rs new file mode 100644 index 000000000..70a14a6bf --- /dev/null +++ b/core/src/test/full_flow.rs @@ -0,0 +1,756 @@ +//! # Flow Tests +//! +//! This module contains tests that simulate typical flows of Clementine. 
+ +use super::common::test_actors::TestActors; +use super::common::{create_test_config_with_thread_name, tx_utils::*}; +use crate::actor::Actor; +use crate::builder::transaction::sign::get_kickoff_utxos_to_sign; +use crate::builder::transaction::TransactionType as TxType; +use crate::config::protocol::BLOCKS_PER_HOUR; +use crate::config::BridgeConfig; +use crate::database::Database; +use crate::deposit::{DepositInfo, KickoffData}; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::operator::RoundIndex; +use crate::rpc::clementine::{Empty, FinalizedPayoutParams, SignedTxsWithType, TransactionRequest}; +use crate::test::common::citrea::MockCitreaClient; +use crate::test::common::*; +use crate::tx_sender::TxSenderClient; +use crate::utils::RbfSigningInfo; +use bitcoin::hashes::Hash; +use bitcoin::{OutPoint, Txid, XOnlyPublicKey}; +use eyre::{Context, Result}; +use tonic::Request; + +const BLOCKS_PER_DAY: u64 = 144; + +/// Makes a deposit and returns the necessary clients and parameters for further testing. 
+async fn base_setup( + config: &mut BridgeConfig, + rpc: &ExtendedBitcoinRpc, +) -> Result< + ( + TestActors, + Vec, + DepositInfo, + u32, + TransactionRequest, + SignedTxsWithType, + XOnlyPublicKey, + ), + eyre::Error, +> { + let (actors, deposit_info, _move_txid, deposit_blockhash, _verifiers_public_keys) = + run_single_deposit::(config, rpc.clone(), None, None, None).await?; + let deposit_outpoint = deposit_info.deposit_outpoint; + + let mut tx_senders = Vec::new(); + for i in 0..actors.get_num_verifiers() { + let verifier_config = { + let mut config = config.clone(); + config.db_name += &i.to_string(); + config + }; + let tx_sender_db = Database::new(&verifier_config) + .await + .expect("failed to create database"); + + let tx_sender = TxSenderClient::new(tx_sender_db.clone(), format!("full_flow_{}", i)); + tx_senders.push(tx_sender); + } + + let op0_xonly_pk = Actor::new( + config + .test_params + .all_operators_secret_keys + .first() + .cloned() + .unwrap(), + config.winternitz_secret_key, + config.protocol_paramset().network, + ) + .xonly_public_key; + let kickoff_idx = get_kickoff_utxos_to_sign( + config.protocol_paramset(), + op0_xonly_pk, + deposit_blockhash, + deposit_outpoint, + )[0] as u32; + let base_tx_req = TransactionRequest { + kickoff_id: Some( + KickoffData { + operator_xonly_pk: op0_xonly_pk, + round_idx: RoundIndex::Round(0), + kickoff_idx, + } + .into(), + ), + deposit_outpoint: Some(deposit_outpoint.into()), + }; + let mut operator0 = actors.get_operator_client_by_index(0); + let all_txs = operator0 + .internal_create_signed_txs(base_tx_req.clone()) + .await? 
+ .into_inner(); + + Ok(( + actors, + tx_senders, + deposit_info, + kickoff_idx, + base_tx_req, + all_txs, + op0_xonly_pk, + )) +} + +pub async fn run_operator_end_round( + config: &mut BridgeConfig, + rpc: ExtendedBitcoinRpc, + is_challenge: bool, +) -> Result<()> { + let (actors, deposit_info, move_txid, _deposit_blockhash, _verifiers_public_keys) = + run_single_deposit::(config, rpc.clone(), None, None, None).await?; + let deposit_outpoint = deposit_info.deposit_outpoint; + + let mut operator0 = actors.get_operator_client_by_index(0); + let kickoff_txid = operator0 + .internal_finalized_payout(FinalizedPayoutParams { + payout_blockhash: [1u8; 32].to_vec(), + deposit_outpoint: Some(deposit_outpoint.into()), + }) + .await?; + + let kickoff_txid = Txid::from_byte_array(kickoff_txid.into_inner().txid.try_into().unwrap()); + + let mut operator0 = actors.get_operator_client_by_index(0); + let mut verifier1 = actors.get_verifier_client_by_index(1); + + operator0.internal_end_round(Request::new(Empty {})).await?; + + ensure_tx_onchain(&rpc, kickoff_txid).await?; + + if is_challenge { + verifier1 + .internal_handle_kickoff(Request::new(crate::rpc::clementine::Txid { + txid: kickoff_txid.to_byte_array().to_vec(), + })) + .await?; + } + + let wait_to_be_spent = if is_challenge { + OutPoint { + txid: kickoff_txid, + vout: 1, + } + } else { + OutPoint { + txid: move_txid, + vout: 0, + } + }; + ensure_outpoint_spent(&rpc, wait_to_be_spent).await?; + + Ok(()) +} + +pub async fn run_happy_path_1(config: &mut BridgeConfig, rpc: ExtendedBitcoinRpc) -> Result<()> { + tracing::info!("Starting happy path test"); + + let (actors, tx_senders, _dep_params, _kickoff_idx, base_tx_req, all_txs, op0_xonly_pk) = + base_setup(config, &rpc).await?; + + let tx_sender = tx_senders[0].clone(); + + tracing::info!("Sending round transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Round).await?; + + tracing::info!("Sending kickoff transaction"); + send_tx_with_type(&rpc, 
&tx_sender, &all_txs, TxType::Kickoff).await?; + + // Wait 1 week + rpc.mine_blocks(7 * 24 * 6).await?; + + tracing::info!("Sending challenge timeout transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::ChallengeTimeout).await?; + + // Send Ready to Reimburse Reimburse Transaction + tracing::info!("Sending ready to reimburse transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::ReadyToReimburse).await?; + + rpc.mine_blocks(6 * 24 * 2 + 1).await?; + + // Send Reimburse Generator 1 + tracing::info!("Sending round 2 transaction"); + let mut operator0 = actors.get_operator_client_by_index(0); + let all_txs_2 = operator0 + .internal_create_signed_txs(TransactionRequest { + kickoff_id: Some( + KickoffData { + operator_xonly_pk: op0_xonly_pk, + round_idx: RoundIndex::Round(1), + kickoff_idx: 0, + } + .into(), + ), + ..base_tx_req + }) + .await? + .into_inner(); + + send_tx_with_type(&rpc, &tx_sender, &all_txs_2, TxType::Round).await?; + + // Send Happy Reimburse Transaction + tracing::info!("Sending happy reimburse transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Reimburse).await?; + + tracing::info!("Reimburse transaction sent successfully"); + tracing::info!("Happy path test completed successfully"); + Ok(()) +} + +/// Happy Path 2 flow: +/// Setup Aggregator +/// Make a Deposit +/// Make a Withdrawal +/// Send Kickoff Transaction +/// Send Challenge Transaction +/// Send Watchtower Challenge Transactions +/// Send Operator Challenge Acknowledgment Transactions +/// Send Assert Transactions +/// Send Disprove Timeout Transaction +/// Send Reimburse Transaction +pub async fn run_happy_path_2(config: &mut BridgeConfig, rpc: ExtendedBitcoinRpc) -> Result<()> { + tracing::info!("Starting Happy Path 2 test"); + + let (actors, tx_senders, _deposit_info, _kickoff_idx, base_tx_req, all_txs, op0_xonly_pk) = + base_setup(config, &rpc).await?; + + let tx_sender = tx_senders[0].clone(); + + // Send Round Transaction + 
tracing::info!("Sending round transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Round).await?; + + // Send Kickoff Transaction + tracing::info!("Sending kickoff transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Kickoff).await?; + + // Send Challenge Transaction + tracing::info!("Sending challenge transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Challenge).await?; + + // Send Watchtower Challenge Transactions + for (verifier_idx, verifier) in actors.get_verifiers().iter_mut().enumerate() { + let watchtower_challenge_tx = verifier + .internal_create_watchtower_challenge(base_tx_req.clone()) + .await? + .into_inner(); + tracing::warn!( + "Sending watchtower challenge transaction for watchtower {}", + verifier_idx + ); + let rbf_info: Option = watchtower_challenge_tx + .rbf_info + .map(|rbf_rpc| rbf_rpc.try_into().unwrap()); + + tracing::warn!("Watchtower challenge rbf info: {:?}", rbf_info); + + send_tx( + &tx_senders[verifier_idx].clone(), + &rpc, + watchtower_challenge_tx.raw_tx.as_slice(), + TxType::WatchtowerChallenge(verifier_idx), + rbf_info, + ) + .await + .context(format!( + "failed to send watchtower challenge transaction for watchtower {}", + verifier_idx + ))?; + } + + // Send Operator Challenge Acknowledgment Transactions + for verifier_idx in 0..actors.get_num_verifiers() { + tracing::info!( + "Sending operator challenge ack transaction for verifier {}", + verifier_idx + ); + let mut operator0 = actors.get_operator_client_by_index(0); + let operator_challenge_ack_txs = operator0 + .internal_create_signed_txs(base_tx_req.clone()) + .await? + .into_inner(); + send_tx_with_type( + &rpc, + &tx_sender, + &operator_challenge_ack_txs, + TxType::OperatorChallengeAck(verifier_idx), + ) + .await?; + } + + // Send Assert Transactions + let mut operator0 = actors.get_operator_client_by_index(0); + let assert_txs = operator0 + .internal_create_assert_commitment_txs(base_tx_req.clone()) + .await? 
+ .into_inner(); + for (assert_idx, tx) in assert_txs.signed_txs.iter().enumerate() { + tracing::info!("Sending mini assert transaction {}", assert_idx); + send_tx( + &tx_sender, + &rpc, + tx.raw_tx.as_slice(), + TxType::MiniAssert(assert_idx), + None, + ) + .await + .context(format!( + "failed to send mini assert transaction {}", + assert_idx + ))?; + } + + rpc.mine_blocks(BLOCKS_PER_DAY * 5).await?; + // Send Disprove Timeout Transaction + tracing::info!("Sending disprove timeout transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::DisproveTimeout).await?; + + // Send Ready to Reimburse Reimburse Transaction + tracing::info!("Sending ready to reimburse transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::ReadyToReimburse).await?; + + rpc.mine_blocks(6 * 24 * 2 + 1).await?; + + // Send Reimburse Generator 1 + tracing::info!("Sending round 2 transaction"); + let all_txs_2 = operator0 + .internal_create_signed_txs(TransactionRequest { + kickoff_id: Some( + KickoffData { + operator_xonly_pk: op0_xonly_pk, + round_idx: RoundIndex::Round(1), + kickoff_idx: 0, + } + .into(), + ), + ..base_tx_req + }) + .await? 
+ .into_inner(); + + // Send Round 2 + send_tx_with_type(&rpc, &tx_sender, &all_txs_2, TxType::Round).await?; + + // Send Reimburse Transaction + tracing::info!("Sending reimburse transaction"); + let reimburse_tx = all_txs + .signed_txs + .iter() + .find(|tx| tx.transaction_type == Some(TxType::Reimburse.into())) + .unwrap(); + send_tx( + &tx_sender, + &rpc, + reimburse_tx.raw_tx.as_slice(), + TxType::Reimburse, + None, + ) + .await + .context("failed to send reimburse transaction")?; + + tracing::info!("Happy Path 2 test completed successfully"); + Ok(()) +} + +/// Simple Assert flow without watchtower challenges/acks +pub async fn run_simple_assert_flow( + config: &mut BridgeConfig, + rpc: ExtendedBitcoinRpc, +) -> Result<()> { + tracing::info!("Starting Simple Assert Flow"); + + let (actors, tx_senders, _deposit_info, _kickoff_idx, base_tx_req, all_txs, _op0_xonly_pk) = + base_setup(config, &rpc).await?; + + let tx_sender = tx_senders[0].clone(); + + // Send Round Transaction + tracing::info!("Sending round transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Round).await?; + + tracing::info!("Sending kickoff transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Kickoff).await?; + + // Send Challenge Transaction + tracing::info!("Sending challenge transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Challenge).await?; + + // Directly create and send assert transactions directly + tracing::info!("Creating and sending assert transactions directly"); + + // Get deposit data and kickoff ID for assert creation + rpc.mine_blocks(8 * BLOCKS_PER_HOUR as u64).await?; + // Create assert transactions for operator 0 + let mut operator0 = actors.get_operator_client_by_index(0); + let assert_txs = operator0 + .internal_create_assert_commitment_txs(base_tx_req) + .await? 
+ .into_inner(); + + // Ensure all assert transactions are sent in order + for tx in assert_txs.signed_txs.iter() { + tracing::info!( + "Sending assert transaction of type: {:?}", + tx.transaction_type + ); + send_tx( + &tx_sender, + &rpc, + tx.raw_tx.as_slice(), + tx.transaction_type.unwrap().try_into().unwrap(), + None, + ) + .await?; + } + + // Mine blocks to confirm transactions + rpc.mine_blocks(10).await?; + + tracing::info!("Simple Assert Flow test completed successfully"); + Ok(()) +} + +/// Bad Path 1 flow: +/// Setup Aggregator +/// Make a Deposit +/// Make a Withdrawal +/// Send Kickoff Transaction +/// Send Challenge Transaction +/// Send Watchtower Challenge Transaction +/// Send Operator Challenge Negative Acknowledgment Transaction +pub async fn run_bad_path_1(config: &mut BridgeConfig, rpc: ExtendedBitcoinRpc) -> Result<()> { + tracing::info!("Starting Bad Path 1 test"); + + let (actors, tx_senders, _dep_params, _kickoff_idx, base_tx_req, all_txs, _op0_xonly_pk) = + base_setup(config, &rpc).await?; + + let tx_sender = tx_senders[0].clone(); + + // Send Round Transaction + tracing::info!("Sending round transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Round).await?; + + // Send Kickoff Transaction + tracing::info!("Sending kickoff transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Kickoff).await?; + + // Send Challenge Transaction + tracing::info!("Sending challenge transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Challenge).await?; + + // Send Watchtower Challenge Transaction (just for the first watchtower) + // Send Watchtower Challenge Transactions + let watchtower_idx = 0; + tracing::info!( + "Sending watchtower challenge transaction for watchtower {}", + watchtower_idx + ); + let mut verifier = actors.get_verifier_client_by_index(watchtower_idx); + let watchtower_challenge_tx = verifier + .internal_create_watchtower_challenge(base_tx_req.clone()) + .await? 
+ .into_inner(); + tracing::info!( + "Sending watchtower challenge transaction for watchtower {}", + watchtower_idx + ); + let rbf_info: Option = watchtower_challenge_tx + .rbf_info + .map(|rbf_rpc| rbf_rpc.try_into().unwrap()); + send_tx( + &tx_sender, + &rpc, + watchtower_challenge_tx.raw_tx.as_slice(), + TxType::WatchtowerChallenge(watchtower_idx), + rbf_info, + ) + .await + .context(format!( + "failed to send watchtower challenge transaction for watchtower {}", + watchtower_idx + ))?; + + rpc.mine_blocks(BLOCKS_PER_DAY * 3).await?; + + // Send Operator Challenge Negative Acknowledgment Transaction + tracing::info!( + "Sending operator challenge nack transaction for watchtower {}", + watchtower_idx + ); + send_tx_with_type( + &rpc, + &tx_sender, + &all_txs, + TxType::OperatorChallengeNack(watchtower_idx), + ) + .await?; + + tracing::info!("Bad Path 1 test completed successfully"); + Ok(()) +} + +/// Bad Path 2 flow: +/// Setup Aggregator +/// Make a Deposit +/// Make a Withdrawal +/// Send Kickoff Transaction +/// Send Challenge Transaction +/// Send Watchtower Challenge Transaction +/// Send Operator Challenge Acknowledgment Transaction +/// Send Kickoff Timeout Transaction +pub async fn run_bad_path_2(config: &mut BridgeConfig, rpc: ExtendedBitcoinRpc) -> Result<()> { + tracing::info!("Starting Bad Path 2 test"); + + let (_actors, tx_senders, _dep_params, _kickoff_idx, _base_tx_req, all_txs, _op0_xonly_pk) = + base_setup(config, &rpc).await?; + + let tx_sender = tx_senders[0].clone(); + + // Send Round Transaction + tracing::info!("Sending round transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Round).await?; + + // Send Kickoff Transaction + tracing::info!("Sending kickoff transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Kickoff).await?; + + // Send Challenge Transaction + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Challenge).await?; + + // Ready to reimburse without finalized kickoff + 
send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::ReadyToReimburse).await?; + + // Kickoff is not finalized, burn + tracing::info!("Sending kickoff not finalized transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::KickoffNotFinalized).await?; + + tracing::info!("Bad Path 2 test completed successfully"); + Ok(()) +} + +/// Bad Path 3 flow: +/// Setup Aggregator +/// Make a Deposit +/// Make a Withdrawal +/// Send Kickoff Transaction +/// Send Challenge Transaction +/// Send Watchtower Challenge Transactions +/// Send Operator Challenge Acknowledgment Transactions +/// Send Assert Transactions +/// Send Disprove Transaction +pub async fn run_bad_path_3(config: &mut BridgeConfig, rpc: ExtendedBitcoinRpc) -> Result<()> { + tracing::info!("Starting Bad Path 3 test"); + + let (actors, tx_senders, _deposit_info, _kickoff_idx, _base_tx_req, all_txs, _op0_xonly_pk) = + base_setup(config, &rpc).await?; + + let tx_sender = tx_senders[0].clone(); + + // Send Round Transaction + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Round).await?; + + // Send Kickoff Transaction + tracing::info!("Sending kickoff transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Kickoff).await?; + + // Send Challenge Transaction + tracing::info!("Sending challenge transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Challenge).await?; + + // Send Watchtower Challenge Transactions + for watchtower_idx in 0..actors.get_num_verifiers() { + tracing::info!( + "Sending watchtower challenge transaction for watchtower {}", + watchtower_idx + ); + send_tx_with_type( + &rpc, + &tx_sender, + &all_txs, + TxType::WatchtowerChallenge(watchtower_idx), + ) + .await?; + } + + // Send Operator Challenge Acknowledgment Transactions + for verifier_idx in 0..actors.get_num_verifiers() { + tracing::info!( + "Sending operator challenge ack transaction for watchtower {}", + verifier_idx + ); + send_tx_with_type( + &rpc, + &tx_sender, + &all_txs, + 
TxType::OperatorChallengeAck(verifier_idx), + ) + .await?; + } + + // Send Assert Transactions + let num_asserts = crate::bitvm_client::ClementineBitVMPublicKeys::number_of_assert_txs(); + for assert_idx in 0..num_asserts { + tracing::info!("Sending mini assert transaction {}", assert_idx); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::MiniAssert(assert_idx)).await?; + } + + // Send Disprove Transaction + tracing::info!("Sending disprove transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Disprove).await?; + + tracing::info!("Bad Path 3 test completed successfully"); + Ok(()) +} + +// Operator successfully sends challenge timeout for one deposit, but doesn't +// spend its remaining kickoffs, state machine should automatically send any +// unspent kickoff connector tx to burn operators collateral +pub async fn run_unspent_kickoffs_with_state_machine( + config: &mut BridgeConfig, + rpc: ExtendedBitcoinRpc, +) -> Result<()> { + let (_actors, tx_senders, _deposit_info, _kickoff_idx, _base_tx_req, all_txs, _op0_xonly_pk) = + base_setup(config, &rpc).await?; + + let tx_sender = tx_senders[0].clone(); + + // Send Round Transaction + tracing::info!("Sending round transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::Round).await?; + + // state machine should burn the collateral after ready to reimburse tx gets sent + let ready_to_reimburse_tx = + get_tx_from_signed_txs_with_type(&all_txs, TxType::ReadyToReimburse)?; + let collateral_utxo = OutPoint { + txid: ready_to_reimburse_tx.compute_txid(), + vout: 0, + }; + + // Send Ready to Reimburse Reimburse Transaction + tracing::info!("Sending ready to reimburse transaction"); + send_tx_with_type(&rpc, &tx_sender, &all_txs, TxType::ReadyToReimburse).await?; + + let collateral_burn_txid = get_txid_where_utxo_is_spent(&rpc, collateral_utxo).await?; + + // calculate unspent kickoff tx txids and check if any of them is where collateral was spent + let is_spent_by_unspent_kickoff_tx = 
(0..config.protocol_paramset().num_kickoffs_per_round) + .map(|i| { + let tx = get_tx_from_signed_txs_with_type(&all_txs, TxType::UnspentKickoff(i)).unwrap(); + tx.compute_txid() + }) + .any(|txid| txid == collateral_burn_txid); + + assert!(is_spent_by_unspent_kickoff_tx); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test(flavor = "multi_thread")] + async fn test_simple_assert_flow() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + run_simple_assert_flow(&mut config, rpc).await.unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + // #[ignore = "Design changes in progress"] + async fn test_happy_path_1() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + run_happy_path_1(&mut config, rpc).await.unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_happy_path_2() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + run_happy_path_2(&mut config, rpc).await.unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_bad_path_1() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + run_bad_path_1(&mut config, rpc).await.unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_bad_path_2() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut 
config).await; + let rpc = regtest.rpc().clone(); + run_bad_path_2(&mut config, rpc).await.unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + #[ignore = "Disprove is not ready"] + async fn test_bad_path_3() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + run_bad_path_3(&mut config, rpc).await.unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_operator_end_round() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + run_operator_end_round(&mut config, rpc, false) + .await + .unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_operator_end_round_with_challenge() { + let mut config = create_test_config_with_thread_name().await; + config.test_params.should_run_state_manager = false; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + run_operator_end_round(&mut config, rpc, true) + .await + .unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_unspent_kickoffs_with_state_machine() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + run_unspent_kickoffs_with_state_machine(&mut config, rpc) + .await + .unwrap(); + } +} diff --git a/core/src/test/manual_reimbursement.rs b/core/src/test/manual_reimbursement.rs new file mode 100644 index 000000000..a24b734e4 --- /dev/null +++ b/core/src/test/manual_reimbursement.rs @@ -0,0 +1,302 @@ +use crate::bitvm_client::SECP; +use crate::builder::transaction::input::UtxoVout; +use crate::builder::transaction::TransactionType; +use crate::citrea::CitreaClientT; +use crate::config::BridgeConfig; 
+use crate::database::Database; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::rpc::clementine::{WithdrawParams, WithdrawParamsWithSig}; +use crate::rpc::ecdsa_verification_sig::OperatorWithdrawalMessage; +use crate::test::common::citrea::MockCitreaClient; +use crate::test::common::test_actors::TestActors; +use crate::test::common::{ + create_regtest_rpc, generate_withdrawal_transaction_and_signature, poll_until_condition, +}; +use crate::test::common::{create_test_config_with_thread_name, run_single_deposit}; +use crate::test::sign::sign_withdrawal_verification_signature; +use bitcoin::secp256k1::SecretKey; +use bitcoin::{Address, Amount, OutPoint, Transaction}; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::bitcoin::DEFAULT_FINALITY_DEPTH; +use eyre::Context; +use std::time::Duration; +use tonic::Request; + +// This test tests if operators with no-automation can get reimbursed using get_reimbursement_txs rpc endpoint. +#[tokio::test] +async fn mock_citrea_run_truthful_manual_reimbursement() { + let mut config = create_test_config_with_thread_name().await; + // set min relay fee to zero so that we do not need to CPFP + config.test_params.mine_0_fee_txs = true; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let mut citrea_client = MockCitreaClient::new( + config.citrea_rpc_url.clone(), + "".to_string(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await + .unwrap(); + + // do 2 deposits and get reimbursements + let actors = + deposit_and_get_reimbursement(&mut config, None, &rpc, &mut citrea_client, 0).await; + let _actors = + deposit_and_get_reimbursement(&mut config, Some(actors), &rpc, &mut citrea_client, 1).await; +} + +async fn deposit_and_get_reimbursement( + config: &mut BridgeConfig, + actors: Option>, + rpc: &ExtendedBitcoinRpc, + citrea_client: &mut MockCitreaClient, + withdrawal_id: u32, +) -> TestActors { + tracing::info!("Running deposit"); + + tracing::info!( 
+ "Deposit starting block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + let (actors, deposit_params, move_txid, _deposit_blockhash, verifiers_public_keys) = + run_single_deposit::(config, rpc.clone(), None, actors, None) + .await + .unwrap(); + + // sleep for 1 second + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + tracing::info!( + "Deposit ending block_height: {:?}", + rpc.get_block_count().await.unwrap() + ); + + // Send deposit to Citrea + tracing::info!("Depositing to Citrea"); + let current_block_height = rpc.get_block_count().await.unwrap(); + citrea_client + .insert_deposit_move_txid(current_block_height + 1, move_txid) + .await; + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + + // Make a withdrawal + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + let (dust_utxo, payout_txout, sig) = generate_withdrawal_transaction_and_signature( + config, + rpc, + &withdrawal_address, + config.protocol_paramset().bridge_amount + - config + .operator_withdrawal_fee_sats + .unwrap_or(Amount::from_sat(0)), + ) + .await; + + let withdrawal_utxo = dust_utxo.outpoint; + + tracing::info!("Created withdrawal UTXO: {:?}", withdrawal_utxo); + + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + + let current_block_height = rpc.get_block_count().await.unwrap(); + + citrea_client + .insert_withdrawal_utxo(current_block_height + 1, withdrawal_utxo) + .await; + // Mine some blocks so that block syncer counts it as finalized + + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + + tracing::info!("Withdrawal tx sent"); + let mut operator0 = actors.get_operator_client_by_index(0); + + // try to get reimbursement txs without a withdrawal, should return error + 
assert!(operator0 + .get_reimbursement_txs(Request::new(deposit_params.deposit_outpoint.into())) + .await + .is_err()); + + let withdrawal_params = WithdrawParams { + withdrawal_id, + input_signature: sig.serialize().to_vec(), + input_outpoint: Some(withdrawal_utxo.into()), + output_script_pubkey: payout_txout.script_pubkey.to_bytes(), + output_amount: payout_txout.value.to_sat(), + }; + + let verification_signature = sign_withdrawal_verification_signature::( + config, + withdrawal_params.clone(), + ); + let verification_signature_str = verification_signature.to_string(); + + let payout_tx = loop { + let withdrawal_response = operator0 + .withdraw(WithdrawParamsWithSig { + withdrawal: Some(withdrawal_params.clone()), + verification_signature: Some(verification_signature_str.clone()), + }) + .await; + + tracing::info!("Withdrawal response: {:?}", withdrawal_response); + + match withdrawal_response { + Ok(tx) => { + let tx: Transaction = tx.into_inner().try_into().unwrap(); + break tx; + } + Err(e) => tracing::info!("Withdrawal error: {:?}", e), + }; + + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + }; + + let payout_txid = payout_tx.compute_txid(); + tracing::info!("Payout txid: {:?}", payout_txid); + + rpc.mine_blocks(DEFAULT_FINALITY_DEPTH + 2).await.unwrap(); + + // Setup tx_sender for sending transactions + let verifier_0_config = { + let mut config = config.clone(); + config.db_name += "0"; + config + }; + + let op0_xonly_pk = verifiers_public_keys[0].x_only_public_key().0; + + let db = Database::new(&verifier_0_config) + .await + .expect("failed to create database"); + + // wait until payout part is not null + poll_until_condition( + async || { + Ok(db + .get_first_unhandled_payout_by_operator_xonly_pk(None, op0_xonly_pk) + .await? 
+ .is_some()) + }, + Some(Duration::from_secs(20 * 60)), + Some(Duration::from_millis(200)), + ) + .await + .wrap_err("Timed out while waiting for payout to be added to unhandled list") + .unwrap(); + + tracing::info!("Waiting until payout is handled"); + // wait until payout is handled + poll_until_condition( + async || { + Ok(db + .get_first_unhandled_payout_by_operator_xonly_pk(None, op0_xonly_pk) + .await? + .is_none()) + }, + Some(Duration::from_secs(20 * 60)), + Some(Duration::from_millis(200)), + ) + .await + .wrap_err("Timed out while waiting for payout to be handled") + .unwrap(); + + let kickoff_txid = db + .get_handled_payout_kickoff_txid(None, payout_txid) + .await + .unwrap() + .expect("Payout must be handled"); + + tracing::info!("Kickoff txid: {:?}", kickoff_txid); + + let reimburse_connector = OutPoint { + txid: kickoff_txid, + vout: UtxoVout::ReimburseInKickoff.get_vout(), + }; + + let mut cur_iteration = 0; + // loop until reimburse connecter is spent + while cur_iteration < 300 + && (!rpc.is_tx_on_chain(&kickoff_txid).await.unwrap() + || !rpc.is_utxo_spent(&reimburse_connector).await.unwrap()) + { + let manual_reimburse = operator0 + .get_reimbursement_txs(Request::new(deposit_params.deposit_outpoint.into())) + .await; + + match manual_reimburse { + Ok(txs) => { + let txs: Vec<(TransactionType, Transaction)> = txs.into_inner().try_into().unwrap(); + for (tx_type, tx) in txs { + tracing::warn!("Got tx: {:?}", tx_type); + tracing::warn!("Transaction: {:?}", tx); + rpc.send_raw_transaction(&tx).await.unwrap(); + // mine the tx + rpc.mine_blocks(1).await.unwrap(); + if tx_type == TransactionType::Kickoff { + rpc.mine_blocks( + config + .protocol_paramset() + .operator_challenge_timeout_timelock + as u64 + + config.protocol_paramset().finality_depth as u64 + + 2, + ) + .await + .unwrap(); + } else if tx_type == TransactionType::ReadyToReimburse { + rpc.mine_blocks( + config.protocol_paramset().operator_reimburse_timelock as u64 + + 
config.protocol_paramset().finality_depth as u64 + + 2, + ) + .await + .unwrap(); + } else if tx_type == TransactionType::BurnUnusedKickoffConnectors { + // the rpc endpoint should give an error because the BurnUnusedKickoffConnectors is not finalized yet + assert!(operator0 + .get_reimbursement_txs(Request::new( + deposit_params.deposit_outpoint.into() + )) + .await + .is_err()); + // mine blocks so that burn unused kickoff connectors is considered finalized + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + // wait a bit for btc syncer to sync + tokio::time::sleep(std::time::Duration::from_millis(2000)).await; + } else if tx_type == TransactionType::ChallengeTimeout { + // mine blocks so that challenge timeout is considered finalized + rpc.mine_blocks(config.protocol_paramset().finality_depth as u64 + 2) + .await + .unwrap(); + // wait a bit for btc syncer to sync + tokio::time::sleep(std::time::Duration::from_millis(2000)).await; + } + } + } + Err(e) => tracing::info!("Manual reimbursement error: {:?}", e), + } + rpc.mine_blocks(1).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + cur_iteration += 1; + } + + assert!(rpc.is_utxo_spent(&reimburse_connector).await.unwrap()); + + actors +} diff --git a/core/src/test/mod.rs b/core/src/test/mod.rs new file mode 100644 index 000000000..398300a61 --- /dev/null +++ b/core/src/test/mod.rs @@ -0,0 +1,59 @@ +//! Note to developer: Guard the new integration test files with the +//! `#[cfg(feature = "integration-tests")]` attribute (see #testing-clementine +//! in [`super`]). 
+ +pub mod common; +#[cfg(all(feature = "automation", feature = "integration-tests"))] +mod deposit_and_withdraw_e2e; +#[cfg(all(feature = "automation", feature = "integration-tests"))] +mod full_flow; + +#[cfg(feature = "integration-tests")] +mod musig2; + +#[cfg(not(feature = "automation"))] +mod manual_reimbursement; + +#[cfg(feature = "integration-tests")] +mod rpc_auth; +#[cfg(all(feature = "automation", feature = "integration-tests"))] +mod state_manager; + +#[cfg(feature = "integration-tests")] +mod taproot; + +#[cfg(feature = "integration-tests")] +mod withdraw; + +mod sign; + +#[cfg(all(feature = "automation", feature = "integration-tests"))] +mod additional_disprove_scripts; + +#[cfg(all(feature = "automation", feature = "integration-tests"))] +mod bitvm_disprove_scripts; + +#[cfg(all(feature = "automation", feature = "integration-tests"))] +mod bridge_circuit_test_data; + +#[cfg(feature = "integration-tests")] +mod bitvm_script; + +pub const CITREA_E2E_DOCKER_IMAGE: &str = + "chainwayxyz/citrea-test:ca479a4147be1c3a472e76a3f117124683d81ab5"; + +use ctor::ctor; + +#[ctor] +// Increases stack to 32MB for tests, since tests fail with stack overflow otherwise. +// Note that this is unsafe as using stdlib before `main` has no guarantees. +// Read more: https://docs.rs/ctor/latest/ctor/attr.ctor.html +// +// After some investigation, the stack issue was narrowed down to `risc0-zkvm`s +// prover. The CPU-based prover runs out of stack space in a parallelized accumulate +// operation. FFI function is `risc0_circuit_rv32im_cpu_accum`, which is called +// indirectly by `risc0-circuit-rv32im` in `src/prove/hal/mod.rs:205`. The stack usage +// in the failing thread is ~384700 bytes. 
+unsafe fn rust_min_stack() { + std::env::set_var("RUST_MIN_STACK", "33554432"); +} diff --git a/core/src/test/musig2.rs b/core/src/test/musig2.rs new file mode 100644 index 000000000..ec71a9031 --- /dev/null +++ b/core/src/test/musig2.rs @@ -0,0 +1,597 @@ +use crate::bitvm_client::SECP; +use crate::builder::script::{CheckSig, OtherSpendable, SpendPath, SpendableScript}; +use crate::builder::transaction::input::SpendableTxIn; +use crate::builder::transaction::output::UnspentTxOut; +use crate::builder::transaction::{TransactionType, TxHandlerBuilder, DEFAULT_SEQUENCE}; +use crate::errors::BridgeError; +use crate::musig2::{ + aggregate_nonces, aggregate_partial_signatures, AggregateFromPublicKeys, Musig2Mode, +}; +use crate::rpc::clementine::NormalSignatureKind; +use crate::test::common::*; +use crate::{ + bitvm_client, + builder::{self}, + config::BridgeConfig, + musig2::{nonce_pair, partial_sign, MuSigNoncePair}, +}; +use bitcoin::key::Keypair; +use bitcoin::secp256k1::{Message, PublicKey}; +use bitcoin::{hashes::Hash, script, Amount, TapSighashType}; +use bitcoin::{taproot, Sequence, TxOut, XOnlyPublicKey}; +use bitcoincore_rpc::RpcApi; +use secp256k1::musig::{AggregatedNonce, PartialSignature}; +use std::sync::Arc; + +#[cfg(test)] +fn get_verifiers_keys(config: &BridgeConfig) -> (Vec, XOnlyPublicKey, Vec) { + let verifiers_secret_keys = &config.test_params.all_verifiers_secret_keys; + + let verifiers_secret_public_keys: Vec = verifiers_secret_keys + .iter() + .map(|sk| Keypair::from_secret_key(&SECP, sk)) + .collect(); + + let verifier_public_keys = verifiers_secret_public_keys + .iter() + .map(|kp| kp.public_key()) + .collect::>(); + + let untweaked_xonly_pubkey = + XOnlyPublicKey::from_musig2_pks(verifier_public_keys.clone(), None).unwrap(); + + ( + verifiers_secret_public_keys, + untweaked_xonly_pubkey, + verifier_public_keys, + ) +} + +#[cfg(test)] +fn get_nonces( + verifiers_secret_public_keys: Vec, +) -> Result<(Vec, AggregatedNonce), BridgeError> { + let 
nonce_pairs: Vec = verifiers_secret_public_keys + .iter() + .map(nonce_pair) + .collect::, _>>()?; + + let agg_nonce = aggregate_nonces( + nonce_pairs + .iter() + .map(|(_, musig_pub_nonces)| musig_pub_nonces) + .collect::>() + .as_slice(), + )?; + + Ok((nonce_pairs, agg_nonce)) +} + +#[tokio::test] +async fn key_spend() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let (verifiers_secret_public_keys, untweaked_xonly_pubkey, verifier_public_keys) = + get_verifiers_keys(&config); + let (nonce_pairs, agg_nonce) = get_nonces(verifiers_secret_public_keys.clone()).unwrap(); + + let (to_address, to_address_spend) = + builder::address::create_taproot_address(&[], None, config.protocol_paramset().network); + let (from_address, from_address_spend_info) = builder::address::create_taproot_address( + &[], + Some(untweaked_xonly_pubkey), + config.protocol_paramset().network, + ); + + let utxo = rpc + .send_to_address(&from_address, Amount::from_sat(100_000_000)) + .await + .unwrap(); + let prevout = rpc.get_txout_from_outpoint(&utxo).await.unwrap(); + + let mut tx_details = TxHandlerBuilder::new(TransactionType::Dummy) + .add_input( + NormalSignatureKind::NormalSignatureUnknown, + SpendableTxIn::new(utxo, prevout, vec![], Some(from_address_spend_info.clone())), + SpendPath::Unknown, + Sequence::default(), + ) + .add_output(UnspentTxOut::new( + TxOut { + value: Amount::from_sat(99_000_000), + script_pubkey: to_address.script_pubkey(), + }, + vec![], + Some(to_address_spend.clone()), + )) + .finalize(); + + let message = Message::from_digest( + tx_details + .calculate_pubkey_spend_sighash(0, TapSighashType::Default) + .unwrap() + .to_byte_array(), + ); + let merkle_root = from_address_spend_info.merkle_root(); + assert!(merkle_root.is_none()); + + let partial_sigs: Vec = verifiers_secret_public_keys + .into_iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + 
partial_sign( + verifier_public_keys.clone(), + Some(Musig2Mode::OnlyKeySpend), + nonce_pair.0, + agg_nonce, + kp, + message, + ) + .unwrap() + }) + .collect(); + + let final_signature = aggregate_partial_signatures( + verifier_public_keys.clone(), + Some(Musig2Mode::OnlyKeySpend), + agg_nonce, + &partial_sigs, + message, + ) + .unwrap(); + + let agg_pk = + XOnlyPublicKey::from_musig2_pks(verifier_public_keys, Some(Musig2Mode::OnlyKeySpend)) + .unwrap(); + SECP.verify_schnorr(&final_signature, &message, &agg_pk) + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + + tx_details + .set_p2tr_key_spend_witness( + &taproot::Signature::from_slice(&final_signature.serialize()).unwrap(), + 0, + ) + .unwrap(); + rpc.send_raw_transaction(tx_details.get_cached_tx()) + .await + .unwrap(); +} + +#[tokio::test] + +async fn key_spend_with_script() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let (verifiers_secret_public_keys, untweaked_xonly_pubkey, verifier_public_keys) = + get_verifiers_keys(&config); + let (nonce_pairs, agg_nonce) = get_nonces(verifiers_secret_public_keys.clone()).unwrap(); + + let dummy_script = script::Builder::new().push_int(1).into_script(); + let scripts: Vec> = vec![Arc::new(OtherSpendable::new(dummy_script))]; + + let (to_address, _to_address_spend) = + builder::address::create_taproot_address(&[], None, config.protocol_paramset().network); + let (from_address, from_address_spend_info) = builder::address::create_taproot_address( + &scripts + .iter() + .map(|a| a.to_script_buf()) + .collect::>(), + Some(untweaked_xonly_pubkey), + config.protocol_paramset().network, + ); + + let utxo = rpc + .send_to_address(&from_address, Amount::from_sat(100_000_000)) + .await + .unwrap(); + let prevout = rpc.get_txout_from_outpoint(&utxo).await.unwrap(); + let mut builder = TxHandlerBuilder::new(TransactionType::Dummy); + builder = builder + .add_input( + 
NormalSignatureKind::NormalSignatureUnknown, + SpendableTxIn::new( + utxo, + prevout.clone(), + scripts.clone(), + Some(from_address_spend_info.clone()), + ), + SpendPath::Unknown, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: Amount::from_sat(99_000_000), + script_pubkey: to_address.script_pubkey(), + })); + + let mut tx_details = builder.finalize(); + let message = Message::from_digest( + tx_details + .calculate_pubkey_spend_sighash(0, TapSighashType::Default) + .unwrap() + .to_byte_array(), + ); + let merkle_root = from_address_spend_info.merkle_root().unwrap(); + + let partial_sigs: Vec = verifiers_secret_public_keys + .into_iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + partial_sign( + verifier_public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + nonce_pair.0, + agg_nonce, + kp, + message, + ) + .unwrap() + }) + .collect(); + + let final_signature = aggregate_partial_signatures( + verifier_public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + agg_nonce, + &partial_sigs, + message, + ) + .unwrap(); + + let agg_pk = XOnlyPublicKey::from_musig2_pks( + verifier_public_keys, + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + ) + .unwrap(); + + SECP.verify_schnorr(&final_signature, &message, &agg_pk) + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + + tx_details + .set_p2tr_key_spend_witness( + &taproot::Signature::from_slice(&final_signature.serialize()).unwrap(), + 0, + ) + .unwrap(); + rpc.send_raw_transaction(tx_details.get_cached_tx()) + .await + .unwrap(); +} + +#[tokio::test] + +async fn script_spend() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let (verifiers_secret_public_keys, _untweaked_xonly_pubkey, verifier_public_keys) = + get_verifiers_keys(&config); + let (nonce_pairs, agg_nonce) = get_nonces(verifiers_secret_public_keys.clone()).unwrap(); 
+ + let agg_pk = XOnlyPublicKey::from_musig2_pks(verifier_public_keys.clone(), None).unwrap(); + + let agg_xonly_pubkey = bitcoin::XOnlyPublicKey::from_slice(&agg_pk.serialize()).unwrap(); + let scripts: Vec> = vec![Arc::new(CheckSig::new(agg_xonly_pubkey))]; + + let to_address = bitcoin::Address::p2tr( + &SECP, + *bitvm_client::UNSPENDABLE_XONLY_PUBKEY, + None, + bitcoin::Network::Regtest, + ); + let (from_address, from_address_spend_info) = builder::address::create_taproot_address( + &scripts + .iter() + .map(|s| s.to_script_buf()) + .collect::>(), + None, + bitcoin::Network::Regtest, + ); + + let utxo = rpc + .send_to_address(&from_address, Amount::from_sat(100_000_000)) + .await + .unwrap(); + let prevout = rpc.get_txout_from_outpoint(&utxo).await.unwrap(); + let mut tx_details = TxHandlerBuilder::new(TransactionType::Dummy) + .add_input( + NormalSignatureKind::NormalSignatureUnknown, + SpendableTxIn::new( + utxo, + prevout.clone(), + scripts, + Some(from_address_spend_info.clone()), + ), + SpendPath::Unknown, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: Amount::from_sat(99_000_000), + script_pubkey: to_address.script_pubkey(), + })) + .finalize(); + let message = Message::from_digest( + tx_details + .calculate_script_spend_sighash_indexed(0, 0, bitcoin::TapSighashType::Default) + .unwrap() + .to_byte_array(), + ); + + let partial_sigs: Vec = verifiers_secret_public_keys + .into_iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + partial_sign( + verifier_public_keys.clone(), + None, + nonce_pair.0, + agg_nonce, + kp, + message, + ) + .unwrap() + }) + .collect(); + let final_signature = aggregate_partial_signatures( + verifier_public_keys, + None, + agg_nonce, + &partial_sigs, + message, + ) + .unwrap(); + + bitvm_client::SECP + .verify_schnorr(&final_signature, &message, &agg_xonly_pubkey) + .unwrap(); + + let witness_elements = vec![final_signature.as_ref()]; + tx_details + 
.set_p2tr_script_spend_witness(&witness_elements, 0, 0) + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + + rpc.send_raw_transaction(tx_details.get_cached_tx()) + .await + .unwrap(); +} + +/// Tests spending both key and script paths of a single P2TR UTXO. +/// +/// This test is designed to test the following, especially in the Musig2 case: +/// - The script spend is valid +/// - The key spend is valid with the tweaked aggregate public key +#[tokio::test] + +async fn key_and_script_spend() { + use bitcoin::{Network::*, *}; + + // Arrange + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + // -- Musig2 Setup -- + // Generate NofN keys + let (verifiers_secret_public_keys, _untweaked_xonly_pubkey, verifier_public_keys) = + get_verifiers_keys(&config); + // Generate NofN nonces (need two for key and script spend) + let (nonce_pairs, agg_nonce) = get_nonces(verifiers_secret_public_keys.clone()).unwrap(); + let (nonce_pairs_2, agg_nonce_2) = get_nonces(verifiers_secret_public_keys.clone()).unwrap(); + + // Aggregate Pks + let agg_pk = XOnlyPublicKey::from_musig2_pks(verifier_public_keys.clone(), None).unwrap(); + + // -- Script Setup -- + // Tapscript for script spending of NofN sig + let musig2_script = Arc::new(CheckSig::new(agg_pk)); + let scripts: Vec> = vec![musig2_script]; + + // -- UTXO Setup -- + // Both script and key spend in P2TR address + let (from_address, from_address_spend_info) = builder::address::create_taproot_address( + &scripts + .iter() + .map(|s| s.to_script_buf()) + .collect::>(), + Some(agg_pk), + bitcoin::Network::Regtest, + ); + + // Merkle root hash of Tapscript tree + let merkle_root = from_address_spend_info.merkle_root().unwrap(); + // Tweaked aggregate public key + let agg_pk_tweaked = XOnlyPublicKey::from_musig2_pks( + verifier_public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + ) + .unwrap(); + + // Create 
UTXOs + let utxo_1 = rpc + .send_to_address(&from_address, Amount::from_sat(100_000_000)) + .await + .unwrap(); + let utxo_2 = rpc + .send_to_address(&from_address, Amount::from_sat(99_999_999)) + .await + .unwrap(); + + // Get UTXOs + let prevout_1 = rpc.get_txout_from_outpoint(&utxo_1).await.unwrap(); + let prevout_2 = rpc.get_txout_from_outpoint(&utxo_2).await.unwrap(); + + // BTC address to execute test transaction to + // Doesn't matter + let to_address = bitcoin::Address::p2pkh( + PublicKey::from(bitcoin::secp256k1::PublicKey::from_x_only_public_key( + *bitvm_client::UNSPENDABLE_XONLY_PUBKEY, + key::Parity::Even, + )), + Regtest, + ); + + // Test Transactions + let mut test_txhandler_1 = TxHandlerBuilder::new(TransactionType::Dummy) + .add_input( + NormalSignatureKind::NormalSignatureUnknown, + SpendableTxIn::new( + utxo_1, + prevout_1, + scripts.clone(), + Some(from_address_spend_info.clone()), + ), + SpendPath::Unknown, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: Amount::from_sat(99_000_000), + script_pubkey: to_address.script_pubkey(), + })) + .finalize(); + + let mut test_txhandler_2 = TxHandlerBuilder::new(TransactionType::Dummy) + .add_input( + NormalSignatureKind::NormalSignatureUnknown, + SpendableTxIn::new( + utxo_2, + prevout_2, + scripts, + Some(from_address_spend_info.clone()), + ), + SpendPath::Unknown, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: Amount::from_sat(99_000_000), + script_pubkey: to_address.script_pubkey(), + })) + .finalize(); + + let sighash_1 = Message::from_digest( + test_txhandler_1 + .calculate_script_spend_sighash_indexed(0, 0, TapSighashType::Default) + .unwrap() + .to_byte_array(), + ); + let sighash_2 = Message::from_digest( + test_txhandler_2 + .calculate_pubkey_spend_sighash(0, TapSighashType::Default) + .unwrap() + .to_byte_array(), + ); + + // Act + + // Musig2 Partial Signatures + // Script Spend + let final_signature_1 = { + let partial_sigs: 
Vec = verifiers_secret_public_keys + .iter() + .zip(nonce_pairs) + .map(|(kp, nonce_pair)| { + partial_sign( + verifier_public_keys.clone(), + None, + nonce_pair.0, + agg_nonce, + *kp, + sighash_1, + ) + .unwrap() + }) + .collect(); + + // Musig2 Aggregate + aggregate_partial_signatures( + verifier_public_keys.clone(), + None, + agg_nonce, + &partial_sigs, + sighash_1, + ) + .unwrap() + }; + + // Key spend + let final_signature_2 = { + let partial_sigs: Vec = verifiers_secret_public_keys + .iter() + .zip(nonce_pairs_2) + .map(|(kp, nonce_pair)| { + partial_sign( + verifier_public_keys.clone(), + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + nonce_pair.0, + agg_nonce_2, + *kp, + sighash_2, + ) + .unwrap() + }) + .collect(); + + aggregate_partial_signatures( + verifier_public_keys, + Some(Musig2Mode::KeySpendWithScript(merkle_root)), + agg_nonce_2, + &partial_sigs, + sighash_2, + ) + .unwrap() + }; + + // Assert + + // -- Verify Script Spend -- + // Verify signature for script spend + // The script will verify the aggregate public key with the signature of sighash_1 + bitvm_client::SECP + .verify_schnorr(&final_signature_1, &sighash_1, &agg_pk) + .unwrap(); + + // Set up the witness for the script spend + let witness_elements = vec![final_signature_1.as_ref()]; + test_txhandler_1 + .set_p2tr_script_spend_witness(&witness_elements, 0, 0) + .unwrap(); + + // Mine a block to confirm previous transaction + rpc.mine_blocks(1).await.unwrap(); + + // Send the transaction + rpc.send_raw_transaction(test_txhandler_1.get_cached_tx()) + .await + .unwrap(); + + // -- Verify Key Spend -- + // Verify signature for key spend + // The key will verify the aggregate public key with the signature of sighash_2 + // The signature should be valid with the tweaked aggregate public key + bitvm_client::SECP + .verify_schnorr(&final_signature_2, &sighash_2, &agg_pk_tweaked) + .unwrap(); + + (test_txhandler_2) + .set_p2tr_key_spend_witness( + 
&taproot::Signature::from_slice(final_signature_2.as_ref()).unwrap(), + 0, + ) + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + + // Send the transaction + rpc.send_raw_transaction(test_txhandler_2.get_cached_tx()) + .await + .unwrap(); +} diff --git a/core/src/test/rpc_auth.rs b/core/src/test/rpc_auth.rs new file mode 100644 index 000000000..bcc150497 --- /dev/null +++ b/core/src/test/rpc_auth.rs @@ -0,0 +1,147 @@ +use crate::rpc::clementine::clementine_operator_client::ClementineOperatorClient; +use crate::rpc::clementine::Empty; +use crate::rpc::get_clients; +use crate::servers::create_operator_grpc_server; +use crate::test::common::citrea::MockCitreaClient; +use crate::test::common::create_regtest_rpc; +use crate::test::common::create_test_config_with_thread_name; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::path::PathBuf; +use tokio::net::TcpListener; + +// Helper function to find an available port +async fn find_available_port() -> u16 { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0); + let listener = TcpListener::bind(addr).await.unwrap(); + let port = listener.local_addr().unwrap().port(); + drop(listener); + port +} + +#[tokio::test] +async fn test_mtls_connection() -> Result<(), eyre::Report> { + let mut config = create_test_config_with_thread_name().await; + let _rpc = create_regtest_rpc(&mut config).await; + + // Find an available port for the test + let port = find_available_port().await; + let host = "127.0.0.1"; + + config.host = host.to_string(); + config.port = port; + + // Start the operator server + let (_socket_addr, _shutdown_tx) = + create_operator_grpc_server::(config.clone()).await?; + + // Connect to the server using mTLS + let endpoint = format!("https://{}:{}", host, port); + + let clients = + crate::rpc::get_clients::, _>( + vec![endpoint], + crate::rpc::operator_client_builder(&config), + &config, + true, + ) + .await?; + + // Verify that we have one client + assert_eq!(clients.len(), 1); + + 
// Try to make a simple RPC call + let mut client = clients[0].clone(); + let response = client.get_x_only_public_key(Empty {}).await; + + // We just want to verify that the connection works with mTLS + println!("RPC response: {:?}", response); + + Ok(()) +} + +#[tokio::test] +async fn test_auth_interceptor() -> Result<(), eyre::Report> { + let mut config = create_test_config_with_thread_name().await; + let _rpc = create_regtest_rpc(&mut config).await; + + // Find an available port for the test + let port = find_available_port().await; + let host = "127.0.0.1"; + + config.host = host.to_string(); + config.port = port; + + // Start the operator server + let (_socket_addr, _shutdown_tx) = + create_operator_grpc_server::(config.clone()).await?; + + // Connect to the server using mTLS + let endpoint = format!("https://{}:{}", host, port); + + let mut agg_config = config.clone(); + agg_config.client_cert_path = PathBuf::from("certs/aggregator/aggregator.pem"); + agg_config.client_key_path = PathBuf::from("certs/aggregator/aggregator.key"); + + let mut clients = get_clients( + vec![endpoint.clone()], + crate::rpc::operator_client_builder(&config), + &agg_config, + true, + ) + .await?; + + clients[0] + .get_x_only_public_key(Empty {}) + .await + .expect("aggregator call succeeds"); + clients[0] + .internal_end_round(Empty {}) + .await + .expect_err("aggregator cannot call internal method"); + + let mut bad_config = config.clone(); + // The server's own key is not recognized as a trusted client, so all requests should fail + bad_config.client_cert_path = PathBuf::from("certs/server/server.pem"); + bad_config.client_key_path = PathBuf::from("certs/server/server.key"); + + let mut clients = get_clients( + vec![endpoint.clone()], + crate::rpc::operator_client_builder(&config), + &bad_config, + true, + ) + .await?; + + clients[0] + .get_x_only_public_key(Empty {}) + .await + .expect_err("unknown key should fail"); + clients[0] + .internal_end_round(Empty {}) + .await + .expect_err("unknown key should fail"); 
+ let mut internal_client_config = config.clone(); + // Our own client key is trusted, so both public and internal requests should succeed + internal_client_config.client_cert_path = PathBuf::from("certs/client/client.pem"); + internal_client_config.client_key_path = PathBuf::from("certs/client/client.key"); + + let mut clients = get_clients( + vec![endpoint.clone()], + crate::rpc::operator_client_builder(&config), + &internal_client_config, + true, + ) + .await?; + + clients[0] + .get_x_only_public_key(Empty {}) + .await + .expect("own key can call public method"); + clients[0] + .internal_end_round(Empty {}) + .await + .expect("own key can call internal method"); + + Ok(()) +} diff --git a/core/src/test/sign.rs b/core/src/test/sign.rs new file mode 100644 index 000000000..d36001552 --- /dev/null +++ b/core/src/test/sign.rs @@ -0,0 +1,40 @@ +use crate::{ + config::BridgeConfig, + rpc::{ + clementine::WithdrawParams, + ecdsa_verification_sig::{WithdrawalMessage, CLEMENTINE_EIP712_DOMAIN}, + }, +}; +use alloy::primitives::PrimitiveSignature; +use alloy_sol_types::SolStruct; + +/// Signs the optimistic payout verification signature for a given withdrawal params +/// using the private key in the test_params in the config. 
+pub fn sign_withdrawal_verification_signature( + config: &BridgeConfig, + withdrawal_params: WithdrawParams, +) -> PrimitiveSignature { + let signing_key = config + .test_params + .aggregator_verification_secret_key + .clone() + .unwrap(); + let (withdrawal_id, input_signature, input_outpoint, output_script_pubkey, output_amount) = + crate::rpc::parser::operator::parse_withdrawal_sig_params(withdrawal_params).unwrap(); + + let params = M::new( + withdrawal_id, + input_signature, + input_outpoint, + output_script_pubkey, + output_amount, + ); + + let eip712_hash = params.eip712_signing_hash(&CLEMENTINE_EIP712_DOMAIN); + + let signature = signing_key + .sign_prehash_recoverable(eip712_hash.as_slice()) + .unwrap(); + + PrimitiveSignature::from(signature) +} diff --git a/core/src/test/state_manager.rs b/core/src/test/state_manager.rs new file mode 100644 index 000000000..773c6a8dc --- /dev/null +++ b/core/src/test/state_manager.rs @@ -0,0 +1,111 @@ +use bitcoin::{consensus, Block}; + +use super::common::{create_test_config_with_thread_name, initialize_database, MockOwner}; +use crate::{config::BridgeConfig, database::Database, states::StateManager}; + +// Helper function to create a test state manager +async fn create_test_state_manager( + config: &BridgeConfig, +) -> (StateManager, BridgeConfig) { + let db = Database::new(config) + .await + .expect("Failed to create database"); + let owner = Default::default(); + + let state_manager = StateManager::new(db, owner, config.protocol_paramset()) + .await + .unwrap(); + + (state_manager, config.clone()) +} + +async fn create_test_config() -> BridgeConfig { + let config = create_test_config_with_thread_name().await; + initialize_database(&config).await; + config +} + +// Helper function to create an empty block for testing +fn create_empty_block() -> Block { + // from bitcoin tests + let some_block = 
hex::decode("010000004ddccd549d28f385ab457e98d1b11ce80bfea2c5ab93015ade4973e400000000bf4473e53794beae34e64fccc471dace6ae544180816f89591894e0f417a914cd74d6e49ffff001d323b3a7b0201000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d026e04ffffffff0100f2052a0100000043410446ef0102d1ec5240f0d061a4246c1bdef63fc3dbab7733052fbbf0ecd8f41fc26bf049ebb4f9527f374280259e7cfa99c48b0e3f39c51347a19a5819651503a5ac00000000010000000321f75f3139a013f50f315b23b0c9a2b6eac31e2bec98e5891c924664889942260000000049483045022100cb2c6b346a978ab8c61b18b5e9397755cbd17d6eb2fe0083ef32e067fa6c785a02206ce44e613f31d9a6b0517e46f3db1576e9812cc98d159bfdaf759a5014081b5c01ffffffff79cda0945903627c3da1f85fc95d0b8ee3e76ae0cfdc9a65d09744b1f8fc85430000000049483045022047957cdd957cfd0becd642f6b84d82f49b6cb4c51a91f49246908af7c3cfdf4a022100e96b46621f1bffcf5ea5982f88cef651e9354f5791602369bf5a82a6cd61a62501fffffffffe09f5fe3ffbf5ee97a54eb5e5069e9da6b4856ee86fc52938c2f979b0f38e82000000004847304402204165be9a4cbab8049e1af9723b96199bfd3e85f44c6b4c0177e3962686b26073022028f638da23fc003760861ad481ead4099312c60030d4cb57820ce4d33812a5ce01ffffffff01009d966b01000000434104ea1feff861b51fe3f5f8a3b12d0f4712db80e919548a80839fc47c6a21e66d957e9c5d8cd108c7a2d2324bad71f9904ac0ae7336507d785b17a2c115e427a32fac00000000").unwrap(); + + consensus::deserialize(&some_block).unwrap() +} + +#[tokio::test] +async fn test_process_empty_block_with_no_machines() { + let (mut state_manager, _config) = create_test_state_manager(&create_test_config().await).await; + + let block = create_empty_block(); + let block_height = 1; + + state_manager.update_block_cache(&block, block_height); + // Process an empty block with no state machines + let result = state_manager.process_block_parallel(block_height).await; + + // Should succeed with no state changes + assert!( + result.is_ok(), + "Failed to process empty block: {:?}", + result + ); +} + +#[tokio::test] +async fn test_process_block_parallel() { + let (mut state_manager, 
_config) = create_test_state_manager(&create_test_config().await).await; + + // Create a block + let block = create_empty_block(); + + // Process the block multiple times to test the iteration logic + for i in 1..=3 { + state_manager.update_block_cache(&block, i); + let result = state_manager.process_block_parallel(i).await; + assert!( + result.is_ok(), + "Failed to process block on iteration {}: {:?}", + i, + result + ); + } +} + +#[tokio::test] +async fn test_save_and_load_state() { + let (mut state_manager, config) = create_test_state_manager(&create_test_config().await).await; + + // Process a block to ensure the state is initialized + let block = create_empty_block(); + state_manager.update_block_cache(&block, 1); + let result = state_manager.process_block_parallel(1).await; + assert!(result.is_ok(), "Failed to process block: {:?}", result); + + // Save state to DB + let result = state_manager.save_state_to_db(1, None).await; + assert!(result.is_ok(), "Failed to save state to DB: {:?}", result); + + // Create a new state manager to load from DB + let (mut new_state_manager, _) = create_test_state_manager(&config).await; + + // Load state from DB + let result = new_state_manager.load_from_db().await; + assert!(result.is_ok(), "Failed to load state from DB: {:?}", result); + + // Check that the state is the same + let mut round_machines = new_state_manager.round_machines(); + let mut kickoff_machines = new_state_manager.kickoff_machines(); + + round_machines.sort_by_key(|m| m.operator_data.xonly_pk); + kickoff_machines.sort_by_key(|m| m.kickoff_data); + + let mut round_machines_old = state_manager.round_machines(); + let mut kickoff_machines_old = state_manager.kickoff_machines(); + + round_machines_old.sort_by_key(|m| m.operator_data.xonly_pk); + kickoff_machines_old.sort_by_key(|m| m.kickoff_data); + + assert_eq!(round_machines, round_machines_old); + assert_eq!(kickoff_machines, kickoff_machines_old); +} diff --git a/core/src/test/taproot.rs 
b/core/src/test/taproot.rs new file mode 100644 index 000000000..c0f90d458 --- /dev/null +++ b/core/src/test/taproot.rs @@ -0,0 +1,87 @@ +use crate::actor::Actor; +use crate::bitvm_client::SECP; +use crate::builder::script::{CheckSig, SpendPath, SpendableScript}; +use crate::builder::transaction::input::SpendableTxIn; +use crate::builder::transaction::output::UnspentTxOut; +use crate::builder::transaction::{TransactionType, TxHandlerBuilder, DEFAULT_SEQUENCE}; +use crate::builder::{self}; +use crate::rpc::clementine::NormalSignatureKind; +use crate::test::common::*; +use bitcoin::{Amount, TxOut}; +use bitcoincore_rpc::RpcApi; +use std::sync::Arc; + +#[tokio::test] +async fn create_address_and_transaction_then_sign_transaction() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + let (xonly_pk, _) = config.secret_key.public_key(&SECP).x_only_public_key(); + + // Prepare script and address. + let script = Arc::new(CheckSig::new( + // bitcoin::XOnlyPublicKey::from_slice(&tweaked_pk_script).unwrap(), + xonly_pk, + )); + let scripts: Vec> = vec![script.clone()]; + let (taproot_address, taproot_spend_info) = builder::address::create_taproot_address( + &scripts + .iter() + .map(|s| s.to_script_buf()) + .collect::>(), + None, + config.protocol_paramset().network, + ); + + // Create a new transaction. 
+ let utxo = rpc + .send_to_address(&taproot_address, Amount::from_sat(1000)) + .await + .unwrap(); + + let mut builder = TxHandlerBuilder::new(TransactionType::Dummy); + builder = builder.add_input( + NormalSignatureKind::OperatorSighashDefault, + SpendableTxIn::new( + utxo, + TxOut { + value: Amount::from_sat(1000), + script_pubkey: taproot_address.script_pubkey(), + }, + scripts.clone(), + Some(taproot_spend_info.clone()), + ), + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ); + + builder = builder.add_output(UnspentTxOut::new( + TxOut { + value: Amount::from_sat(330), + script_pubkey: taproot_address.script_pubkey(), + }, + scripts, + Some(taproot_spend_info), + )); + + let mut tx_handler = builder.finalize(); + + // Signer should be able to sign the new transaction. + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + signer + .tx_sign_and_fill_sigs(&mut tx_handler, &[], None) + .expect("failed to sign transaction"); + + rpc.mine_blocks(1).await.unwrap(); + + // New transaction should be OK to send. 
+ rpc.send_raw_transaction(tx_handler.get_cached_tx()) + .await + .unwrap(); +} diff --git a/core/src/test/withdraw.rs b/core/src/test/withdraw.rs new file mode 100644 index 000000000..55b1f63be --- /dev/null +++ b/core/src/test/withdraw.rs @@ -0,0 +1,169 @@ +use super::common::citrea::get_bridge_params; +use crate::bitvm_client::SECP; +use crate::citrea::{CitreaClient, CitreaClientT, SATS_TO_WEI_MULTIPLIER}; +use crate::test::common::citrea::SECRET_KEYS; +use crate::test::common::generate_withdrawal_transaction_and_signature; +use crate::utils::initialize_logger; +use crate::{ + extended_bitcoin_rpc::ExtendedBitcoinRpc, + test::common::{ + citrea::{self}, + create_test_config_with_thread_name, + }, +}; +use alloy::primitives::FixedBytes; +use alloy::primitives::U256; +use alloy::providers::Provider; +use async_trait::async_trait; +use bitcoin::hashes::Hash; +use bitcoin::{secp256k1::SecretKey, Address, Amount}; +use citrea_e2e::{ + config::{BitcoinConfig, SequencerConfig, TestCaseConfig, TestCaseDockerConfig}, + framework::TestFramework, + test_case::{TestCase, TestCaseRunner}, + Result, +}; + +struct CitreaWithdrawAndGetUTXO; +#[async_trait] +impl TestCase for CitreaWithdrawAndGetUTXO { + fn bitcoin_config() -> BitcoinConfig { + BitcoinConfig { + extra_args: vec![ + "-txindex=1", + "-fallbackfee=0.000001", + "-rpcallowip=0.0.0.0/0", + "-dustrelayfee=0", + ], + ..Default::default() + } + } + + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_batch_prover: false, + with_sequencer: true, + with_full_node: true, + docker: TestCaseDockerConfig { + bitcoin: true, + citrea: true, + }, + ..Default::default() + } + } + + fn sequencer_config() -> SequencerConfig { + SequencerConfig { + bridge_initialize_params: get_bridge_params(), + ..Default::default() + } + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let (sequencer, _full_node, _, _, da) = citrea::start_citrea(Self::sequencer_config(), f) + .await + .unwrap(); + + let mut 
config = create_test_config_with_thread_name().await; + citrea::update_config_with_citrea_e2e_values(&mut config, da, sequencer, None); + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + let user_sk = SecretKey::from_slice(&[13u8; 32]).unwrap(); + let withdrawal_address = Address::p2tr( + &SECP, + user_sk.x_only_public_key(&SECP).0, + None, + config.protocol_paramset().network, + ); + let withdrawal_utxo = generate_withdrawal_transaction_and_signature( + &config, + &rpc, + &withdrawal_address, + Amount::from_sat(330), + ) + .await + .0 + .outpoint; + println!("Created withdrawal UTXO: {:?}", withdrawal_utxo); + + let citrea_client = CitreaClient::new( + config.citrea_rpc_url.clone(), + config.citrea_light_client_prover_url.clone(), + config.citrea_chain_id, + Some(SECRET_KEYS[0].to_string().parse().unwrap()), + config.citrea_request_timeout, + ) + .await + .unwrap(); + + let balance = citrea_client + .contract + .provider() + .get_balance(citrea_client.wallet_address) + .await + .unwrap(); + println!("Initial balance: {}", balance); + + let withdrawal_count = citrea_client + .contract + .getWithdrawalCount() + .call() + .await + .unwrap(); + assert_eq!(withdrawal_count._0, U256::from(0)); + + let withdrawal_tx_height_block_height = sequencer + .client + .ledger_get_head_l2_block_height() + .await + .unwrap() + + 1; + let citrea_withdrawal_tx = citrea_client + .contract + .withdraw( + FixedBytes::from(withdrawal_utxo.txid.to_raw_hash().to_byte_array()), + FixedBytes::from(withdrawal_utxo.vout.to_le_bytes()), + ) + .value(U256::from( + config.protocol_paramset().bridge_amount.to_sat() * SATS_TO_WEI_MULTIPLIER, + )) + .send() + .await + .unwrap(); + sequencer.client.send_publish_batch_request().await.unwrap(); + + let receipt = citrea_withdrawal_tx.get_receipt().await.unwrap(); + println!("Citrea withdrawal tx receipt: {:?}", receipt); + + let 
withdrawal_count = citrea_client
+            .contract
+            .getWithdrawalCount()
+            .call()
+            .await
+            .unwrap();
+        assert_eq!(withdrawal_count._0, U256::from(1));
+
+        let utxos = citrea_client
+            .collect_withdrawal_utxos(None, withdrawal_tx_height_block_height)
+            .await
+            .unwrap();
+        assert_eq!(withdrawal_utxo, utxos[0].1);
+
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn citrea_withdraw_and_get_utxo() -> Result<()> {
+    initialize_logger(Some(::tracing::level_filters::LevelFilter::DEBUG))
+        .expect("Failed to initialize logger");
+    std::env::set_var("CITREA_DOCKER_IMAGE", crate::test::CITREA_E2E_DOCKER_IMAGE);
+    TestCaseRunner::new(CitreaWithdrawAndGetUTXO).run().await
+}
diff --git a/core/src/tx_sender/client.rs b/core/src/tx_sender/client.rs
new file mode 100644
index 000000000..35962c799
--- /dev/null
+++ b/core/src/tx_sender/client.rs
@@ -0,0 +1,450 @@
+//! # Transaction Sender Client
+//!
+//! This module provides a client which is responsible for inserting
+//! transactions into the sending queue.
+
+use super::Result;
+use super::{ActivatedWithOutpoint, ActivatedWithTxid};
+use crate::builder::transaction::input::UtxoVout;
+use crate::errors::ResultExt;
+use crate::operator::RoundIndex;
+use crate::rpc;
+use crate::utils::{FeePayingType, RbfSigningInfo, TxMetadata};
+use crate::{
+    builder::transaction::TransactionType,
+    config::BridgeConfig,
+    database::{Database, DatabaseTransaction},
+};
+use bitcoin::hashes::Hash;
+use bitcoin::{OutPoint, Transaction, Txid};
+use std::collections::BTreeMap;
+
+#[derive(Debug, Clone)]
+pub struct TxSenderClient {
+    pub(super) db: Database,
+    pub(super) tx_sender_consumer_id: String,
+}
+
+impl TxSenderClient {
+    pub fn new(db: Database, tx_sender_consumer_id: String) -> Self {
+        Self {
+            db,
+            tx_sender_consumer_id,
+        }
+    }
+
+    /// Saves a transaction to the database queue for sending/fee bumping.
+ /// + /// This function determines the initial parameters for a transaction send attempt, + /// including its [`FeePayingType`], associated metadata, and dependencies (cancellations/activations). + /// It then persists this information in the database via [`Database::save_tx`] and related functions. + /// The actual sending logic (CPFP/RBF) is handled later by the transaction sender's task loop. + /// + /// # Default Activation and Cancellation Conditions + /// + /// By default, this function automatically adds cancellation conditions for all outpoints + /// spent by the `signed_tx` itself. If `signed_tx` confirms, these input outpoints + /// are marked as spent/cancelled in the database. + /// + /// There are no default activation conditions added implicitly; all activation prerequisites + /// must be explicitly provided via the `activate_txids` and `activate_outpoints` arguments. + /// + /// # Arguments + /// * `dbtx` - An active database transaction. + /// * `tx_metadata` - Optional metadata about the transaction's purpose. + /// * `signed_tx` - The transaction to be potentially sent. + /// * `fee_paying_type` - Whether to use CPFP or RBF for fee management. + /// * `cancel_outpoints` - Outpoints that should be marked invalid if this tx confirms (in addition to the tx's own inputs). + /// * `cancel_txids` - Txids that should be marked invalid if this tx confirms. + /// * `activate_txids` - Txids that are prerequisites for this tx, potentially with a relative timelock. + /// * `activate_outpoints` - Outpoints that are prerequisites for this tx, potentially with a relative timelock. + /// + /// # Returns + /// + /// - [`u32`]: The database ID (`try_to_send_id`) assigned to this send attempt. 
+ #[tracing::instrument(err(level = tracing::Level::ERROR), ret(level = tracing::Level::TRACE), skip_all, fields(?tx_metadata, consumer = self.tx_sender_consumer_id))] + #[allow(clippy::too_many_arguments)] + pub async fn insert_try_to_send( + &self, + dbtx: DatabaseTransaction<'_, '_>, + tx_metadata: Option, + signed_tx: &Transaction, + fee_paying_type: FeePayingType, + rbf_signing_info: Option, + cancel_outpoints: &[OutPoint], + cancel_txids: &[Txid], + activate_txids: &[ActivatedWithTxid], + activate_outpoints: &[ActivatedWithOutpoint], + ) -> Result { + let txid = signed_tx.compute_txid(); + + tracing::debug!( + "{} added tx {} with txid {} to the queue", + self.tx_sender_consumer_id, + tx_metadata + .map(|data| format!("{:?}", data.tx_type)) + .unwrap_or("N/A".to_string()), + txid + ); + + // do not add duplicate transactions to the txsender + let tx_exists = self + .db + .check_if_tx_exists_on_txsender(Some(dbtx), txid) + .await + .map_to_eyre()?; + if let Some(try_to_send_id) = tx_exists { + return Ok(try_to_send_id); + } + + let try_to_send_id = self + .db + .save_tx( + Some(dbtx), + tx_metadata, + signed_tx, + fee_paying_type, + txid, + rbf_signing_info, + ) + .await + .map_to_eyre()?; + + for input_outpoint in signed_tx.input.iter().map(|input| input.previous_output) { + self.db + .save_cancelled_outpoint(Some(dbtx), try_to_send_id, input_outpoint) + .await + .map_to_eyre()?; + } + + for outpoint in cancel_outpoints { + self.db + .save_cancelled_outpoint(Some(dbtx), try_to_send_id, *outpoint) + .await + .map_to_eyre()?; + } + + for txid in cancel_txids { + self.db + .save_cancelled_txid(Some(dbtx), try_to_send_id, *txid) + .await + .map_to_eyre()?; + } + + let mut max_timelock_of_activated_txids = BTreeMap::new(); + + for activated_txid in activate_txids { + let timelock = max_timelock_of_activated_txids + .entry(activated_txid.txid) + .or_insert(activated_txid.relative_block_height); + if *timelock < activated_txid.relative_block_height { + *timelock = 
activated_txid.relative_block_height; + } + } + + for input in signed_tx.input.iter() { + let relative_block_height = if input.sequence.is_relative_lock_time() { + let relative_locktime = input + .sequence + .to_relative_lock_time() + .expect("Invalid relative locktime"); + match relative_locktime { + bitcoin::relative::LockTime::Blocks(height) => height.value() as u32, + _ => { + return Err(eyre::eyre!("Invalid relative locktime").into()); + } + } + } else { + 0 + }; + let timelock = max_timelock_of_activated_txids + .entry(input.previous_output.txid) + .or_insert(relative_block_height); + if *timelock < relative_block_height { + *timelock = relative_block_height; + } + } + + for (txid, timelock) in max_timelock_of_activated_txids { + self.db + .save_activated_txid( + Some(dbtx), + try_to_send_id, + &ActivatedWithTxid { + txid, + relative_block_height: timelock, + }, + ) + .await + .map_to_eyre()?; + } + + for activated_outpoint in activate_outpoints { + self.db + .save_activated_outpoint(Some(dbtx), try_to_send_id, activated_outpoint) + .await + .map_to_eyre()?; + } + + Ok(try_to_send_id) + } + + /// Adds a transaction to the sending queue based on its type and configuration. + /// + /// This is a higher-level wrapper around [`Self::insert_try_to_send`]. It determines the + /// appropriate `FeePayingType` (CPFP or RBF) and any specific cancellation or activation + /// dependencies based on the `tx_type` and `config`. + /// + /// For example: + /// - `Challenge` transactions use `RBF`. + /// - Most other transactions default to `CPFP`. + /// - Specific types like `OperatorChallengeAck` might activate certain outpoints + /// based on related transactions (`kickoff_txid`). + /// + /// # Arguments + /// * `dbtx` - An active database transaction. + /// * `tx_type` - The semantic type of the transaction. + /// * `signed_tx` - The transaction itself. + /// * `related_txs` - Other transactions potentially related (e.g., the kickoff for a challenge ack). 
+ /// * `tx_metadata` - Optional metadata, `tx_type` will be added/overridden. + /// * `config` - Bridge configuration providing parameters like finality depth. + /// + /// # Returns + /// + /// - [`u32`]: The database ID (`try_to_send_id`) assigned to this send attempt. + #[allow(clippy::too_many_arguments)] + pub async fn add_tx_to_queue<'a>( + &'a self, + dbtx: DatabaseTransaction<'a, '_>, + tx_type: TransactionType, + signed_tx: &Transaction, + related_txs: &[(TransactionType, Transaction)], + tx_metadata: Option, + config: &BridgeConfig, + rbf_info: Option, + ) -> Result { + let tx_metadata = tx_metadata.map(|mut data| { + data.tx_type = tx_type; + data + }); + match tx_type { + TransactionType::Kickoff + | TransactionType::Dummy + | TransactionType::ChallengeTimeout + | TransactionType::DisproveTimeout + | TransactionType::Reimburse + | TransactionType::Round + | TransactionType::OperatorChallengeNack(_) + | TransactionType::UnspentKickoff(_) + | TransactionType::MoveToVault + | TransactionType::BurnUnusedKickoffConnectors + | TransactionType::KickoffNotFinalized + | TransactionType::MiniAssert(_) + | TransactionType::LatestBlockhashTimeout + | TransactionType::LatestBlockhash + | TransactionType::EmergencyStop + | TransactionType::OptimisticPayout + | TransactionType::ReadyToReimburse + | TransactionType::ReplacementDeposit + | TransactionType::AssertTimeout(_) => { + // no_dependency and cpfp + self.insert_try_to_send( + dbtx, + tx_metadata, + signed_tx, + FeePayingType::CPFP, + rbf_info, + &[], + &[], + &[], + &[], + ) + .await + } + TransactionType::Challenge + | TransactionType::WatchtowerChallenge(_) + | TransactionType::Payout => { + self.insert_try_to_send( + dbtx, + tx_metadata, + signed_tx, + FeePayingType::RBF, + rbf_info, + &[], + &[], + &[], + &[], + ) + .await + } + TransactionType::WatchtowerChallengeTimeout(_) => { + // do not send watchtowet timeout if kickoff is already finalized + // which is done by adding kickoff finalizer utxo to 
cancel_outpoints + // this is not needed for any timeouts that spend the kickoff finalizer utxo like AssertTimeout + let kickoff_txid = related_txs + .iter() + .find_map(|(tx_type, tx)| { + if let TransactionType::Kickoff = tx_type { + Some(tx.compute_txid()) + } else { + None + } + }) + .ok_or(eyre::eyre!("Couldn't find kickoff tx in related_txs"))?; + self.insert_try_to_send( + dbtx, + tx_metadata, + signed_tx, + FeePayingType::CPFP, + rbf_info, + &[OutPoint { + txid: kickoff_txid, + vout: UtxoVout::KickoffFinalizer.get_vout(), + }], + &[], + &[], + &[], + ) + .await + } + TransactionType::OperatorChallengeAck(watchtower_idx) => { + let kickoff_txid = related_txs + .iter() + .find_map(|(tx_type, tx)| { + if let TransactionType::Kickoff = tx_type { + Some(tx.compute_txid()) + } else { + None + } + }) + .ok_or(eyre::eyre!("Couldn't find kickoff tx in related_txs"))?; + self.insert_try_to_send( + dbtx, + tx_metadata, + signed_tx, + FeePayingType::CPFP, + rbf_info, + &[], + &[], + &[], + &[ActivatedWithOutpoint { + // only send OperatorChallengeAck if corresponding watchtower challenge is sent + outpoint: OutPoint { + txid: kickoff_txid, + vout: UtxoVout::WatchtowerChallenge(watchtower_idx).get_vout(), + }, + relative_block_height: config.protocol_paramset().finality_depth, + }], + ) + .await + } + TransactionType::Disprove => { + self.insert_try_to_send( + dbtx, + tx_metadata, + signed_tx, + FeePayingType::NoFunding, + rbf_info, + &[], + &[], + &[], + &[], + ) + .await + } + TransactionType::AllNeededForDeposit | TransactionType::YieldKickoffTxid => { + unreachable!() + } + } + } + + /// Returns debugging information for a transaction + /// + /// This function gathers all debugging information about a transaction from the database, + /// including its state history, fee payer UTXOs, submission errors, and current state. 
+ /// + /// # Arguments + /// * `id` - The ID of the transaction to debug + /// + /// # Returns + /// A comprehensive debug info structure with all available information about the transaction + pub async fn debug_tx(&self, id: u32) -> Result { + use crate::rpc::clementine::{TxDebugFeePayerUtxo, TxDebugInfo, TxDebugSubmissionError}; + + let (tx_metadata, tx, fee_paying_type, seen_block_id, _) = + self.db.get_try_to_send_tx(None, id).await.map_to_eyre()?; + + let submission_errors = self + .db + .get_tx_debug_submission_errors(None, id) + .await + .map_to_eyre()?; + + let submission_errors = submission_errors + .into_iter() + .map(|(error_message, timestamp)| TxDebugSubmissionError { + error_message, + timestamp, + }) + .collect(); + + let current_state = self.db.get_tx_debug_info(None, id).await.map_to_eyre()?; + + let fee_payer_utxos = self + .db + .get_tx_debug_fee_payer_utxos(None, id) + .await + .map_to_eyre()?; + + let fee_payer_utxos = fee_payer_utxos + .into_iter() + .map(|(txid, vout, amount, confirmed)| { + Ok(TxDebugFeePayerUtxo { + txid: Some(txid.into()), + vout, + amount: amount.to_sat(), + confirmed, + }) + }) + .collect::>>()?; + + let txid = match fee_paying_type { + FeePayingType::CPFP | FeePayingType::NoFunding => tx.compute_txid(), + FeePayingType::RBF => self + .db + .get_last_rbf_txid(None, id) + .await + .map_to_eyre()? + .unwrap_or(Txid::all_zeros()), + }; + let debug_info = TxDebugInfo { + id, + is_active: seen_block_id.is_none(), + current_state: current_state.unwrap_or_else(|| "unknown".to_string()), + submission_errors, + created_at: "".to_string(), + txid: Some(txid.into()), + fee_paying_type: format!("{:?}", fee_paying_type), + fee_payer_utxos_count: fee_payer_utxos.len() as u32, + fee_payer_utxos_confirmed_count: fee_payer_utxos + .iter() + .filter(|TxDebugFeePayerUtxo { confirmed, .. 
}| *confirmed)
+                .count() as u32,
+            fee_payer_utxos,
+            raw_tx: bitcoin::consensus::serialize(&tx),
+            metadata: tx_metadata.map(|metadata| rpc::clementine::TxMetadata {
+                deposit_outpoint: metadata.deposit_outpoint.map(Into::into),
+                operator_xonly_pk: metadata.operator_xonly_pk.map(Into::into),
+
+                round_idx: metadata
+                    .round_idx
+                    .unwrap_or(RoundIndex::Round(0))
+                    .to_index() as u32,
+                kickoff_idx: metadata.kickoff_idx.unwrap_or(0),
+                tx_type: Some(metadata.tx_type.into()),
+            }),
+        };
+
+        Ok(debug_info)
+    }
+}
diff --git a/core/src/tx_sender/cpfp.rs b/core/src/tx_sender/cpfp.rs
new file mode 100644
index 000000000..9fac68b32
--- /dev/null
+++ b/core/src/tx_sender/cpfp.rs
@@ -0,0 +1,606 @@
+//! # Child Pays For Parent (CPFP) Support For Transaction Sender
+//!
+//! This module implements the Child Pays For Parent (CPFP) strategy for sending
+//! Bitcoin transactions with transaction sender.
+//!
+//! ## Child Transaction Details
+//!
+//! A child transaction is created to pay for the fees of a parent transaction.
+//! They must be submitted together as a package for Bitcoin nodes to accept
+//! them.
+//!
+//! ### Fee Payer Transactions/UTXOs
+//!
+//! A child transaction needs to spend a UTXO for the fees. But because of the
+//! TRUC rules (https://github.com/bitcoin/bips/blob/master/bip-0431.mediawiki#specification),
+//! a third transaction can't be put into the package. So, a so-called "fee
+//! payer" transaction must be sent and confirmed before the CPFP package is
+//! sent.
+ +use super::{Result, SendTxError, TxMetadata, TxSender}; +use crate::constants::NON_STANDARD_V3; +use crate::errors::{ErrorExt, ResultExt}; +use crate::extended_bitcoin_rpc::BitcoinRPCError; +use crate::utils::FeePayingType; +use crate::{ + builder::{ + self, + script::SpendPath, + transaction::{ + input::SpendableTxIn, output::UnspentTxOut, TransactionType, TxHandlerBuilder, + DEFAULT_SEQUENCE, + }, + }, + constants::MIN_TAPROOT_AMOUNT, + rpc::clementine::NormalSignatureKind, +}; +use bitcoin::{Amount, FeeRate, OutPoint, Transaction, TxOut, Weight}; +use bitcoincore_rpc::PackageSubmissionResult; +use bitcoincore_rpc::{PackageTransactionResult, RpcApi}; +use eyre::eyre; +use eyre::Context; +use std::env; + +impl TxSender { + /// Creates and broadcasts a new "fee payer" UTXO to be used for CPFP + /// transactions. + /// + /// This function is called when a CPFP attempt fails due to insufficient funds + /// in the existing confirmed fee payer UTXOs associated with a transaction (`bumped_id`). + /// It calculates the required fee based on the parent transaction (`tx`) and the current + /// `fee_rate`, adding a buffer (3x required fee + dust limit) to handle potential fee spikes. + /// It then sends funds to the `TxSender`'s own signer address using the RPC's + /// `send_to_address` and saves the resulting UTXO information (`outpoint`, `amount`) + /// to the database, linking it to the `bumped_id`. + /// + /// # Arguments + /// * `bumped_id` - The database ID of the parent transaction requiring the fee bump. + /// * `tx` - The parent transaction itself. + /// * `fee_rate` - The target fee rate for the CPFP package. + /// * `total_fee_payer_amount` - The sum of amounts in currently available confirmed fee payer UTXOs. + /// * `fee_payer_utxos_len` - The number of currently available confirmed fee payer UTXOs. 
+ async fn create_fee_payer_utxo( + &self, + bumped_id: u32, + tx: &Transaction, + fee_rate: FeeRate, + total_fee_payer_amount: Amount, + fee_payer_utxos_len: usize, + ) -> Result<()> { + tracing::debug!( + "Creating fee payer UTXO for txid {} with bump id {}", + &tx.compute_txid().to_string(), + bumped_id + ); + let required_fee = Self::calculate_required_fee( + tx.weight(), + fee_payer_utxos_len + 1, + fee_rate, + FeePayingType::CPFP, + )?; + + // Aggressively add 3x required fee to the total amount to account for sudden spikes + let new_fee_payer_amount = (required_fee - total_fee_payer_amount) + + required_fee + + required_fee + + required_fee + + MIN_TAPROOT_AMOUNT; + + tracing::debug!( + "Creating fee payer UTXO with amount {} ({} sat/vb)", + new_fee_payer_amount, + fee_rate + ); + + let outpoint = self + .rpc + .send_to_address(&self.signer.address, new_fee_payer_amount) + .await + .map_to_eyre()?; + + self.db + .save_fee_payer_tx( + None, + bumped_id, + outpoint.txid, + outpoint.vout, + new_fee_payer_amount, + None, + ) + .await + .map_to_eyre()?; + + Ok(()) + } + + /// Creates a Child-Pays-For-Parent (CPFP) child transaction. + /// + /// This transaction spends: + /// 1. The designated "P2A anchor" output of the parent transaction (`p2a_anchor`). + /// 2. One or more confirmed "fee payer" UTXOs (`fee_payer_utxos`) controlled by the `signer`. + /// + /// It calculates the total fee required (`required_fee`) to make the combined parent + child + /// package attractive to miners at the target `fee_rate`. The `required_fee` is paid entirely + /// by this child transaction. + /// + /// The remaining value (total input value - `required_fee`) is sent to the `change_address`. + /// + /// # Signing + /// We sign the input spending the P2A anchor and all fee payer UTXOs. + /// + /// # Returns + /// The constructed and partially signed child transaction. 
+ async fn create_child_tx( + &self, + p2a_anchor: OutPoint, + anchor_sat: Amount, + fee_payer_utxos: Vec, + parent_tx_size: Weight, + fee_rate: FeeRate, + ) -> Result { + tracing::debug!( + "Creating child tx with {} fee payer utxos", + fee_payer_utxos.len() + ); + let required_fee = Self::calculate_required_fee( + parent_tx_size, + fee_payer_utxos.len(), + fee_rate, + FeePayingType::CPFP, + ) + .map_err(|e| eyre!(e))?; + + let change_address = self + .rpc + .get_new_wallet_address() + .await + .wrap_err("Failed to get new wallet address")?; + + let total_fee_payer_amount = fee_payer_utxos + .iter() + .map(|utxo| utxo.get_prevout().value) + .sum::() + + anchor_sat; // We add the anchor output value to the total amount. + if change_address.script_pubkey().minimal_non_dust() + required_fee > total_fee_payer_amount + { + return Err(SendTxError::InsufficientFeePayerAmount); + } + + let mut builder = TxHandlerBuilder::new(TransactionType::Dummy) + .with_version(NON_STANDARD_V3) + .add_input( + NormalSignatureKind::OperatorSighashDefault, + SpendableTxIn::new_partial( + p2a_anchor, + builder::transaction::anchor_output(anchor_sat), + ), + SpendPath::Unknown, + DEFAULT_SEQUENCE, + ); + + for fee_payer_utxo in fee_payer_utxos { + builder = builder.add_input( + NormalSignatureKind::OperatorSighashDefault, + fee_payer_utxo, + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ); + } + + builder = builder.add_output(UnspentTxOut::from_partial(TxOut { + value: total_fee_payer_amount - required_fee, + script_pubkey: change_address.script_pubkey(), + })); + + let mut tx_handler = builder.finalize(); + + for fee_payer_input in 1..tx_handler.get_cached_tx().input.len() { + let sighash = tx_handler + .calculate_pubkey_spend_sighash(fee_payer_input, bitcoin::TapSighashType::Default) + .map_err(|e| eyre!(e))?; + let signature = self + .signer + .sign_with_tweak_data(sighash, builder::sighash::TapTweakData::KeyPath(None), None) + .map_err(|e| eyre!(e))?; + tx_handler + 
.set_p2tr_key_spend_witness( + &bitcoin::taproot::Signature { + signature, + sighash_type: bitcoin::TapSighashType::Default, + }, + fee_payer_input, + ) + .map_err(|e| eyre!(e))?; + } + let child_tx = tx_handler.get_cached_tx().clone(); + + Ok(child_tx) + } + + /// Creates a transaction package for CPFP submission. + /// + /// Finds the P2A anchor output in the parent transaction (`tx`), then constructs + /// the child transaction using `create_child_tx`. + /// + /// # Returns + /// + /// - [`Vec`]: Parent transaction followed by the child + /// transaction ready for submission via the `submitpackage` RPC. + async fn create_package( + &self, + tx: Transaction, + fee_rate: FeeRate, + fee_payer_utxos: Vec, + ) -> Result> { + tracing::debug!( + "Creating package with {} fee payer utxos", + fee_payer_utxos.len() + ); + let txid = tx.compute_txid(); + + let p2a_vout = self + .find_p2a_vout(&tx) + .wrap_err("Failed to find p2a vout")?; + + // get sat amount of anchor output in the tx + let anchor_sat = tx.output[p2a_vout].value; + + let child_tx = self + .create_child_tx( + OutPoint { + txid, + vout: p2a_vout as u32, + }, + anchor_sat, + fee_payer_utxos, + tx.weight(), + fee_rate, + ) + .await + .wrap_err("Failed to create child tx")?; + + Ok(vec![tx, child_tx]) + } + + /// Retrieves confirmed fee payer UTXOs associated with a specific send attempt. + /// + /// Queries the database for UTXOs linked to `try_to_send_id` that are marked as confirmed. + /// These UTXOs are controlled by the `TxSender`'s `signer` and are intended to be + /// spent by a CPFP child transaction. + /// + /// # Returns + /// + /// - [`Vec`]: [`SpendableTxIn`]s of the confirmed fee payer + /// UTXOs that are ready to be included as inputs in the CPFP child tx. + async fn get_confirmed_fee_payer_utxos( + &self, + try_to_send_id: u32, + ) -> Result> { + Ok(self + .db + .get_confirmed_fee_payer_utxos(None, try_to_send_id) + .await + .map_to_eyre()? 
+ .iter() + .map(|(txid, vout, amount)| { + SpendableTxIn::new( + OutPoint { + txid: *txid, + vout: *vout, + }, + TxOut { + value: *amount, + script_pubkey: self.signer.address.script_pubkey(), + }, + vec![], + Some(self.cached_spendinfo.clone()), + ) + }) + .collect()) + } + + /// Attempts to bump the fees of unconfirmed "fee payer" UTXOs using RBF. + /// + /// Fee payer UTXOs are created to fund CPFP child transactions. However, these + /// fee payer creation transactions might themselves get stuck due to low fees. + /// This function identifies such unconfirmed fee payer transactions associated with + /// a parent transaction (`bumped_id`) and attempts to RBF them using the provided `fee_rate`. + /// + /// This ensures the fee payer UTXOs confirm quickly, making them available to be spent + /// by the actual CPFP child transaction. + /// + /// # Arguments + /// * `bumped_id` - The database ID of the parent transaction whose fee payer UTXOs need bumping. + /// * `fee_rate` - The target fee rate for bumping the fee payer transactions. 
+ #[tracing::instrument(skip_all, fields(sender = self.btc_syncer_consumer_id, bumped_id, fee_rate))] + async fn _bump_fees_of_unconfirmed_fee_payer_txs( + &self, + bumped_id: u32, + fee_rate: FeeRate, + ) -> Result<()> { + let bumpable_fee_payer_txs = self + .db + .get_unconfirmed_fee_payer_txs(None, bumped_id) + .await + .map_to_eyre()?; + + for (id, fee_payer_txid, vout, amount) in bumpable_fee_payer_txs { + tracing::debug!( + "Bumping fee for fee payer tx {} with bumped tx {} for fee rate {}", + fee_payer_txid, + bumped_id, + fee_rate + ); + let new_txi_result = self + .rpc + .bump_fee_with_fee_rate(fee_payer_txid, fee_rate) + .await; + + match new_txi_result { + Ok(new_txid) => { + if new_txid != fee_payer_txid { + self.db + .save_fee_payer_tx(None, bumped_id, new_txid, vout, amount, Some(id)) + .await + .map_to_eyre()?; + } else { + tracing::warn!( + "Fee payer tx {} has enough fee, no need to bump", + fee_payer_txid + ); + } + } + Err(e) => { + let e = e.into_eyre(); + match e.root_cause().downcast_ref::() { + Some(BitcoinRPCError::TransactionAlreadyInBlock(block_hash)) => { + tracing::debug!( + "Fee payer tx {} is already in block {}, skipping", + fee_payer_txid, + block_hash + ); + continue; + } + Some(BitcoinRPCError::BumpFeeUTXOSpent(outpoint)) => { + tracing::debug!( + "Fee payer tx {} is already onchain, skipping: {:?}", + fee_payer_txid, + outpoint + ); + continue; + } + _ => { + tracing::warn!("Failed to bump fee the fee payer tx {} of bumped tx {} with error {e}, skipping", fee_payer_txid, bumped_id); + continue; + } + } + } + } + } + + Ok(()) + } + + /// Sends a transaction using the Child-Pays-For-Parent (CPFP) strategy. + /// + /// # Logic: + /// 1. **Check Unconfirmed Fee Payers:** Ensures no unconfirmed fee payer UTXOs exist + /// for this `try_to_send_id`. If they do, returns [`SendTxError::UnconfirmedFeePayerUTXOsLeft`] + /// as they need to confirm before being spendable by the child. + /// 2. 
**Get Confirmed Fee Payers:** Retrieves the available confirmed fee payer UTXOs. + /// 3. **Create Package:** Calls `create_package` to build the `vec![parent_tx, child_tx]`. + /// The `child_tx` spends the parent's anchor output and the fee payer UTXOs, paying + /// a fee calculated for the whole package. + /// 4. **Test Mempool Accept (Debug step):** Uses `testmempoolaccept` RPC + /// to check if the package is likely to be accepted by the network before submitting. + /// 5. **Submit Package:** Uses the `submitpackage` RPC to atomically submit the parent + /// and child transactions. Bitcoin Core evaluates the fee rate of the package together. + /// 6. **Handle Results:** Checks the `submitpackage` result. If successful or already in + /// mempool, updates the effective fee rate in the database. If failed, logs an error. + /// + /// # Arguments + /// * `try_to_send_id` - The database ID tracking this send attempt. + /// * `tx` - The parent transaction requiring the fee bump. + /// * `tx_metadata` - Optional metadata associated with the transaction. + /// * `fee_rate` - The target fee rate for the CPFP package. 
+ #[tracing::instrument(skip_all, fields(sender = self.btc_syncer_consumer_id, try_to_send_id, tx_meta=?tx_metadata))] + pub(super) async fn send_cpfp_tx( + &self, + try_to_send_id: u32, + tx: Transaction, + tx_metadata: Option, + fee_rate: FeeRate, + ) -> Result<()> { + let unconfirmed_fee_payer_utxos = self + .db + .get_unconfirmed_fee_payer_txs(None, try_to_send_id) + .await + .map_to_eyre()?; + + if !unconfirmed_fee_payer_utxos.is_empty() { + // Log that we're waiting for unconfirmed UTXOs + tracing::debug!( + try_to_send_id, + "Waiting for {} UTXOs to confirm", + unconfirmed_fee_payer_utxos.len() + ); + + // Update the sending state + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "waiting_for_utxo_confirmation", + true, + ) + .await; + + return Ok(()); + } + + tracing::debug!(try_to_send_id, "Attempting to send CPFP tx"); + + let confirmed_fee_payers = self.get_confirmed_fee_payer_utxos(try_to_send_id).await?; + let confirmed_fee_payer_len = confirmed_fee_payers.len(); + + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "creating_package", true) + .await; + + // to be used below + let total_fee_payer_amount = confirmed_fee_payers + .iter() + .map(|txi| txi.get_prevout().value) + .sum::(); + + let package = self + .create_package(tx.clone(), fee_rate, confirmed_fee_payers) + .await + .wrap_err("Failed to create CPFP package"); + + let package = match package { + Ok(package) => package, + Err(e) => match e.root_cause().downcast_ref::() { + Some(SendTxError::InsufficientFeePayerAmount) => { + tracing::debug!( + try_to_send_id, + "Insufficient fee payer amount, creating new fee payer utxo." 
+ ); + + self.create_fee_payer_utxo( + try_to_send_id, + &tx, + fee_rate, + total_fee_payer_amount, + confirmed_fee_payer_len, + ) + .await?; + + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "waiting_for_fee_payer_utxos", + true, + ) + .await; + + return Ok(()); + } + _ => { + tracing::error!(try_to_send_id, "Failed to create CPFP package: {:?}", e); + return Err(e.into()); + } + }, + }; + + let package_refs: Vec<&Transaction> = package.iter().collect(); + + tracing::debug!( + try_to_send_id, + "Submitting package\n Pkg tx hexs: {:?}", + if env::var("DBG_PACKAGE_HEX").is_ok() { + package + .iter() + .map(|tx| hex::encode(bitcoin::consensus::serialize(tx))) + .collect::>() + } else { + vec!["use DBG_PACKAGE_HEX=1 to print the package as hex".into()] + } + ); + + // Update sending state to submitting_package + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "submitting_package", true) + .await; + + tracing::debug!(try_to_send_id, "Submitting package, size {}", package.len()); + + // let test_mempool_result = self + // .rpc + // .test_mempool_accept(&package_refs) + // .await + // .wrap_err("Failed to test mempool accept")?; + + let submit_package_result: PackageSubmissionResult = self + .rpc + .submit_package(&package_refs, Some(Amount::from_sat(0)), None) + .await + .wrap_err("Failed to submit package")?; + + tracing::debug!( + try_to_send_id, + "Submit package result: {submit_package_result:?}" + ); + + // If tx_results is empty, it means the txs were already accepted by the network. + if submit_package_result.tx_results.is_empty() { + return Ok(()); + } + + let mut early_exit = false; + for (_txid, result) in submit_package_result.tx_results { + if let PackageTransactionResult::Failure { error, .. 
} = result { + tracing::error!( + try_to_send_id, + "Error submitting package: {:?}, package: {:?}", + error, + package_refs + .iter() + .map(|tx| hex::encode(bitcoin::consensus::serialize(tx))) + .collect::>() + ); + + early_exit = true; + } + } + if early_exit { + return Ok(()); + } + + tracing::info!("Package submitted successfully."); + + // // Get the effective fee rate from the first transaction result + // let effective_fee_rate_btc_per_kvb = submit_package_result + // .tx_results + // .iter() + // .next() + // .and_then(|(_, result)| match result { + // PackageTransactionResult::Success { fees, .. } => Some(fees.effective_feerate), + // PackageTransactionResult::SuccessAlreadyInMempool { txid, .. } => { + // tracing::warn!( + // "{}: transaction {txid} is already in mempool, skipping", + // self.consumer_handle + // ); + // None + // } + // PackageTransactionResult::Failure { txid, error } => { + // tracing::warn!( + // "{}: failed to send the transaction {txid} with error {error}, skipping", + // self.consumer_handle + // ); + // None + // } + // }) + // .expect("Effective fee rate should be present") + // .expect("Effective fee rate should be present"); + + // let effective_fee_rate = Self::btc_per_kvb_to_fee_rate(effective_fee_rate_btc_per_kvb); + // Save the effective fee rate to the db + self.db + .update_effective_fee_rate(None, try_to_send_id, fee_rate) + .await + .wrap_err("Failed to update effective fee rate")?; + + // Sanity check to make sure the fee rate is equal to the required fee rate + // assert_eq!( + // effective_fee_rate, fee_rate, + // "Effective fee rate is not equal to the required fee rate: {:?} to {:?} != {:?}", + // effective_fee_rate_btc_per_kvb, effective_fee_rate, fee_rate + // ); + + Ok(()) + } +} diff --git a/core/src/tx_sender/mod.rs b/core/src/tx_sender/mod.rs new file mode 100644 index 000000000..e2576da36 --- /dev/null +++ b/core/src/tx_sender/mod.rs @@ -0,0 +1,987 @@ +//! # Transaction Sender +//! +//! 
Transaction sender is responsible for sending Bitcoin transactions, bumping
+//! fees and making sure that transactions are finalized until the deadline. It
+//! can utilize [Child-Pays-For-Parent (CPFP)](crate::tx_sender::cpfp) and
+//! [Replace-By-Fee (RBF)](crate::tx_sender::rbf) strategies for sending
+//! transactions.
+//!
+//! Sending transactions is done by the [`TxSenderClient`], which is a client
+//! that puts transactions into the sending queue and the [`TxSenderTask`] is
+//! responsible for processing this queue and sending them.
+//!
+//! ## Debugging Transaction Sender
+//!
+//! There are several database tables that save the transaction states. Please
+//! see [`core/src/database/tx_sender.rs`] for more information.
+
+use crate::config::protocol::ProtocolParamset;
+use crate::errors::ResultExt;
+use crate::utils::FeePayingType;
+use crate::{
+    actor::Actor,
+    builder::{self},
+    database::Database,
+    extended_bitcoin_rpc::ExtendedBitcoinRpc,
+    utils::TxMetadata,
+};
+use alloy::transports::http::reqwest;
+use bitcoin::taproot::TaprootSpendInfo;
+use bitcoin::{Amount, FeeRate, Network, OutPoint, Transaction, TxOut, Txid, Weight};
+use bitcoincore_rpc::RpcApi;
+use eyre::eyre;
+use eyre::ContextCompat;
+use eyre::OptionExt;
+use eyre::WrapErr;
+
+#[cfg(test)]
+use std::env;
+
+mod client;
+mod cpfp;
+mod nonstandard;
+mod rbf;
+mod task;
+
+pub use client::TxSenderClient;
+pub use task::TxSenderTask;
+
+// Define a macro for logging errors and saving them to the database
+macro_rules! log_error_for_tx {
+    ($db:expr, $try_to_send_id:expr, $err:expr) => {{
+        let db = $db.clone();
+        let try_to_send_id = $try_to_send_id;
+        let err = $err.to_string();
+        tracing::warn!(try_to_send_id, "{}", err);
+        tokio::spawn(async move {
+            let _ = db
+                .save_tx_debug_submission_error(try_to_send_id, &err)
+                .await;
+        });
+    }};
+}
+
+// Exports to this module.
+use log_error_for_tx; + +/// Manages the process of sending Bitcoin transactions, including handling fee bumping +/// strategies like Replace-By-Fee (RBF) and Child-Pays-For-Parent (CPFP). +/// +/// It interacts with a Bitcoin Core RPC endpoint (`ExtendedBitcoinRpc`) to query network state +/// (like fee rates) and submit transactions. It uses a `Database` to persist transaction +/// state, track confirmation status, and manage associated data like fee payer UTXOs. +/// The `Actor` provides signing capabilities for transactions controlled by this service. +#[derive(Clone, Debug)] +pub struct TxSender { + pub signer: Actor, + pub rpc: ExtendedBitcoinRpc, + pub db: Database, + pub btc_syncer_consumer_id: String, + paramset: &'static ProtocolParamset, + cached_spendinfo: TaprootSpendInfo, + http_client: reqwest::Client, + pub mempool_api_host: Option, + pub mempool_api_endpoint: Option, +} + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] +pub struct ActivatedWithTxid { + pub txid: Txid, + pub relative_block_height: u32, +} + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] +pub struct ActivatedWithOutpoint { + pub outpoint: OutPoint, + pub relative_block_height: u32, +} + +#[derive(Debug, thiserror::Error)] +pub enum SendTxError { + #[error("Unconfirmed fee payer UTXOs left")] + UnconfirmedFeePayerUTXOsLeft, + #[error("Insufficient fee payer amount")] + InsufficientFeePayerAmount, + + #[error("Failed to create a PSBT for fee bump")] + PsbtError(String), + + #[error("Network error: {0}")] + NetworkError(String), + + #[error(transparent)] + Other(#[from] eyre::Report), +} + +type Result = std::result::Result; + +impl TxSender { + pub fn new( + signer: Actor, + rpc: ExtendedBitcoinRpc, + db: Database, + btc_syncer_consumer_id: String, + paramset: &'static ProtocolParamset, + mempool_api_host: Option, + mempool_api_endpoint: Option, + ) -> Self { + Self { + cached_spendinfo: builder::address::create_taproot_address( + &[], + 
Some(signer.xonly_public_key), + paramset.network, + ) + .1, + signer, + rpc, + db, + btc_syncer_consumer_id, + paramset, + http_client: reqwest::Client::new(), + mempool_api_host, + mempool_api_endpoint, + } + } + + /// Gets the current recommended fee rate in sat/vb from Mempool Space or Bitcoin Core. + async fn get_fee_rate(&self) -> Result { + match self.paramset.network { + // Regtest and Signet use a fixed, low fee rate. + Network::Regtest | Network::Signet => { + tracing::debug!( + "Using fixed fee rate of 1 sat/vB for {} network", + self.paramset.network + ); + Ok(FeeRate::from_sat_per_vb_unchecked(1)) + } + + // Mainnet and Testnet4 fetch fees from Mempool Space or Bitcoin Core RPC. + Network::Bitcoin | Network::Testnet4 => { + tracing::debug!("Fetching fee rate for {} network...", self.paramset.network); + + // Fetch fee from RPC provider with a fallback to the RPC node. + let mempool_fee = get_fee_rate_from_mempool_space( + &self.mempool_api_host, + &self.mempool_api_endpoint, + self.paramset.network, + ) + .await; + + let smart_fee_result: Result = if let Ok(fee_rate) = mempool_fee { + Ok(fee_rate) + } else { + if let Err(e) = &mempool_fee { + tracing::warn!( + "Mempool.space fee fetch failed, falling back to Bitcoin Core RPC: {:#}", + e + ); + } + + let fee_estimate = self + .rpc + .estimate_smart_fee(1, None) + .await + .wrap_err("Failed to estimate smart fee using Bitcoin Core RPC")?; + + Ok(fee_estimate + .fee_rate + .wrap_err("Failed to extract fee rate from Bitcoin Core RPC response")?) + }; + + let sat_vkb = smart_fee_result.map_or_else( + |err| { + tracing::warn!( + "Smart fee estimation failed, using default of 1 sat/vB. Error: {:#}", + err + ); + 1000 + }, + |rate| rate.to_sat(), + ); + + // Convert sat/kvB to sat/vB. + let fee_sat_vb = sat_vkb / 1000; + + tracing::info!("Using fee rate: {} sat/vb", fee_sat_vb); + Ok(FeeRate::from_sat_per_vb(fee_sat_vb) + .wrap_err("Failed to create FeeRate from calculated sat/vb")?) 
+ } + + // All other network types are unsupported. + _ => Err(eyre!( + "Fee rate estimation is not supported for network: {:?}", + self.paramset.network + ) + .into()), + } + } + + /// Calculates the total fee required for a transaction package based on the fee bumping strategy. + /// + /// # Arguments + /// * `parent_tx_weight` - The weight of the main transaction being bumped. + /// * `num_fee_payer_utxos` - The number of fee payer UTXOs used (relevant for child tx size in CPFP). + /// * `fee_rate` - The target fee rate (sat/kwu or similar). + /// * `fee_paying_type` - The strategy being used (CPFP or RBF). + /// + /// # Calculation Logic + /// * **CPFP:** Calculates the weight of the hypothetical child transaction based on the + /// number of fee payer inputs and standard P2TR output sizes. It then calculates the + /// fee based on the *combined virtual size* (vbytes) of the parent and child transactions, + /// as miners evaluate the package deal. + /// * **RBF:** Calculates the weight of the replacement transaction itself (assuming inputs + /// and potentially outputs change slightly). The fee is calculated based on the weight + /// of this single replacement transaction. + /// + /// Reference for weight estimates: + fn calculate_required_fee( + parent_tx_weight: Weight, + num_fee_payer_utxos: usize, + fee_rate: FeeRate, + fee_paying_type: FeePayingType, + ) -> Result { + tracing::info!( + "Calculating required fee for {} fee payer utxos", + num_fee_payer_utxos + ); + // Estimate the weight of the child transaction (for CPFP) or the RBF replacement. + // P2TR input witness adds ~57.5vbytes (230 WU). P2TR output adds 43 vbytes (172 WU). + // Base transaction overhead (version, locktime, input/output counts) ~ 10.5 vBytes (42 WU) + // Anchor input marker (OP_FALSE OP_RETURN ..) adds overhead. Exact WU TBD. 
+ // For CPFP child: (N fee payer inputs) + (1 anchor input) + (1 change output) + // For RBF replacement: (N fee payer inputs) + (1 change output) - assuming it replaces a tx with an anchor. + let child_tx_weight = match fee_paying_type { + // CPFP Child: N fee payer inputs + 1 anchor input + 1 change output + base overhead. + // Approx WU used below: (230 * num_fee_payer_utxos) + 207 (anchor input) + 172 (change output); note the 207 differs from the ~230 WU P2TR input estimate above — TODO confirm the anchor-input weight. + // Simplified calculation used here needs verification. + FeePayingType::CPFP => Weight::from_wu_usize(230 * num_fee_payer_utxos + 207 + 172), + // RBF Replacement: N fee payer inputs + 1 change output + base overhead. + // Assumes it replaces a tx of similar structure but potentially different inputs/fees. + // Simplified calculation used here needs verification. + FeePayingType::RBF => Weight::from_wu_usize(230 * num_fee_payer_utxos + 172), + FeePayingType::NoFunding => Weight::from_wu_usize(0), + }; + + // Calculate total weight for fee calculation. + // For CPFP, miners consider the effective fee rate over the combined *vbytes* of parent + child. + // For RBF, miners consider the fee rate of the single replacement transaction's weight. + let total_weight = match fee_paying_type { + FeePayingType::CPFP => Weight::from_vb_unchecked( + child_tx_weight.to_vbytes_ceil() + parent_tx_weight.to_vbytes_ceil(), + ), + FeePayingType::RBF => child_tx_weight + parent_tx_weight, // Should likely just be the RBF tx weight? Check RBF rules. 
+ FeePayingType::NoFunding => parent_tx_weight, + }; + + fee_rate + .checked_mul_by_weight(total_weight) + .ok_or_eyre("Fee calculation overflow") + .map_err(Into::into) + } + + fn is_p2a_anchor(&self, output: &TxOut) -> bool { + output.script_pubkey + == builder::transaction::anchor_output(self.paramset.anchor_amount()).script_pubkey + } + + fn find_p2a_vout(&self, tx: &Transaction) -> Result { + let p2a_anchor = tx + .output + .iter() + .enumerate() + .find(|(_, output)| self.is_p2a_anchor(output)); + if let Some((vout, _)) = p2a_anchor { + Ok(vout) + } else { + Err(eyre::eyre!("P2A anchor output not found in transaction").into()) + } + } + + /// Submit package returns the effective fee rate in btc/kvb. + /// This function converts the btc/kvb to a fee rate in sat/vb. + #[allow(dead_code)] + fn btc_per_kvb_to_fee_rate(btc_per_kvb: f64) -> FeeRate { + FeeRate::from_sat_per_vb_unchecked((btc_per_kvb * 100000.0) as u64) + } + + /// Fetches transactions that are eligible to be sent or bumped from + /// database based on the given fee rate and tip height. Then, places a send + /// transaction request to the Bitcoin based on the fee strategy. + /// + /// For each eligible transaction (`id`): + /// + /// 1. **Send/Bump Main Tx:** Calls `send_tx` to either perform RBF or CPFP on the main + /// transaction (`id`) using the `new_fee_rate`. + /// 2. **Handle Errors:** + /// - [`SendTxError::UnconfirmedFeePayerUTXOsLeft`]: Skips the current tx, waiting for fee + /// payers to confirm. + /// - [`SendTxError::InsufficientFeePayerAmount`]: Calls `create_fee_payer_utxo` to + /// provision more funds for a future CPFP attempt. + /// - Other errors are logged. + /// + /// # Arguments + /// * `new_fee_rate` - The current target fee rate based on network conditions. + /// * `current_tip_height` - The current blockchain height, used for time-lock checks. 
+ #[tracing::instrument(skip_all, fields(sender = self.btc_syncer_consumer_id, new_fee_rate, current_tip_height))] + async fn try_to_send_unconfirmed_txs( + &self, + new_fee_rate: FeeRate, + current_tip_height: u32, + ) -> Result<()> { + let txs = self + .db + .get_sendable_txs(None, new_fee_rate, current_tip_height) + .await + .map_to_eyre()?; + + if !txs.is_empty() { + tracing::debug!("Trying to send {} sendable txs ", txs.len()); + } + + #[cfg(test)] + { + if env::var("TXSENDER_DBG_INACTIVE_TXS").is_ok() { + self.db + .debug_inactive_txs(new_fee_rate, current_tip_height) + .await; + } + } + + for id in txs { + // Update debug state + tracing::debug!( + try_to_send_id = id, + "Processing TX in try_to_send_unconfirmed_txs with fee rate {new_fee_rate}", + ); + + let (tx_metadata, tx, fee_paying_type, seen_block_id, rbf_signing_info) = + match self.db.get_try_to_send_tx(None, id).await { + Ok(res) => res, + Err(e) => { + log_error_for_tx!(self.db, id, format!("Failed to get tx details: {}", e)); + continue; + } + }; + + // Check if the transaction is already confirmed (only happens if it was confirmed after this loop started) + if let Some(block_id) = seen_block_id { + tracing::debug!( + try_to_send_id = id, + "Transaction already confirmed in block with block id of {}", + block_id + ); + + // Update sending state + let _ = self + .db + .update_tx_debug_sending_state(id, "confirmed", true) + .await; + + continue; + } + + let result = match fee_paying_type { + // Send nonstandard transactions to testnet4 using the mempool.space accelerator. + // As mempool uses out of band payment, we don't need to do cpfp or rbf. 
+ _ if self.paramset.network == bitcoin::Network::Testnet4 + && self.is_bridge_tx_nonstandard(&tx) => + { + self.send_testnet4_nonstandard_tx(&tx, id).await + } + FeePayingType::CPFP => self.send_cpfp_tx(id, tx, tx_metadata, new_fee_rate).await, + FeePayingType::RBF => { + self.send_rbf_tx(id, tx, tx_metadata, new_fee_rate, rbf_signing_info) + .await + } + FeePayingType::NoFunding => self.send_no_funding_tx(id, tx, tx_metadata).await, + }; + + if let Err(e) = result { + log_error_for_tx!(self.db, id, format!("Failed to send tx: {:?}", e)); + } + } + + Ok(()) + } + + pub fn client(&self) -> TxSenderClient { + TxSenderClient::new(self.db.clone(), self.btc_syncer_consumer_id.clone()) + } + + /// Sends a transaction that is already fully funded and signed. + /// + /// This function is used for transactions that do not require fee bumping strategies + /// like RBF or CPFP. The transaction is submitted directly to the Bitcoin network + /// without any modifications. + /// + /// # Arguments + /// * `try_to_send_id` - The database ID tracking this send attempt. + /// * `tx` - The fully funded and signed transaction ready for broadcast. + /// * `tx_metadata` - Optional metadata associated with the transaction for debugging. + /// + /// # Behavior + /// 1. Attempts to broadcast the transaction using `send_raw_transaction` RPC. + /// 2. Updates the database with success/failure state for debugging purposes. + /// 3. Logs appropriate messages for monitoring and troubleshooting. + /// + /// # Returns + /// * `Ok(())` - If the transaction was successfully broadcast. + /// * `Err(SendTxError)` - If the broadcast failed. 
+ #[tracing::instrument(skip_all, fields(sender = self.btc_syncer_consumer_id, try_to_send_id, tx_meta=?tx_metadata))] + pub(super) async fn send_no_funding_tx( + &self, + try_to_send_id: u32, + tx: Transaction, + tx_metadata: Option, + ) -> Result<()> { + tracing::debug!(target: "ci", "Sending no funding tx, raw tx: {:?}", hex::encode(bitcoin::consensus::serialize(&tx))); + match self.rpc.send_raw_transaction(&tx).await { + Ok(sent_txid) => { + tracing::debug!( + try_to_send_id, + "Successfully sent no funding tx with txid {}", + sent_txid + ); + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "no_funding_send_success", true) + .await; + } + Err(e) => { + tracing::error!( + "Failed to send no funding tx with try_to_send_id: {:?} and metadata: {:?}", + try_to_send_id, + tx_metadata + ); + let err_msg = format!("send_raw_transaction error for no funding tx: {}", e); + log_error_for_tx!(self.db, try_to_send_id, err_msg); + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "no_funding_send_failed", true) + .await; + return Err(SendTxError::Other(eyre::eyre!(e))); + } + }; + + Ok(()) + } +} + +/// Fetches the current recommended fee rate from RPC provider. Currently only supports +/// Mempool Space API. +/// This function is used to get the fee rate in sat/vkb (satoshis per kilovbyte). +/// See [Mempool Space API](https://mempool.space/docs/api/rest#get-recommended-fees) for more details. +#[allow(dead_code)] +async fn get_fee_rate_from_mempool_space( + rpc_url: &Option, + rpc_endpoint: &Option, + network: Network, +) -> Result { + let rpc_url = rpc_url + .as_ref() + .ok_or_else(|| eyre!("Fee rate API host is not configured"))?; + + let rpc_endpoint = rpc_endpoint + .as_ref() + .ok_or_else(|| eyre!("Fee rate API endpoint is not configured"))?; + let url = match network { + Network::Bitcoin => format!( + // If the host/endpoint variables are not set, the `?` above returns an error so the caller can fall back to Bitcoin Core RPC. 
+ "{}{}", + rpc_url, rpc_endpoint + ), + Network::Testnet4 => format!("{}testnet4/{}", rpc_url, rpc_endpoint), + // Return early with error for unsupported networks + _ => return Err(eyre!("Unsupported network for mempool.space: {:?}", network).into()), + }; + + let fee_sat_per_vb = reqwest::get(&url) + .await + .wrap_err_with(|| format!("GET request to {} failed", url))? + .json::() + .await + .wrap_err_with(|| format!("Failed to parse JSON response from {}", url))? + .get("fastestFee") + .and_then(|fee| fee.as_u64()) + .ok_or_else(|| eyre!("'fastestFee' field not found or invalid in API response"))?; + + // The API returns the fee rate in sat/vB. We multiply by 1000 to get sat/kvB. + let fee_rate = Amount::from_sat(fee_sat_per_vb * 1000); + + Ok(fee_rate) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::actor::TweakCache; + use crate::bitcoin_syncer::BitcoinSyncer; + use crate::bitvm_client::SECP; + use crate::builder::script::{CheckSig, SpendPath, SpendableScript}; + use crate::builder::transaction::input::SpendableTxIn; + use crate::builder::transaction::output::UnspentTxOut; + use crate::builder::transaction::{TransactionType, TxHandlerBuilder, DEFAULT_SEQUENCE}; + use crate::constants::{MIN_TAPROOT_AMOUNT, NON_EPHEMERAL_ANCHOR_AMOUNT, NON_STANDARD_V3}; + use crate::errors::BridgeError; + use crate::rpc::clementine::tagged_signature::SignatureId; + use crate::rpc::clementine::{NormalSignatureKind, NumberedSignatureKind}; + use crate::task::{IntoTask, TaskExt}; + use crate::{database::Database, test::common::*}; + use bitcoin::hashes::Hash; + use bitcoin::secp256k1::rand; + use bitcoin::secp256k1::SecretKey; + use bitcoin::transaction::Version; + use std::result::Result; + use std::sync::Arc; + use std::time::Duration; + use tokio::sync::oneshot; + + impl TxSenderClient { + pub async fn test_dbtx( + &self, + ) -> Result, BridgeError> { + self.db.begin_transaction().await + } + } + + pub(super) async fn create_tx_sender( + rpc: ExtendedBitcoinRpc, + 
) -> ( + TxSender, + BitcoinSyncer, + ExtendedBitcoinRpc, + Database, + Actor, + bitcoin::Network, + ) { + let sk = SecretKey::new(&mut rand::thread_rng()); + let network = bitcoin::Network::Regtest; + let actor = Actor::new(sk, None, network); + + let config = create_test_config_with_thread_name().await; + + let db = Database::new(&config).await.unwrap(); + + let tx_sender = TxSender::new( + actor.clone(), + rpc.clone(), + db.clone(), + "tx_sender".into(), + config.protocol_paramset(), + config.mempool_api_host.clone(), + config.mempool_api_endpoint.clone(), + ); + + ( + tx_sender, + BitcoinSyncer::new(db.clone(), rpc.clone(), config.protocol_paramset()) + .await + .unwrap(), + rpc, + db, + actor, + network, + ) + } + + pub(super) async fn create_bg_tx_sender( + rpc: ExtendedBitcoinRpc, + ) -> ( + TxSenderClient, + TxSender, + Vec>, + ExtendedBitcoinRpc, + Database, + Actor, + bitcoin::Network, + ) { + let (tx_sender, syncer, rpc, db, actor, network) = create_tx_sender(rpc).await; + + let sender_task = tx_sender.clone().into_task().cancelable_loop(); + sender_task.0.into_bg(); + + let syncer_task = syncer.into_task().cancelable_loop(); + syncer_task.0.into_bg(); + + ( + tx_sender.client(), + tx_sender, + vec![sender_task.1, syncer_task.1], + rpc, + db, + actor, + network, + ) + } + + async fn create_bumpable_tx( + rpc: &ExtendedBitcoinRpc, + signer: &Actor, + network: bitcoin::Network, + fee_paying_type: FeePayingType, + requires_rbf_signing_info: bool, + ) -> Result { + let (address, spend_info) = + builder::address::create_taproot_address(&[], Some(signer.xonly_public_key), network); + + let amount = Amount::from_sat(100000); + let outpoint = rpc.send_to_address(&address, amount).await?; + rpc.mine_blocks(1).await?; + + let version = match fee_paying_type { + FeePayingType::CPFP => NON_STANDARD_V3, + FeePayingType::RBF | FeePayingType::NoFunding => Version::TWO, + }; + + let mut txhandler = TxHandlerBuilder::new(TransactionType::Dummy) + .with_version(version) + 
.add_input( + match fee_paying_type { + FeePayingType::CPFP => { + SignatureId::from(NormalSignatureKind::OperatorSighashDefault) + } + FeePayingType::RBF if !requires_rbf_signing_info => { + NormalSignatureKind::Challenge.into() + } + FeePayingType::RBF => (NumberedSignatureKind::WatchtowerChallenge, 0i32).into(), + FeePayingType::NoFunding => { + unreachable!("AlreadyFunded should not be used for bumpable txs") + } + }, + SpendableTxIn::new( + outpoint, + TxOut { + value: amount, + script_pubkey: address.script_pubkey(), + }, + vec![], + Some(spend_info), + ), + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: amount - NON_EPHEMERAL_ANCHOR_AMOUNT - MIN_TAPROOT_AMOUNT * 3, // buffer so that rbf works without adding inputs + script_pubkey: address.script_pubkey(), // In practice, should be the wallet address, not the signer address + })) + .add_output(UnspentTxOut::from_partial( + builder::transaction::non_ephemeral_anchor_output(), + )) + .finalize(); + + signer + .tx_sign_and_fill_sigs(&mut txhandler, &[], None) + .unwrap(); + + let tx = txhandler.get_cached_tx().clone(); + Ok(tx) + } + + #[tokio::test] + async fn test_try_to_send_duplicate() -> Result<(), BridgeError> { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + rpc.mine_blocks(1).await.unwrap(); + + let (client, _tx_sender, _cancel_txs, rpc, db, signer, network) = + create_bg_tx_sender(rpc).await; + + let tx = create_bumpable_tx(&rpc, &signer, network, FeePayingType::CPFP, false) + .await + .unwrap(); + + let mut dbtx = db.begin_transaction().await.unwrap(); + let tx_id1 = client + .insert_try_to_send( + &mut dbtx, + None, + &tx, + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &[], + ) + .await + .unwrap(); + let tx_id2 = client + .insert_try_to_send( + &mut dbtx, + None, + &tx, + FeePayingType::CPFP, + None, + &[], + &[], + &[], + &[], + 
) + .await + .unwrap(); // It is ok to call this twice + dbtx.commit().await.unwrap(); + + poll_until_condition( + async || { + rpc.mine_blocks(1).await.unwrap(); + + match rpc.get_raw_transaction_info(&tx.compute_txid(), None).await { + Ok(tx_result) => { + if let Some(conf) = tx_result.confirmations { + return Ok(conf > 0); + } + Ok(false) + } + Err(_) => Ok(false), + } + }, + Some(Duration::from_secs(30)), + Some(Duration::from_millis(100)), + ) + .await + .expect("Tx was not confirmed in time"); + + poll_until_condition( + async || { + let (_, _, _, tx_id1_seen_block_id, _) = + db.get_try_to_send_tx(None, tx_id1).await.unwrap(); + let (_, _, _, tx_id2_seen_block_id, _) = + db.get_try_to_send_tx(None, tx_id2).await.unwrap(); + + // Wait for tx sender to catch up to bitcoin syncer + Ok(tx_id2_seen_block_id.is_some() && tx_id1_seen_block_id.is_some()) + }, + Some(Duration::from_secs(5)), + Some(Duration::from_millis(100)), + ) + .await + .expect("Tx was not confirmed in time"); + + Ok(()) + } + + #[tokio::test] + async fn get_fee_rate() { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + let db = Database::new(&config).await.unwrap(); + + let amount = Amount::from_sat(100_000); + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + let (xonly_pk, _) = config.secret_key.public_key(&SECP).x_only_public_key(); + + let tx_sender = TxSender::new( + signer.clone(), + rpc.clone(), + db, + "tx_sender".into(), + config.protocol_paramset(), + config.mempool_api_host.clone(), + config.mempool_api_endpoint.clone(), + ); + + let scripts: Vec> = + vec![Arc::new(CheckSig::new(xonly_pk)).clone()]; + let (taproot_address, taproot_spend_info) = builder::address::create_taproot_address( + &scripts + .iter() + .map(|s| s.to_script_buf()) + .collect::>(), + None, + config.protocol_paramset().network, + ); + + 
let input_utxo = rpc.send_to_address(&taproot_address, amount).await.unwrap(); + + let builder = TxHandlerBuilder::new(TransactionType::Dummy).add_input( + NormalSignatureKind::NotStored, + SpendableTxIn::new( + input_utxo, + TxOut { + value: amount, + script_pubkey: taproot_address.script_pubkey(), + }, + scripts.clone(), + Some(taproot_spend_info.clone()), + ), + SpendPath::ScriptSpend(0), + DEFAULT_SEQUENCE, + ); + + let mut will_fail_handler = builder + .clone() + .add_output(UnspentTxOut::new( + TxOut { + value: amount, + script_pubkey: taproot_address.script_pubkey(), + }, + scripts.clone(), + Some(taproot_spend_info.clone()), + )) + .finalize(); + + let mut tweak_cache = TweakCache::default(); + signer + .tx_sign_and_fill_sigs(&mut will_fail_handler, &[], Some(&mut tweak_cache)) + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + let mempool_info = rpc.get_mempool_info().await.unwrap(); + tracing::info!("Mempool info: {:?}", mempool_info); + + let will_fail_tx = will_fail_handler.get_cached_tx(); + + if mempool_info.mempool_min_fee.to_sat() > 0 { + assert!(rpc.send_raw_transaction(will_fail_tx).await.is_err()); + } + + // Calculate and send with fee. 
+ let fee_rate = tx_sender.get_fee_rate().await.unwrap(); + let fee = TxSender::calculate_required_fee( + will_fail_tx.weight(), + 1, + fee_rate, + FeePayingType::CPFP, + ) + .unwrap(); + tracing::info!("Fee rate: {:?}, fee: {}", fee_rate, fee); + + let mut will_successful_handler = builder + .add_output(UnspentTxOut::new( + TxOut { + value: amount - fee, + script_pubkey: taproot_address.script_pubkey(), + }, + scripts, + Some(taproot_spend_info), + )) + .finalize(); + signer + .tx_sign_and_fill_sigs(&mut will_successful_handler, &[], Some(&mut tweak_cache)) + .unwrap(); + + rpc.mine_blocks(1).await.unwrap(); + + rpc.send_raw_transaction(will_successful_handler.get_cached_tx()) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_send_no_funding_tx() -> Result<(), BridgeError> { + // Initialize RPC, tx_sender and other components + let mut config = create_test_config_with_thread_name().await; + let rpc = create_regtest_rpc(&mut config).await; + + let (tx_sender, btc_sender, rpc, db, signer, network) = + create_tx_sender(rpc.rpc().clone()).await; + let pair = btc_sender.into_task().cancelable_loop(); + pair.0.into_bg(); + + // Create a transaction that doesn't need funding + let tx = rbf::tests::create_rbf_tx(&rpc, &signer, network, false).await?; + + // Insert the transaction into the database + let mut dbtx = db.begin_transaction().await?; + let try_to_send_id = tx_sender + .client() + .insert_try_to_send( + &mut dbtx, + None, // No metadata + &tx, + FeePayingType::NoFunding, + None, + &[], // No cancel outpoints + &[], // No cancel txids + &[], // No activate txids + &[], // No activate outpoints + ) + .await?; + dbtx.commit().await?; + + // Test send_rbf_tx + tx_sender + .send_no_funding_tx(try_to_send_id, tx.clone(), None) + .await + .expect("Already funded should succeed"); + + tx_sender + .send_no_funding_tx(try_to_send_id, tx.clone(), None) + .await + .expect("Should not return error if sent again"); + + // Verify that the transaction was fee-bumped 
+ let tx_debug_info = tx_sender + .client() + .debug_tx(try_to_send_id) + .await + .expect("Transaction should be have debug info"); + + // Get the actual transaction from the mempool + rpc.get_tx_of_txid(&bitcoin::Txid::from_byte_array( + tx_debug_info.txid.unwrap().txid.try_into().unwrap(), + )) + .await + .expect("Transaction should be in mempool"); + + tx_sender + .send_no_funding_tx(try_to_send_id, tx.clone(), None) + .await + .expect("Should not return error if sent again but still in mempool"); + + Ok(()) + } + + #[tokio::test] + async fn test_mempool_space_fee_rate_mainnet() { + get_fee_rate_from_mempool_space( + &Some("https://mempool.space/".to_string()), + &Some("api/v1/fees/recommended".to_string()), + bitcoin::Network::Bitcoin, + ) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_mempool_space_fee_rate_testnet4() { + get_fee_rate_from_mempool_space( + &Some("https://mempool.space/".to_string()), + &Some("api/v1/fees/recommended".to_string()), + bitcoin::Network::Testnet4, + ) + .await + .unwrap(); + } + + #[tokio::test] + #[should_panic(expected = "Unsupported network for mempool.space: Regtest")] + async fn test_mempool_space_fee_rate_regtest() { + get_fee_rate_from_mempool_space( + &Some("https://mempool.space/".to_string()), + &Some("api/v1/fees/recommended".to_string()), + bitcoin::Network::Regtest, + ) + .await + .unwrap(); + } + + #[tokio::test] + #[should_panic(expected = "Unsupported network for mempool.space: Signet")] + async fn test_mempool_space_fee_rate_signet() { + get_fee_rate_from_mempool_space( + &Some("https://mempool.space/".to_string()), + &Some("api/v1/fees/recommended".to_string()), + bitcoin::Network::Signet, + ) + .await + .unwrap(); + } +} diff --git a/core/src/tx_sender/nonstandard.rs b/core/src/tx_sender/nonstandard.rs new file mode 100644 index 000000000..233cdc25c --- /dev/null +++ b/core/src/tx_sender/nonstandard.rs @@ -0,0 +1,186 @@ +//! TxSender for nonstandard transactions. +//! +//! 
This module contains the logic for sending nonstandard transactions for various bitcoin networks. +use bitcoin::consensus::serialize; +use bitcoin::Transaction; +use hex; +use std::collections::HashMap; + +use super::{log_error_for_tx, SendTxError, TxSender}; + +impl TxSender { + /// Checks if a bridge transaction is nonstandard. Keep in mind that these are not all cases where a transaction is nonstandard. + /// We only check non-standard types that clementine generates by default in non-standard mode. + /// Currently checks these cases: + /// 1. The transaction contains 0 sat non-anchor (only checks our specific anchor address) + /// and non-op return output. + /// 2. The transaction weight is bigger than 400k + /// + /// Arguments: + /// * `tx` - The transaction to check. + /// + /// Returns: + /// * `true` if the transaction is nonstandard, `false` otherwise. + pub fn is_bridge_tx_nonstandard(&self, tx: &Transaction) -> bool { + tx.output.iter().any(|output| { + output.value.to_sat() == 0 + && !self.is_p2a_anchor(output) + && !output.script_pubkey.is_op_return() + }) || tx.weight().to_wu() > 400_000 + } + + /// Sends a nonstandard transaction to testnet4 using the mempool.space accelerator. + /// + /// Arguments: + /// * `tx` - The transaction to send. + /// + /// Returns: + /// * `Ok(())` if the transaction is sent successfully to the accelerator. + /// * `Err(SendTxError)` if the transaction is not sent successfully to the accelerator. + /// + /// Note: Mempool.space accelerator doesn't accept transactions if: + /// - At least one of the transaction's inputs is signed with either the SIGHASH_NONE or SIGHASH_ANYONECANPAY flag, which may allow a third party to replace the transaction. + /// - The number of signature operations multiplied by 20 exceeds the transaction's weight. 
+ /// [Mempool Space API docs](https://mempool.space/docs/api/rest) + /// [Mempool Space Accelerator FAQ](https://mempool.space/accelerator/faq) + pub async fn send_testnet4_nonstandard_tx( + &self, + tx: &Transaction, + try_to_send_id: u32, + ) -> Result<(), SendTxError> { + // Get API key from environment variable + let api_key = std::env::var("MEMPOOL_SPACE_API_KEY").map_err(|_| { + SendTxError::Other(eyre::eyre!( + "MEMPOOL_SPACE_API_KEY environment variable not set, cannot send nonstandard transactions to testnet4" + )) + })?; + + // first check if the transaction is already submitted to the accelerator + let txid = tx.compute_txid(); + let response = self + .http_client + .get("https://mempool.space/api/v1/services/accelerator/testnet4/accelerations") + .header("X-Mempool-Auth", api_key.clone()) + .send() + .await + .map_err(|e| { + SendTxError::NetworkError(format!( + "Failed to get transaction history from mempool.space accelerator: {}", + e + )) + })?; + if response.status().is_success() { + // Try to parse the response, if for some reason response can't be parsed, + // don't return errors, so we continue with sending the transaction to the accelerator. 
+ let text = response.text().await.unwrap_or_default(); + let previously_sent_txs: serde_json::Value = + serde_json::from_str(&text).unwrap_or_else(|_| serde_json::json!([])); + + // try to parse the response + for tx in previously_sent_txs.as_array().unwrap_or(&vec![]) { + let Some(response_txid) = tx.get("txid").and_then(|v| v.as_str()) else { + continue; + }; + + if response_txid == txid.to_string() && tx["status"] != "failed" { + tracing::debug!( + "Found {:?} with status {:?} in accelerator transaction history", + txid, + tx["status"] + ); + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "nonstandard_testnet4_send_submitted", + false, + ) + .await; + return Ok(()); // Already submitted + } + } + } else { + let status = response.status(); + let error_text = response.text().await.unwrap_or_default(); + return Err(SendTxError::NetworkError(format!( + "Accelerator returned HTTP {}: {}", + status, error_text + ))); + } + + // Serialize transaction to hex + let tx_hex = hex::encode(serialize(tx)); + + // Prepare form data + let mut form_data = HashMap::new(); + form_data.insert("txInput", tx_hex); + form_data.insert("label", format!("clementine-{}", tx.compute_txid())); + + // Make the API request + let response = self + .http_client + .post("https://mempool.space/api/v1/services/accelerator/testnet4/accelerate/hex") + .header("X-Mempool-Auth", api_key) + .form(&form_data) + .send() + .await + .map_err(|e| { + SendTxError::NetworkError(format!( + "Failed to submit transaction to mempool.space accelerator: {}", + e + )) + })?; + + // Check if the request was successful + if response.status().is_success() { + let response_text = response.text().await.map_err(|e| { + SendTxError::NetworkError(format!("Failed to read response: {}", e)) + })?; + + tracing::info!( + "Successfully submitted nonstandard transaction {:?} to mempool.space testnet4 accelerator: {}", + txid, + response_text + ); + + let _ = self + .db + .update_tx_debug_sending_state( 
+ try_to_send_id, + "nonstandard_testnet4_send_success", + true, + ) + .await; + + Ok(()) + } else { + let status = response.status(); + let error_text = response + .text() + .await + .unwrap_or_else(|_| "Unknown error".to_string()); + + log_error_for_tx!( + self.db, + try_to_send_id, + format!( + "Failed to submit transaction to mempool.space. Status: {}, Error: {}", + status, error_text + ) + ); + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "nonstandard_testnet4_send_failed", + true, + ) + .await; + + Err(SendTxError::NetworkError(format!( + "Failed to submit transaction to mempool.space. Status: {}, Error: {}", + status, error_text + ))) + } + } +} diff --git a/core/src/tx_sender/rbf.rs b/core/src/tx_sender/rbf.rs new file mode 100644 index 000000000..c16509088 --- /dev/null +++ b/core/src/tx_sender/rbf.rs @@ -0,0 +1,1419 @@ +use super::{log_error_for_tx, Result, SendTxError, TxMetadata, TxSender}; +use crate::builder::{self}; +use crate::utils::RbfSigningInfo; +use bitcoin::script::Instruction; +use bitcoin::sighash::{Prevouts, SighashCache}; +use bitcoin::taproot::{self}; +use bitcoin::{consensus, Address, Amount, FeeRate, Transaction}; +use bitcoin::{Psbt, TapSighashType, TxOut, Txid, Witness}; +use bitcoincore_rpc::json::{ + BumpFeeOptions, BumpFeeResult, CreateRawTransactionInput, FinalizePsbtResult, + WalletCreateFundedPsbtOutput, WalletCreateFundedPsbtOutputs, WalletCreateFundedPsbtResult, +}; +use bitcoincore_rpc::RpcApi; +use eyre::Context; +use eyre::{eyre, OptionExt}; +use std::str::FromStr; + +impl TxSender { + /// Calculates the appropriate fee rate for a Replace-By-Fee (RBF) transaction. + /// + /// This method determines the effective fee rate needed to successfully replace + /// an existing transaction in the mempool. It follows Bitcoin's RBF rules by: + /// + /// 1. Retrieving the original transaction and calculating its current fee rate + /// 2. 
Ensuring the new fee rate is higher than the original by at least the minimum + /// required incremental relay fee + /// 3. Comparing the calculated minimum bump fee rate with the requested target fee rate + /// and selecting the higher of the two + /// + /// # Arguments + /// * `txid` - The transaction ID of the original transaction to be replaced + /// * `new_feerate` - The target fee rate requested for the replacement transaction + /// + /// # Returns + /// * `Ok(Some(Amount))` - The effective fee rate (in satoshis per vbyte) to use for the replacement + /// * `Ok(None)` - If the original transaction already has a higher fee rate than requested + /// * `Err(...)` - If there was an error retrieving or analyzing the original transaction + pub async fn calculate_bump_feerate( + &self, + txid: &Txid, + new_feerate: FeeRate, + ) -> Result> { + let original_tx = self.rpc.get_tx_of_txid(txid).await.map_err(|e| eyre!(e))?; + + // Calculate original tx fee + let original_tx_fee = self.get_tx_fee(&original_tx).await.map_err(|e| eyre!(e))?; + + //println!("original_tx_fee: {}", original_tx_fee); + + let original_tx_weight = original_tx.weight(); + + // Conservative vsize calculation + let original_tx_vsize = original_tx_weight.to_vbytes_floor(); + let original_feerate = original_tx_fee.to_sat() as f64 / original_tx_vsize as f64; + + // Use max of target fee rate and original + incremental rate + let min_bump_feerate = original_feerate + (222f64 / original_tx_vsize as f64); + + let effective_feerate_sat_per_vb = std::cmp::max( + new_feerate.to_sat_per_vb_ceil(), + min_bump_feerate.ceil() as u64, + ); + + // If original feerate is already higher than target, avoid bumping + if original_feerate >= new_feerate.to_sat_per_vb_ceil() as f64 { + return Ok(None); + } + + Ok(Some(Amount::from_sat(effective_feerate_sat_per_vb))) + } + + pub async fn fill_in_utxo_info(&self, psbt: &mut String) -> Result<()> { + let mut decoded_psbt = Psbt::from_str(psbt).map_err(|e| eyre!(e))?; + let 
tx = decoded_psbt.unsigned_tx.clone(); + + for (idx, input) in tx.input.iter().enumerate() { + let utxo = self + .rpc + .get_tx_out( + &input.previous_output.txid, + input.previous_output.vout, + Some(false), + ) + .await + .wrap_err("Failed to get UTXO info")?; + + if let Some(utxo) = utxo { + decoded_psbt.inputs[idx].witness_utxo = Some(TxOut { + value: utxo.value, + script_pubkey: utxo + .script_pub_key + .script() + .wrap_err("Failed to get script pubkey")?, + }); + } + } + + *psbt = decoded_psbt.to_string(); + + Ok(()) + } + + /// Given a PSBT with inputs, fill in the existing witnesses from the original tx + /// This allows us to create a finalized PSBT if + /// the original tx had SinglePlusAnyoneCanPay signatures. If the original + /// tx did not have S+AP, these signatures will be added. The expected behavior is for them to be replaced using RbfSigningInfo. + /// + /// # Returns + /// The PSBT as a base64-encoded string. + pub async fn copy_witnesses(&self, psbt: String, initial_tx: &Transaction) -> Result { + let mut decoded_psbt = Psbt::from_str(&psbt).map_err(|e| eyre!(e))?; + + for (idx, input) in initial_tx.input.iter().enumerate() { + decoded_psbt.inputs[idx].final_script_witness = Some(input.witness.clone()); + } + + Ok(decoded_psbt.to_string()) + } + + pub async fn create_funded_psbt( + &self, + tx: &Transaction, + fee_rate: FeeRate, + ) -> Result { + // We need to carefully calculate the witness weight and factor that + // into the fee rate because wallet_create_funded_psbt does not factor + // in witnesses. 
+ + // The scaleup factor is the ratio of the total weight to the base weight + // The walletcreatefundedpsbt will use the base weight to calculate the fee + // and we'll scale up the fee rate by the scaleup factor to achieve our desired fee + let witness_scaleup = tx.weight().to_wu() as f64 / (tx.base_size() * 4) as f64; + + let adjusted_fee_rate = FeeRate::from_sat_per_kwu( + (fee_rate.to_sat_per_kwu() as f64 * witness_scaleup).ceil() as u64, + ); + + // 1. Create a funded PSBT using the wallet + let create_psbt_opts = bitcoincore_rpc::json::WalletCreateFundedPsbtOptions { + add_inputs: Some(true), // Let the wallet add its inputs + change_address: None, + change_position: Some(tx.output.len() as u16), // Add change output at last index (so that SinglePlusAnyoneCanPay signatures stay valid) + change_type: None, + include_watching: None, + lock_unspent: None, + // Bitcoincore expects BTC/kvbyte for fee_rate + fee_rate: Some( + adjusted_fee_rate + .fee_vb(1000) + .ok_or_eyre("Failed to convert fee rate to BTC/kvbyte")?, + ), + subtract_fee_from_outputs: vec![], + replaceable: Some(true), // Mark as RBF enabled + conf_target: None, + estimate_mode: None, + }; + + let outputs: Vec = tx + .output + .iter() + .filter_map(|out| { + if out.script_pubkey.is_op_return() { + if let Some(Ok(Instruction::PushBytes(data))) = + out.script_pubkey.instructions().last() + { + return Some(WalletCreateFundedPsbtOutput::OpReturn( + data.as_bytes().to_vec(), + )); + } + } + let address = Address::from_script(&out.script_pubkey, self.paramset.network) + .map_err(|e| eyre!(e)); + match address { + Ok(address) => Some(WalletCreateFundedPsbtOutput::Spendable( + address.to_string(), + out.value, + )), + Err(err) => { + tracing::warn!("Failed to create address from script: {}", err); + None + } + } + }) + .collect::>(); + + let outputs = WalletCreateFundedPsbtOutputs(outputs); + + self.rpc + .wallet_create_funded_psbt( + &tx.input + .iter() + .map(|inp| CreateRawTransactionInput { + txid: 
inp.previous_output.txid, + vout: inp.previous_output.vout, + sequence: Some(inp.sequence.to_consensus_u32()), + }) + .collect::>(), + outputs, + None, + Some(create_psbt_opts), + None, + ) + .await + .map_err(|e| eyre!(e).into()) + } + /// Given a PSBT with inputs that've been signed by the wallet except for our new input, + /// we have to sign the first input with our self.signer actor. + /// + /// Assumes that the first input is the input with our key. + /// + /// # Returns + /// The signed PSBT as a base64-encoded string. + pub async fn attempt_sign_psbt( + &self, + psbt: String, + rbf_signing_info: RbfSigningInfo, + ) -> Result { + // Parse the PSBT from string + let mut decoded_psbt = Psbt::from_str(&psbt).map_err(|e| eyre!(e))?; + + // Ensure we have inputs to sign + if decoded_psbt.inputs.is_empty() { + return Err(eyre!("PSBT has no inputs to sign").into()); + } + + let input_index = rbf_signing_info.vout as usize; + + // Get the transaction to calculate the sighash + let tx = decoded_psbt.unsigned_tx.clone(); + let mut sighash_cache = SighashCache::new(&tx); + + // Determine the sighash type (default to ALL if not specified) + let sighash_type = decoded_psbt.inputs[input_index] + .sighash_type + .unwrap_or((TapSighashType::Default).into()); + + // For Taproot key path spending + if let Ok(tap_sighash_type) = sighash_type.taproot_hash_ty() { + // Calculate the sighash for this input + // Extract previous outputs from the PSBT + let prevouts: Vec = decoded_psbt + .inputs + .iter() + .map(|input| { + input + .witness_utxo + .clone() + .ok_or_eyre("expected inputs to be segwit") + .map_err(SendTxError::Other) + }) + .collect::>>()?; + + let sighash = sighash_cache + .taproot_key_spend_signature_hash( + input_index, + &Prevouts::All(&prevouts), + tap_sighash_type, + ) + .map_err(|e| eyre!("Failed to calculate sighash: {}", e))?; + + #[cfg(test)] + let mut sighash = sighash; + + #[cfg(test)] + { + use bitcoin::sighash::Annex; + // This should provide the Sighash 
for the key spend + if let Some(ref annex_bytes) = rbf_signing_info.annex { + let annex = Annex::new(annex_bytes).unwrap(); + sighash = sighash_cache + .taproot_signature_hash( + input_index, + &Prevouts::All(&prevouts), + Some(annex), + None, + tap_sighash_type, + ) + .map_err(|e| eyre!("Failed to calculate sighash with annex: {}", e))?; + } + } + + // Sign the sighash with our signer + let signature = self + .signer + .sign_with_tweak_data( + sighash, + builder::sighash::TapTweakData::KeyPath(rbf_signing_info.tweak_merkle_root), + None, + ) + .map_err(|e| eyre!("Failed to sign input: {}", e))?; + + // Add the signature to the PSBT + decoded_psbt.inputs[input_index].tap_key_sig = Some(taproot::Signature { + signature, + sighash_type: tap_sighash_type, + }); + + decoded_psbt.inputs[input_index].final_script_witness = + Some(Witness::from_slice(&[signature.serialize()])); + + #[cfg(test)] + { + if let Some(ref annex_bytes) = rbf_signing_info.annex { + let mut witness = Witness::from_slice(&[signature.serialize()]); + witness.push(annex_bytes); + decoded_psbt.inputs[input_index].final_script_witness = Some(witness); + tracing::info!("Decoded PSBT: {:?}", decoded_psbt); + } + } + // Serialize the signed PSBT back to base64 + Ok(decoded_psbt.to_string()) + } else { + Err(eyre!("Only Taproot key path signing is currently supported").into()) + } + } + + #[track_caller] + pub fn handle_err( + &self, + err_msg: impl AsRef, + err_state: impl Into, + try_to_send_id: u32, + ) { + log_error_for_tx!(self.db, try_to_send_id, err_msg.as_ref()); + + let err_state = err_state.into(); + let db = self.db.clone(); + + tokio::spawn(async move { + let _ = db + .update_tx_debug_sending_state(try_to_send_id, &err_state, true) + .await; + }); + } + + /// This function verifies that the wallet has added a funding input to the + /// PSBT. + /// + /// This is required for a transaction to be added to the wallet. 
+ pub fn verify_new_inputs(&self, psbt: &str, original_tx: &Transaction) -> bool { + let Ok(psbt) = Psbt::from_str(psbt) else { + tracing::error!("Failed to parse PSBT"); + return false; + }; + + psbt.inputs.len() > original_tx.input.len() + } + + pub async fn get_tx_fee(&self, tx: &Transaction) -> Result { + let inputs = { + let mut inputs = Amount::ZERO; + for inp in &tx.input { + inputs += self + .rpc + .get_txout_from_outpoint(&inp.previous_output) + .await + .map_err(|e| eyre!(e))? + .value; + } + inputs + }; + let outputs = tx.output.iter().map(|o| o.value).sum::(); + + let tx_fee = inputs - outputs; + + Ok(tx_fee) + } + + /// Sends or bumps a transaction using the Replace-By-Fee (RBF) strategy. + /// + /// It interacts with the database to track the latest RBF attempt (`last_rbf_txid`). + /// + /// # Logic: + /// 1. **Check for Existing RBF Tx:** Retrieves `last_rbf_txid` for the `try_to_send_id`. + /// 2. **Bump Existing Tx:** If `psbt_bump_fee` exists, it calls `rpc.psbt_bump_fee`. + /// - This internally uses the Bitcoin Core `psbtbumpfee` RPC. + /// - We then sign the inputs that we can using our Actor and have the wallet sign the rest. + /// + /// 3. **Send Initial RBF Tx:** If no `last_rbf_txid` exists (first attempt): + /// - It uses `fund_raw_transaction` RPC to let the wallet add (potentially) inputs, + /// outputs, set the fee according to `fee_rate`, and mark the transaction as replaceable. + /// - Uses `sign_raw_transaction_with_wallet` RPC to sign the funded transaction. + /// - Uses `send_raw_transaction` RPC to broadcast the initial RBF transaction. + /// - Saves the resulting `txid` to the database as the `last_rbf_txid`. + /// + /// # Arguments + /// * `try_to_send_id` - The database ID tracking this send attempt. + /// * `tx` - The original transaction intended for RBF (used only on the first attempt). + /// * `tx_metadata` - Optional metadata associated with the transaction. + /// * `fee_rate` - The target fee rate for the RBF replacement. 
+ #[tracing::instrument(skip_all, fields(sender = self.btc_syncer_consumer_id, try_to_send_id, tx_meta=?tx_metadata))] + pub(super) async fn send_rbf_tx( + &self, + try_to_send_id: u32, + tx: Transaction, + tx_metadata: Option, + fee_rate: FeeRate, + rbf_signing_info: Option, + ) -> Result<()> { + tracing::debug!(?tx_metadata, "Sending RBF tx",); + + tracing::debug!(?try_to_send_id, "Attempting to send."); + + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "preparing_rbf", true) + .await; + + let mut dbtx = self + .db + .begin_transaction() + .await + .wrap_err("Failed to begin database transaction")?; + + let last_rbf_txid = self + .db + .get_last_rbf_txid(Some(&mut dbtx), try_to_send_id) + .await + .wrap_err("Failed to get last RBF txid")?; + + if let Some(last_rbf_txid) = last_rbf_txid { + tracing::debug!( + ?try_to_send_id, + "Attempting to bump fee for txid {last_rbf_txid} using psbt_bump_fee" + ); + + let effective_feerate = self + .calculate_bump_feerate(&last_rbf_txid, fee_rate) + .await?; + + let Some(effective_feerate) = effective_feerate else { + tracing::debug!( + ?try_to_send_id, + "Original tx feerate already higher than target ({} sat/vB), skipping bump", + fee_rate.to_sat_per_vb_ceil() + ); + return Ok(()); + }; + + let psbt_bump_opts = BumpFeeOptions { + conf_target: None, // Use fee_rate instead + fee_rate: Some(bitcoincore_rpc::json::FeeRate::per_vbyte(effective_feerate)), + replaceable: Some(true), // Ensure the bumped tx is also replaceable + estimate_mode: None, + }; + + let bump_result = self + .rpc + .psbt_bump_fee(&last_rbf_txid, Some(&psbt_bump_opts)) + .await; + + let bumped_psbt = match bump_result { + Err(e) => { + // Check for common errors indicating the tx is already confirmed or spent + let rpc_error_str = e.to_string(); + if rpc_error_str.contains("Transaction already in block chain") { + tracing::debug!( + ?try_to_send_id, + "RBF bump failed for {last_rbf_txid}, likely confirmed or spent: {e}" + ); + // No 
need to return error, just log and proceed + dbtx.commit().await.wrap_err( + "Failed to commit database transaction after failed bump check", + )?; + return Ok(()); + } else { + // Other potentially transient errors + let error_message = format!("psbt_bump_fee failed: {}", e); + log_error_for_tx!(self.db, try_to_send_id, error_message); + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "rbf_psbt_bump_failed", + true, + ) + .await; + tracing::warn!(?try_to_send_id, "psbt_bump_fee failed: {e:?}"); + return Err(SendTxError::Other(eyre!(e))); + } + } + Ok(BumpFeeResult { + psbt: Some(psbt), .. + }) => psbt, + Ok(BumpFeeResult { errors, .. }) if !errors.is_empty() => { + self.handle_err( + format!("psbt_bump_fee failed: {:?}", errors), + "rbf_psbt_bump_failed", + try_to_send_id, + ); + return Err(SendTxError::Other(eyre!(errors.join(", ")))); + } + Ok(BumpFeeResult { psbt: None, .. }) => { + self.handle_err( + "psbt_bump_fee returned no psbt", + "rbf_psbt_bump_failed", + try_to_send_id, + ); + return Err(SendTxError::Other(eyre!("psbt_bump_fee returned no psbt"))); + } + }; + + let bumped_psbt = self + .copy_witnesses(bumped_psbt, &tx) + .await + .wrap_err("Failed to fill SAP signatures")?; + + // Wallet first pass + // We rely on the node's wallet here because psbt_bump_fee might add inputs from it. + let process_result = self + .rpc + .wallet_process_psbt(&bumped_psbt, Some(true), None, None) // sign=true + .await; + + let processed_psbt = match process_result { + Ok(res) if res.complete => res.psbt, + // attempt to sign + Ok(res) => { + let Some(rbf_signing_info) = rbf_signing_info else { + return Err(eyre!( + "RBF signing info is required for non SighashSingle RBF txs" + ) + .into()); + }; + self.attempt_sign_psbt(res.psbt, rbf_signing_info).await? 
+ } + Err(e) => { + let err_msg = format!("wallet_process_psbt error: {}", e); + tracing::warn!(?try_to_send_id, "{}", err_msg); + log_error_for_tx!(self.db, try_to_send_id, err_msg); + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "rbf_psbt_sign_failed", true) + .await; + return Err(SendTxError::Other(eyre!(e))); + } + }; + + // Finalize the PSBT + let finalize_result = self + .rpc + .finalize_psbt(&processed_psbt, None) // extract=true by default + .await; + + let final_tx_hex = match finalize_result { + Ok(FinalizePsbtResult { + hex: Some(hex), + complete: true, + .. + }) => hex, + Ok(res) => { + let err_msg = format!("Could not finalize PSBT: {:?}", res); + log_error_for_tx!(self.db, try_to_send_id, err_msg); + + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "rbf_psbt_finalize_incomplete", + true, + ) + .await; + return Err(SendTxError::PsbtError(err_msg)); + } + Err(e) => { + log_error_for_tx!( + self.db, + try_to_send_id, + format!("finalize_psbt error: {}", e) + ); + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "rbf_psbt_finalize_failed", + true, + ) + .await; + return Err(SendTxError::Other(eyre!(e))); + } + }; + + // Deserialize final tx to get txid + let final_tx: Transaction = match consensus::deserialize(&final_tx_hex) { + Ok(tx) => tx, + Err(e) => { + log_error_for_tx!( + self.db, + try_to_send_id, + format!("Failed to deserialize final RBF tx hex: {}", e) + ); + return Err(SendTxError::Other(eyre!(e))); + } + }; + let bumped_txid = final_tx.compute_txid(); + + // Broadcast the finalized transaction + let sent_txid = match self.rpc.send_raw_transaction(&final_tx).await { + Ok(sent_txid) if sent_txid == bumped_txid => sent_txid, + Ok(other_txid) => { + log_error_for_tx!( + self.db, + try_to_send_id, + format!( + "send_raw_transaction returned unexpected txid {} (expected {})", + other_txid, bumped_txid + ) + ); + let _ = self + .db + .update_tx_debug_sending_state( + 
try_to_send_id, + "rbf_send_txid_mismatch", + true, + ) + .await; + return Err(SendTxError::Other(eyre!( + "send_raw_transaction returned unexpected txid" + ))); + } + Err(e) => { + log_error_for_tx!( + self.db, + try_to_send_id, + format!("send_raw_transaction error for bumped RBF tx: {}", e) + ); + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "rbf_bump_send_failed", true) + .await; + return Err(SendTxError::Other(eyre!(e))); + } + }; + + tracing::debug!( + ?try_to_send_id, + "RBF tx {last_rbf_txid} successfully bumped and sent as {sent_txid}" + ); + + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "rbf_bumped_sent", true) + .await; + + self.db + .save_rbf_txid(Some(&mut dbtx), try_to_send_id, sent_txid) + .await + .wrap_err("Failed to save new RBF txid after bump")?; + } else { + tracing::debug!( + ?try_to_send_id, + "Funding initial RBF tx using PSBT workflow" + ); + + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "creating_initial_rbf_psbt", true) + .await; + + let create_result = self + .create_funded_psbt(&tx, fee_rate) + .await + .map_err(|err| { + let err = eyre!(err).wrap_err("Failed to create funded PSBT"); + self.handle_err( + format!("{:?}", err), + "rbf_psbt_create_failed", + try_to_send_id, + ); + + err + })?; + + if !self.verify_new_inputs(&create_result.psbt, &tx) { + tracing::warn!( + ?try_to_send_id, + "Transaction has not been funded and is being sent as is. This transaction will have to be manually bumped as the wallet will not add it to itself." 
+ ); + } + + // replace locktime and version + let mut psbt = Psbt::from_str(&create_result.psbt).map_err(|e| eyre!(e))?; + psbt.unsigned_tx.lock_time = tx.lock_time; + psbt.unsigned_tx.version = tx.version; + + tracing::debug!( + try_to_send_id, + "Successfully created initial RBF PSBT with fee {}", + create_result.fee + ); + + let mut psbt = psbt.to_string(); + + self.fill_in_utxo_info(&mut psbt).await.map_err(|err| { + let err = eyre!(err).wrap_err("Failed to fill in utxo info"); + self.handle_err( + format!("{:?}", err), + "rbf_fill_in_utxo_info_failed", + try_to_send_id, + ); + + err + })?; + + psbt = self.copy_witnesses(psbt, &tx).await.map_err(|err| { + let err = eyre!(err).wrap_err("Failed to copy witnesses"); + self.handle_err( + format!("{:?}", err), + "rbf_copy_witnesses_failed", + try_to_send_id, + ); + + err + })?; + + // 2. Process the PSBT (let the wallet sign its inputs) + let process_result = self + .rpc + .wallet_process_psbt(&psbt, Some(true), None, None) + .await + .map_err(|err| { + let err = eyre!(err).wrap_err("Failed to process initial RBF PSBT"); + self.handle_err( + format!("{:?}", err), + "rbf_psbt_process_failed", + try_to_send_id, + ); + + err + })?; + + if let Some(rbf_signing_info) = rbf_signing_info { + psbt = self + .attempt_sign_psbt(process_result.psbt, rbf_signing_info) + .await + .map_err(|err| { + let err = eyre!(err).wrap_err("Failed to sign initial RBF PSBT"); + self.handle_err( + format!("{:?}", err), + "rbf_psbt_sign_failed", + try_to_send_id, + ); + + err + })?; + } else { + psbt = process_result.psbt; + } + + tracing::debug!(try_to_send_id, "Successfully processed initial RBF PSBT"); + + let final_tx = { + // Extract tx + let psbt = Psbt::from_str(&psbt).map_err(|e| eyre!(e)).map_err(|err| { + let err = eyre!(err).wrap_err("Failed to deserialize initial RBF PSBT"); + self.handle_err( + format!("{:?}", err), + "rbf_psbt_deserialize_failed", + try_to_send_id, + ); + err + })?; + + let mut tx = psbt.unsigned_tx.clone(); + + 
for (idx, input) in tx.input.iter_mut().enumerate() { + if let Some(witness) = psbt.inputs[idx].final_script_witness.clone() { + input.witness = witness; + } + if let Some(sig) = psbt.inputs[idx].final_script_sig.clone() { + input.script_sig = sig; + } + } + + tx + }; + + let initial_txid = final_tx.compute_txid(); + + // 4. Broadcast the finalized transaction + let sent_txid = match self.rpc.send_raw_transaction(&final_tx).await { + Ok(sent_txid) => { + if sent_txid != initial_txid { + let err_msg = format!( + "send_raw_transaction returned unexpected txid {} (expected {}) for initial RBF", + sent_txid, initial_txid + ); + log_error_for_tx!(self.db, try_to_send_id, err_msg); + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "rbf_initial_send_txid_mismatch", + true, + ) + .await; + return Err(SendTxError::Other(eyre!(err_msg))); + } + tracing::debug!( + try_to_send_id, + "Successfully sent initial RBF tx with txid {}", + sent_txid + ); + sent_txid + } + Err(e) => { + tracing::error!("RBF failed for: {:?}", final_tx); + let err_msg = format!("send_raw_transaction error for initial RBF tx: {}", e); + log_error_for_tx!(self.db, try_to_send_id, err_msg); + let _ = self + .db + .update_tx_debug_sending_state( + try_to_send_id, + "rbf_initial_send_failed", + true, + ) + .await; + return Err(SendTxError::Other(eyre!(e))); + } + }; + + // Update debug sending state + let _ = self + .db + .update_tx_debug_sending_state(try_to_send_id, "rbf_initial_sent", true) + .await; + + self.db + .save_rbf_txid(Some(&mut dbtx), try_to_send_id, sent_txid) + .await + .wrap_err("Failed to save initial RBF txid")?; + } + + dbtx.commit() + .await + .wrap_err("Failed to commit database transaction")?; + + Ok(()) + } +} + +#[cfg(test)] +pub mod tests { + use super::super::tests::*; + use super::*; + use crate::actor::Actor; + use crate::builder::script::SpendPath; + use crate::builder::transaction::input::SpendableTxIn; + use 
crate::builder::transaction::output::UnspentTxOut; + use crate::builder::transaction::{ + op_return_txout, TransactionType, TxHandlerBuilder, DEFAULT_SEQUENCE, + }; + use crate::constants::{MIN_TAPROOT_AMOUNT, NON_STANDARD_V3}; + use crate::errors::BridgeError; + use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; + use crate::rpc::clementine::tagged_signature::SignatureId; + use crate::rpc::clementine::{NormalSignatureKind, NumberedSignatureKind}; + use crate::task::{IntoTask, TaskExt}; + use crate::test::common::*; + use crate::utils::FeePayingType; + use bitcoin::hashes::Hash; + use bitcoin::transaction::Version; + use bitcoin::TxOut; + use bitcoincore_rpc::json::GetRawTransactionResult; + use std::result::Result; + use std::time::Duration; + + pub async fn create_rbf_tx( + rpc: &ExtendedBitcoinRpc, + signer: &Actor, + network: bitcoin::Network, + requires_initial_funding: bool, + ) -> Result { + let (address, spend_info) = + builder::address::create_taproot_address(&[], Some(signer.xonly_public_key), network); + + let amount = Amount::from_sat(100000); + let outpoint = rpc.send_to_address(&address, amount).await?; + + rpc.mine_blocks(1).await?; + + let version = Version::TWO; + + let mut txhandler = TxHandlerBuilder::new(TransactionType::Dummy) + .with_version(version) + .add_input( + if !requires_initial_funding { + SignatureId::from(NormalSignatureKind::Challenge) + } else { + SignatureId::from((NumberedSignatureKind::WatchtowerChallenge, 0i32)) + }, + SpendableTxIn::new( + outpoint, + TxOut { + value: amount, + script_pubkey: address.script_pubkey(), + }, + vec![], + Some(spend_info), + ), + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: if requires_initial_funding { + amount // do not add any fee if we want to test initial funding + } else { + amount - MIN_TAPROOT_AMOUNT * 3 + }, + script_pubkey: address.script_pubkey(), // In practice, should be the wallet address, not the signer address + })) + 
.finalize(); + + signer + .tx_sign_and_fill_sigs(&mut txhandler, &[], None) + .unwrap(); + + let tx = txhandler.get_cached_tx().clone(); + Ok(tx) + } + + async fn create_challenge_tx( + rpc: &ExtendedBitcoinRpc, + signer: &Actor, + network: bitcoin::Network, + ) -> Result { + let (address, spend_info) = + builder::address::create_taproot_address(&[], Some(signer.xonly_public_key), network); + + let amount = MIN_TAPROOT_AMOUNT; + let outpoint = rpc.send_to_address(&address, amount).await?; + + rpc.mine_blocks(1).await?; + + let version = NON_STANDARD_V3; + + let mut txhandler = TxHandlerBuilder::new(TransactionType::Challenge) + .with_version(version) + .add_input( + SignatureId::from(NormalSignatureKind::Challenge), + SpendableTxIn::new( + outpoint, + TxOut { + value: amount, + script_pubkey: address.script_pubkey(), + }, + vec![], + Some(spend_info), + ), + SpendPath::KeySpend, + DEFAULT_SEQUENCE, + ) + .add_output(UnspentTxOut::from_partial(TxOut { + value: Amount::from_btc(1.0).unwrap(), + script_pubkey: address.script_pubkey(), // In practice, should be the wallet address, not the signer address + })) + .add_output(UnspentTxOut::from_partial(op_return_txout(b"TEST"))) + .finalize(); + + signer + .tx_sign_and_fill_sigs(&mut txhandler, &[], None) + .unwrap(); + + let tx = txhandler.get_cached_tx().clone(); + Ok(tx) + } + + #[tokio::test] + async fn test_send_challenge_tx() -> Result<(), BridgeError> { + // Initialize RPC, tx_sender and other components + let mut config = create_test_config_with_thread_name().await; + let rpc = create_regtest_rpc(&mut config).await; + + let (tx_sender, btc_sender, rpc, db, signer, network) = + create_tx_sender(rpc.rpc().clone()).await; + let pair = btc_sender.into_task().cancelable_loop(); + pair.0.into_bg(); + + // Create a bumpable transaction + let tx = create_challenge_tx(&rpc, &signer, network).await?; + + // Insert the transaction into the database + let mut dbtx = db.begin_transaction().await?; + let try_to_send_id = 
tx_sender + .client() + .insert_try_to_send( + &mut dbtx, + None, // No metadata + &tx, + FeePayingType::RBF, + None, // should not be resigning challenge tx + &[], // No cancel outpoints + &[], // No cancel txids + &[], // No activate txids + &[], // No activate outpoints + ) + .await?; + dbtx.commit().await?; + + // Get the current fee rate and increase it for RBF + let current_fee_rate = tx_sender.get_fee_rate().await?; + + // Test send_rbf_tx + tx_sender + .send_rbf_tx(try_to_send_id, tx.clone(), None, current_fee_rate, None) + .await + .expect("RBF should succeed"); + + // Verify that the transaction was fee-bumped + let tx_debug_info = tx_sender + .client() + .debug_tx(try_to_send_id) + .await + .expect("Transaction should be have debug info"); + + // Get the actual transaction from the mempool + rpc.get_tx_of_txid(&bitcoin::Txid::from_byte_array( + tx_debug_info.txid.unwrap().txid.try_into().unwrap(), + )) + .await + .expect("Transaction should be in mempool"); + + Ok(()) + } + + #[tokio::test] + async fn test_send_rbf() -> Result<(), BridgeError> { + // Initialize RPC, tx_sender and other components + let mut config = create_test_config_with_thread_name().await; + let rpc = create_regtest_rpc(&mut config).await; + + let (tx_sender, btc_sender, rpc, db, signer, network) = + create_tx_sender(rpc.rpc().clone()).await; + let pair = btc_sender.into_task().cancelable_loop(); + pair.0.into_bg(); + + // Create a bumpable transaction + let tx = create_rbf_tx(&rpc, &signer, network, false).await?; + + // Insert the transaction into the database + let mut dbtx = db.begin_transaction().await?; + let try_to_send_id = tx_sender + .client() + .insert_try_to_send( + &mut dbtx, + None, // No metadata + &tx, + FeePayingType::RBF, + Some(RbfSigningInfo { + vout: 0, + tweak_merkle_root: None, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }), + &[], // No cancel outpoints + &[], // No cancel txids + &[], // No activate txids + &[], // 
No activate outpoints + ) + .await?; + dbtx.commit().await?; + + // Get the current fee rate and increase it for RBF + let current_fee_rate = tx_sender.get_fee_rate().await?; + + // Test send_rbf_tx + tx_sender + .send_rbf_tx( + try_to_send_id, + tx.clone(), + None, + current_fee_rate, + Some(RbfSigningInfo { + vout: 0, + tweak_merkle_root: None, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }), + ) + .await + .expect("RBF should succeed"); + + // Verify that the transaction was fee-bumped + let tx_debug_info = tx_sender + .client() + .debug_tx(try_to_send_id) + .await + .expect("Transaction should be have debug info"); + + // Get the actual transaction from the mempool + rpc.get_tx_of_txid(&bitcoin::Txid::from_byte_array( + tx_debug_info.txid.unwrap().txid.try_into().unwrap(), + )) + .await + .expect("Transaction should be in mempool"); + + Ok(()) + } + + #[tokio::test] + async fn test_send_with_initial_funding_rbf() -> Result<(), BridgeError> { + // Initialize RPC, tx_sender and other components + let mut config = create_test_config_with_thread_name().await; + let rpc = create_regtest_rpc(&mut config).await; + + let (tx_sender, btc_sender, rpc, db, signer, network) = + create_tx_sender(rpc.rpc().clone()).await; + let pair = btc_sender.into_task().cancelable_loop(); + pair.0.into_bg(); + + // Create a bumpable transaction + let tx = create_rbf_tx(&rpc, &signer, network, true).await?; + + // Insert the transaction into the database + let mut dbtx = db.begin_transaction().await?; + let try_to_send_id = tx_sender + .client() + .insert_try_to_send( + &mut dbtx, + None, // No metadata + &tx, + FeePayingType::RBF, + Some(RbfSigningInfo { + vout: 0, + tweak_merkle_root: None, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }), + &[], // No cancel outpoints + &[], // No cancel txids + &[], // No activate txids + &[], // No activate outpoints + ) + .await?; + dbtx.commit().await?; + + // Get 
the current fee rate and increase it for RBF + let current_fee_rate = tx_sender.get_fee_rate().await?; + + // Test send_rbf_tx + tx_sender + .send_rbf_tx( + try_to_send_id, + tx.clone(), + None, + current_fee_rate, + Some(RbfSigningInfo { + vout: 0, + tweak_merkle_root: None, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }), + ) + .await + .expect("RBF should succeed"); + + // Verify that the transaction was fee-bumped + let tx_debug_info = tx_sender + .client() + .debug_tx(try_to_send_id) + .await + .expect("Transaction should have debug info"); + + // Get the actual transaction from the mempool + let tx = rpc + .get_tx_of_txid(&bitcoin::Txid::from_byte_array( + tx_debug_info.txid.unwrap().txid.try_into().unwrap(), + )) + .await + .expect("Transaction should be in mempool"); + + // Check that the transaction has new input + assert_eq!(tx.input.len(), 2); + + Ok(()) + } + + #[tokio::test] + async fn test_send_without_info_rbf() -> Result<(), BridgeError> { + // This is the case with no initial funding required, corresponding to the Challenge transaction. 
+ + // Initialize RPC, tx_sender and other components + let mut config = create_test_config_with_thread_name().await; + let rpc = create_regtest_rpc(&mut config).await; + + let (tx_sender, btc_sender, rpc, db, signer, network) = + create_tx_sender(rpc.rpc().clone()).await; + let pair = btc_sender.into_task().cancelable_loop(); + pair.0.into_bg(); + + // Create a bumpable transaction + let tx = create_rbf_tx(&rpc, &signer, network, false).await?; + + // Insert the transaction into the database + let mut dbtx = db.begin_transaction().await?; + let try_to_send_id = tx_sender + .client() + .insert_try_to_send( + &mut dbtx, + None, // No metadata + &tx, + FeePayingType::RBF, + None, + &[], // No cancel outpoints + &[], // No cancel txids + &[], // No activate txids + &[], // No activate outpoints + ) + .await?; + dbtx.commit().await?; + + // Get the current fee rate and increase it for RBF + let current_fee_rate = tx_sender.get_fee_rate().await?; + + // Test send_rbf_tx + tx_sender + .send_rbf_tx(try_to_send_id, tx.clone(), None, current_fee_rate, None) + .await + .expect("RBF should succeed"); + + // Verify that the transaction was fee-bumped + let tx_debug_info = tx_sender + .client() + .debug_tx(try_to_send_id) + .await + .expect("Transaction should be have debug info"); + + // Get the actual transaction from the mempool + rpc.get_tx_of_txid(&bitcoin::Txid::from_byte_array( + tx_debug_info.txid.unwrap().txid.try_into().unwrap(), + )) + .await + .expect("Transaction should be in mempool"); + + Ok(()) + } + + #[tokio::test] + // #[ignore = "unable to bump right now due to psbtbumpfee not accepting out-of-wallet"] + async fn test_bump_rbf_after_sent() -> Result<(), BridgeError> { + // Initialize RPC, tx_sender and other components + let mut config = create_test_config_with_thread_name().await; + let rpc = create_regtest_rpc(&mut config).await; + + let (tx_sender, btc_sender, rpc, db, signer, network) = + create_tx_sender(rpc.rpc().clone()).await; + let pair = 
btc_sender.into_task().cancelable_loop(); + pair.0.into_bg(); + + // Create a bumpable transaction + let tx = create_rbf_tx(&rpc, &signer, network, true).await?; + + // Insert the transaction into the database + let mut dbtx = db.begin_transaction().await?; + let try_to_send_id = tx_sender + .client() + .insert_try_to_send( + &mut dbtx, + None, // No metadata + &tx, + FeePayingType::RBF, + None, + &[], // No cancel outpoints + &[], // No cancel txids + &[], // No activate txids + &[], // No activate outpoints + ) + .await?; + dbtx.commit().await?; + + let current_fee_rate = tx_sender.get_fee_rate().await?; + + // Create initial TX + tx_sender + .send_rbf_tx( + try_to_send_id, + tx.clone(), + None, + current_fee_rate, + Some(RbfSigningInfo { + vout: 0, + tweak_merkle_root: None, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }), + ) + .await + .expect("RBF should succeed"); + + // Verify that the transaction was saved in db + let tx_debug_info = tx_sender + .client() + .debug_tx(try_to_send_id) + .await + .expect("Transaction should be have debug info"); + + // Verify that TX is in mempool + let initial_txid = tx_debug_info.txid.unwrap().txid; + rpc.get_tx_of_txid(&bitcoin::Txid::from_byte_array( + initial_txid.clone().try_into().unwrap(), + )) + .await + .expect("Transaction should be in mempool"); + + // Increase fee rate + let higher_fee_rate = current_fee_rate.checked_mul(2).unwrap(); + + tokio::time::sleep(Duration::from_secs(1)).await; + + // try to send tx with a bumped fee. 
+ tx_sender + .send_rbf_tx( + try_to_send_id, + tx.clone(), + None, + higher_fee_rate, + Some(RbfSigningInfo { + vout: 0, + tweak_merkle_root: None, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }), + ) + .await + .expect("RBF should succeed"); + + // Verify that the transaction was saved in db + let tx_debug_info = tx_sender + .client() + .debug_tx(try_to_send_id) + .await + .expect("Transaction should be have debug info"); + + // Verify that TX is in mempool + let changed_txid = tx_debug_info.txid.unwrap().txid; + rpc.get_tx_of_txid(&bitcoin::Txid::from_byte_array( + changed_txid.clone().try_into().unwrap(), + )) + .await + .expect("Transaction should be in mempool"); + + // Verify that tx has changed. + assert_ne!( + changed_txid, initial_txid, + "Transaction should have been bumped" + ); + + Ok(()) + } + + #[tokio::test] + async fn test_bg_send_rbf() -> Result<(), BridgeError> { + let mut config = create_test_config_with_thread_name().await; + let regtest = create_regtest_rpc(&mut config).await; + let rpc = regtest.rpc().clone(); + + rpc.mine_blocks(1).await.unwrap(); + + let (client, _tx_sender, _cancel_txs, rpc, db, signer, network) = + create_bg_tx_sender(rpc).await; + + let tx = create_rbf_tx(&rpc, &signer, network, false).await.unwrap(); + + let mut dbtx = db.begin_transaction().await.unwrap(); + client + .insert_try_to_send( + &mut dbtx, + None, + &tx, + FeePayingType::RBF, + Some(RbfSigningInfo { + vout: 0, + tweak_merkle_root: None, + #[cfg(test)] + annex: None, + #[cfg(test)] + additional_taproot_output_count: None, + }), + &[], + &[], + &[], + &[], + ) + .await + .unwrap(); + dbtx.commit().await.unwrap(); + + poll_until_condition( + async || { + rpc.mine_blocks(1).await.unwrap(); + + let tx_result = rpc.get_raw_transaction_info(&tx.compute_txid(), None).await; + + Ok(matches!(tx_result, Ok(GetRawTransactionResult { + confirmations: Some(confirmations), + .. 
+ }) if confirmations > 0)) + }, + Some(Duration::from_secs(30)), + Some(Duration::from_millis(100)), + ) + .await + .expect("Tx was not confirmed in time"); + + Ok(()) + } +} diff --git a/core/src/tx_sender/task.rs b/core/src/tx_sender/task.rs new file mode 100644 index 000000000..742b5319a --- /dev/null +++ b/core/src/tx_sender/task.rs @@ -0,0 +1,128 @@ +//! # Transaction Sender Task +//! +//! This module provides the [`Task`] implementation for the [`TxSender`]. +//! +//! This task will fetch block events from [`Bitcoin Syncer`](crate::bitcoin_syncer) +//! and confirms or unconfirms transaction based on the event. Finally, it will +//! try to send transactions that are in the queue. Transactions are picked from +//! the database and sent to the Bitcoin network if a transaction is in queue +//! and not in the [`Bitcoin Syncer`](crate::bitcoin_syncer) database. + +use super::TxSender; +use crate::errors::ResultExt; +use crate::task::{IgnoreError, TaskVariant, WithDelay}; +use crate::{ + bitcoin_syncer::BitcoinSyncerEvent, + database::Database, + errors::BridgeError, + task::{IntoTask, Task, TaskExt}, +}; +use std::time::Duration; +use tonic::async_trait; + +const POLL_DELAY: Duration = if cfg!(test) { + Duration::from_millis(250) +} else { + Duration::from_secs(30) +}; + +#[derive(Debug)] +pub struct TxSenderTask { + db: Database, + current_tip_height: u32, + inner: TxSender, +} + +#[async_trait] +impl Task for TxSenderTask { + type Output = bool; + const VARIANT: TaskVariant = TaskVariant::TxSender; + + #[tracing::instrument(skip(self), name = "tx_sender_task")] + async fn run_once(&mut self) -> std::result::Result { + let mut dbtx = self.db.begin_transaction().await.map_to_eyre()?; + + let is_block_update = async { + let Some(event) = self + .db + .fetch_next_bitcoin_syncer_evt(&mut dbtx, &self.inner.btc_syncer_consumer_id) + .await? 
+ else { + return Ok(false); + }; + tracing::info!("Received Bitcoin syncer event: {:?}", event); + + tracing::debug!("TXSENDER: Event: {:?}", event); + Ok::<_, BridgeError>(match event { + BitcoinSyncerEvent::NewBlock(block_id) => { + self.current_tip_height = self + .db + .get_block_info_from_id(Some(&mut dbtx), block_id) + .await? + .ok_or(eyre::eyre!("Block not found in TxSenderTask"))? + .1; + tracing::info!( + height = self.current_tip_height, + block_id = %block_id, + "Block mined, confirming transactions..." + ); + + self.db.confirm_transactions(&mut dbtx, block_id).await?; + + dbtx.commit().await?; + true + } + BitcoinSyncerEvent::ReorgedBlock(block_id) => { + let height = self + .db + .get_block_info_from_id(Some(&mut dbtx), block_id) + .await? + .ok_or(eyre::eyre!("Block not found in TxSenderTask"))? + .1; + tracing::info!( + height = height, + block_id = %block_id, + "Reorged happened, unconfirming transactions..." + ); + + self.db.unconfirm_transactions(&mut dbtx, block_id).await?; + + dbtx.commit().await?; + true + } + }) + } + .await?; + + // If there is a block update, it is possible that there are more. + // Before sending, fetch all events and process them without waiting. 
+ if is_block_update { + return Ok(true); + } + + tracing::info!("TXSENDER: Getting fee rate"); + let fee_rate_result = self.inner.get_fee_rate().await; + tracing::info!("TXSENDER: Fee rate result: {:?}", fee_rate_result); + let fee_rate = fee_rate_result?; + + self.inner + .try_to_send_unconfirmed_txs(fee_rate, self.current_tip_height) + .await?; + + Ok(false) + } +} + +impl IntoTask for TxSender { + type Task = WithDelay>; + + fn into_task(self) -> Self::Task { + TxSenderTask { + db: self.db.clone(), + current_tip_height: 0, + inner: self, + } + .ignore_error() + .with_delay(POLL_DELAY) + } +} diff --git a/core/src/utils.rs b/core/src/utils.rs new file mode 100644 index 000000000..51cebdcf2 --- /dev/null +++ b/core/src/utils.rs @@ -0,0 +1,797 @@ +use crate::builder::transaction::TransactionType; +use crate::config::TelemetryConfig; +use crate::errors::BridgeError; +use crate::operator::RoundIndex; +use crate::rpc::clementine::VergenResponse; +use bitcoin::{OutPoint, ScriptBuf, TapNodeHash, XOnlyPublicKey}; +use eyre::Context as _; +use futures::future::try_join_all; +use http::HeaderValue; +use metrics_exporter_prometheus::PrometheusBuilder; +use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::fs::File; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use tokio::time::error::Elapsed; +use tokio::time::timeout; +use tonic::Status; +use tower::{Layer, Service}; +use tracing::level_filters::LevelFilter; +use tracing::{debug_span, Instrument, Subscriber}; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::{fmt, EnvFilter, Layer as TracingLayer, Registry}; + +/// Initializes a [`tracing`] subscriber depending on the environment. +/// [`EnvFilter`] is used with an optional default level. Sets up the +/// [`color_eyre`] handler. 
+/// +/// # Log Formats +/// +/// - `json` **JSON** is used when `LOG_FORMAT=json` +/// - `human` **Human-readable** direct logs are used when `LOG_FORMAT` is not +/// set to `json`. +/// +/// ## CI +/// +/// In CI, logging is always in the human-readable format with output to the +/// console. The `INFO_LOG_FILE` env var can be used to set an optional log file +/// output. If not set, only console logging is used. +/// +/// # Backtraces +/// +/// Backtraces are enabled by default for tests. Error backtraces otherwise +/// depend on the `RUST_LIB_BACKTRACE` env var. Please read [`color_eyre`] +/// documentation for more details. +/// +/// # Parameters +/// +/// - `default_level`: Default level ranges from 0 to 5. This is overwritten through the +/// `RUST_LOG` env var. +/// +/// # Returns +/// +/// Returns `Err` in CI if the file logging cannot be initialized. Already +/// initialized errors are ignored, so this function can be called multiple +/// times safely. +pub fn initialize_logger(default_level: Option) -> Result<(), BridgeError> { + let is_ci = std::env::var("CI") + .map(|v| v == "true" || v == "1") + .unwrap_or(false); + + // UNCOMMENT TO DEBUG TOKIO TASKS + // console_subscriber::init(); + + if cfg!(test) { + // Enable full backtraces for tests + std::env::set_var("RUST_LIB_BACKTRACE", "full"); + std::env::set_var("RUST_BACKTRACE", "full"); + } + + // Initialize color-eyre for better error handling and backtraces + let _ = color_eyre::config::HookBuilder::default() + .add_frame_filter(Box::new(|frames| { + // Frames with names starting with any of the str's below will be filtered out + let filters = &[ + "std::", + "test::", + "tokio::", + "core::", + " Result<(), BridgeError> { + let telemetry_addr: SocketAddr = format!("{}:{}", config.host, config.port) + .parse() + .unwrap_or_else(|_| { + tracing::warn!( + "Invalid telemetry address: {}:{}, using default address: 127.0.0.1:8081", + config.host, + config.port + ); + SocketAddr::from((Ipv4Addr::new(0, 0, 
0, 0), 8081)) + }); + + tracing::debug!("Initializing telemetry at {}", telemetry_addr); + + let builder = PrometheusBuilder::new().with_http_listener(telemetry_addr); + + builder + .install() + .map_err(|e| eyre::eyre!("Failed to initialize telemetry: {}", e))?; + + Ok(()) +} + +fn try_set_global_subscriber(subscriber: S) +where + S: Subscriber + Send + Sync + 'static, +{ + match tracing::subscriber::set_global_default(subscriber) { + Ok(_) => {} + // Statically, the only error possible is "already initialized" + Err(_) => { + #[cfg(test)] + tracing::trace!("Tracing is already initialized, skipping without errors..."); + #[cfg(not(test))] + tracing::info!( + "Unexpected double initialization of tracing, skipping without errors..." + ); + } + } +} + +fn env_subscriber_with_file(path: &str) -> Result, BridgeError> { + if let Some(parent_dir) = std::path::Path::new(path).parent() { + std::fs::create_dir_all(parent_dir).map_err(|e| { + BridgeError::ConfigError(format!( + "Failed to create log directory '{}': {}", + parent_dir.display(), + e + )) + })?; + } + + let file = File::create(path).map_err(|e| BridgeError::ConfigError(e.to_string()))?; + + let file_filter = EnvFilter::from_default_env() + .add_directive("info".parse().expect("It should parse info level")) + .add_directive("ci=debug".parse().expect("It should parse ci debug level")); + + let console_filter = EnvFilter::builder() + .with_default_directive(LevelFilter::WARN.into()) + .from_env_lossy(); + + let file_layer = fmt::layer() + .with_writer(file) + .with_ansi(false) + .with_file(true) + .with_line_number(true) + .with_target(true) + .with_thread_ids(true) + .with_thread_names(true) + .with_filter(file_filter) + .boxed(); + + let console_layer = fmt::layer() + .with_test_writer() + .with_file(true) + .with_line_number(true) + .with_target(true) + .with_filter(console_filter) + .boxed(); + + Ok(Box::new( + Registry::default().with(file_layer).with(console_layer), + )) +} + +fn 
env_subscriber_to_json(level: Option) -> Box { + let filter = match level { + Some(lvl) => EnvFilter::builder() + .with_default_directive(lvl.into()) + .from_env_lossy(), + None => EnvFilter::from_default_env(), + }; + + let json_layer = fmt::layer::() + .with_test_writer() + // .with_timer(time::UtcTime::rfc_3339()) + .with_file(true) + .with_line_number(true) + .with_thread_ids(true) + .with_thread_names(true) + .with_target(true) + .json(); + // .with_current_span(true)z + // .with_span_list(true) + // To see how long each span takes, uncomment this. + // .with_span_events(FmtSpan::CLOSE) + + Box::new(tracing_subscriber::registry().with(json_layer).with(filter)) +} + +fn env_subscriber_to_human(level: Option) -> Box { + let filter = match level { + Some(lvl) => EnvFilter::builder() + .with_default_directive(lvl.into()) + .from_env_lossy(), + None => EnvFilter::from_default_env(), + }; + + let standard_layer = fmt::layer() + .with_test_writer() + // .with_timer(time::UtcTime::rfc_3339()) + .with_file(true) + .with_line_number(true) + // To see how long each span takes, uncomment this. 
+ // .with_span_events(FmtSpan::CLOSE) + .with_target(true); + + Box::new( + tracing_subscriber::registry() + .with(standard_layer) + .with(filter), + ) +} + +fn is_json_logs() -> bool { + std::env::var("LOG_FORMAT") + .map(|v| v.eq_ignore_ascii_case("json")) + .unwrap_or(false) +} + +pub fn get_vergen_response() -> VergenResponse { + let mut vergen_response = String::new(); + + // build info + if let Some(date) = option_env!("VERGEN_BUILD_DATE") { + vergen_response.push_str(&format!("Build Date: {date}\n")); + } + if let Some(timestamp) = option_env!("VERGEN_BUILD_TIMESTAMP") { + vergen_response.push_str(&format!("Build Timestamp: {timestamp}\n")); + } + + // git info + if let Some(branch) = option_env!("VERGEN_GIT_BRANCH") { + vergen_response.push_str(&format!("git branch: {branch}\n")); + } + if let Some(commit) = option_env!("VERGEN_GIT_SHA") { + vergen_response.push_str(&format!("git commit: {commit}\n")); + } + if let Some(commit_date) = option_env!("VERGEN_GIT_COMMIT_DATE") { + vergen_response.push_str(&format!("git commit date: {commit_date}\n")); + } + if let Some(commit_timestamp) = option_env!("VERGEN_GIT_COMMIT_TIMESTAMP") { + vergen_response.push_str(&format!("git commit timestamp: {commit_timestamp}\n")); + } + if let Some(commit_author_name) = option_env!("VERGEN_GIT_COMMIT_AUTHOR_NAME") { + vergen_response.push_str(&format!("git commit author name: {commit_author_name}\n")); + } + if let Some(commit_author_email) = option_env!("VERGEN_GIT_COMMIT_AUTHOR_EMAIL") { + vergen_response.push_str(&format!("git commit author email: {commit_author_email}\n")); + } + if let Some(commit_count) = option_env!("VERGEN_GIT_COMMIT_COUNT") { + vergen_response.push_str(&format!("git commit count: {commit_count}\n")); + } + if let Some(commit_message) = option_env!("VERGEN_GIT_COMMIT_MESSAGE") { + vergen_response.push_str(&format!("git commit message: {commit_message}\n")); + } + if let Some(describe) = option_env!("VERGEN_GIT_DESCRIBE") { + 
vergen_response.push_str(&format!("git describe: {describe}\n")); + } + if let Some(dirty) = option_env!("VERGEN_GIT_DIRTY") { + vergen_response.push_str(&format!("git dirty: {dirty}\n")); + } + + // cargo info + if let Some(debug) = option_env!("VERGEN_CARGO_DEBUG") { + vergen_response.push_str(&format!("cargo debug: {debug}\n")); + } + if let Some(opt_level) = option_env!("VERGEN_CARGO_OPT_LEVEL") { + vergen_response.push_str(&format!("cargo opt level: {opt_level}\n")); + } + if let Some(target_triple) = option_env!("VERGEN_CARGO_TARGET_TRIPLE") { + vergen_response.push_str(&format!("cargo target triple: {target_triple}\n")); + } + if let Some(features) = option_env!("VERGEN_CARGO_FEATURES") { + vergen_response.push_str(&format!("cargo features: {features}\n")); + } + if let Some(dependencies) = option_env!("VERGEN_CARGO_DEPENDENCIES") { + vergen_response.push_str(&format!("cargo dependencies: {dependencies}\n")); + } + + // rustc info + if let Some(channel) = option_env!("VERGEN_RUSTC_CHANNEL") { + vergen_response.push_str(&format!("rustc channel: {channel}\n")); + } + if let Some(version) = option_env!("VERGEN_RUSTC_SEMVER") { + vergen_response.push_str(&format!("rustc version: {version}\n")); + } + if let Some(commit_hash) = option_env!("VERGEN_RUSTC_COMMIT_HASH") { + vergen_response.push_str(&format!("rustc commit hash: {commit_hash}\n")); + } + if let Some(commit_date) = option_env!("VERGEN_RUSTC_COMMIT_DATE") { + vergen_response.push_str(&format!("rustc commit date: {commit_date}\n")); + } + if let Some(host_triple) = option_env!("VERGEN_RUSTC_HOST_TRIPLE") { + vergen_response.push_str(&format!("rustc host triple: {host_triple}\n")); + } + if let Some(llvm_version) = option_env!("VERGEN_RUSTC_LLVM_VERSION") { + vergen_response.push_str(&format!("rustc LLVM version: {llvm_version}\n")); + } + + // sysinfo + if let Some(cpu_brand) = option_env!("VERGEN_SYSINFO_CPU_BRAND") { + vergen_response.push_str(&format!("cpu brand: {cpu_brand}\n")); + } + if let 
Some(cpu_name) = option_env!("VERGEN_SYSINFO_CPU_NAME") { + vergen_response.push_str(&format!("cpu name: {cpu_name}\n")); + } + if let Some(cpu_vendor) = option_env!("VERGEN_SYSINFO_CPU_VENDOR") { + vergen_response.push_str(&format!("cpu vendor: {cpu_vendor}\n")); + } + if let Some(cpu_core_count) = option_env!("VERGEN_SYSINFO_CPU_CORE_COUNT") { + vergen_response.push_str(&format!("cpu core count: {cpu_core_count}\n")); + } + if let Some(cpu_frequency) = option_env!("VERGEN_SYSINFO_CPU_FREQUENCY") { + vergen_response.push_str(&format!("cpu frequency: {cpu_frequency} MHz\n")); + } + if let Some(memory) = option_env!("VERGEN_SYSINFO_MEMORY") { + vergen_response.push_str(&format!("total memory: {memory} KB\n")); + } + if let Some(name) = option_env!("VERGEN_SYSINFO_NAME") { + vergen_response.push_str(&format!("system name: {name}\n")); + } + if let Some(os_version) = option_env!("VERGEN_SYSINFO_OS_VERSION") { + vergen_response.push_str(&format!("OS version: {os_version}\n")); + } + if let Some(user) = option_env!("VERGEN_SYSINFO_USER") { + vergen_response.push_str(&format!("build user: {user}\n")); + } + + VergenResponse { + response: vergen_response, + } +} + +/// Monitors a [`tokio::task::JoinHandle`] in the background and logs it's end +/// result. 
+pub fn monitor_standalone_task( + task_handle: tokio::task::JoinHandle>, + task_name: &str, +) { + let task_name = task_name.to_string(); + + // Move task_handle into the spawned task to make it Send + tokio::spawn(async move { + match task_handle.await { + Ok(Ok(_)) => { + tracing::debug!("Task {} completed successfully", task_name); + } + Ok(Err(e)) => { + tracing::error!("Task {} throw an error: {:?}", task_name, e); + } + Err(e) => { + if e.is_cancelled() { + // Task was cancelled, which is expected during cleanup + tracing::debug!("Task {} has cancelled", task_name); + return; + } + tracing::error!("Task {} has panicked: {:?}", task_name, e); + } + } + }); +} + +/// Delays the exit of the program for 15 seconds, to allow for logs to be flushed. +/// Then panics with the given arguments. +/// +/// # Parameters +/// +/// - `($($arg:tt)*)`: Arguments to pass to `panic!`, in the same manner as format! and println! +macro_rules! delayed_panic { + ($($arg:tt)*) => { + { + eprintln!($($arg)*); + eprintln!("Delaying exit for 15 seconds, to allow for logs to be flushed"); + std::thread::sleep(std::time::Duration::from_secs(15)); + panic!($($arg)*); + } + }; +} + +pub(crate) use delayed_panic; + +#[derive(Debug, Clone, Default)] +pub struct AddMethodMiddlewareLayer; + +impl Layer for AddMethodMiddlewareLayer { + type Service = AddMethodMiddleware; + + fn layer(&self, service: S) -> Self::Service { + AddMethodMiddleware { inner: service } + } +} + +#[derive(Debug, Clone)] +pub struct AddMethodMiddleware { + inner: S, +} + +type BoxFuture<'a, T> = Pin + Send + 'a>>; + +impl Service> for AddMethodMiddleware +where + S: Service, Response = http::Response> + Clone + Send + 'static, + S::Future: Send + 'static, + ReqBody: Send + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: 
http::Request) -> Self::Future { + // See: https://docs.rs/tower/latest/tower/trait.Service.html#be-careful-when-cloning-inner-services + let clone = self.inner.clone(); + let mut inner = std::mem::replace(&mut self.inner, clone); + + Box::pin(async move { + let path = req.uri().path(); + + let grpc_method = + if let &[_, _, method] = &path.split("/").collect::>().as_slice() { + Some(method.to_string()) + } else { + None + }; + + if let Some(grpc_method) = grpc_method { + if let Ok(grpc_method) = HeaderValue::from_str(&grpc_method) { + req.headers_mut().insert("grpc-method", grpc_method); + } + } + + // Do extra async work here... + let response = inner.call(req).await?; + + Ok(response) + }) + } +} + +/// A trait for entities that have a name, operator, verifier, etc. +/// Used to distinguish between state machines with different owners in the database, +/// and to provide a human-readable name for the entity for task names. +pub trait NamedEntity: Sync + Send + 'static { + /// A string identifier for this owner type used to distinguish between + /// state machines with different owners in the database. + /// + /// ## Example + /// "operator", "verifier", "user" + const ENTITY_NAME: &'static str; + + /// Consumer ID for the tx sender task. + const TX_SENDER_CONSUMER_ID: &'static str; + + /// Consumer ID for the finalized block task with no automation. + const FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION: &'static str; + + /// Consumer ID for the finalized block task with automation. 
+ const FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION: &'static str; +} + +#[derive(Copy, Clone, Eq, Hash, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] +pub struct TxMetadata { + pub deposit_outpoint: Option, + pub operator_xonly_pk: Option, + pub round_idx: Option, + pub kickoff_idx: Option, + pub tx_type: TransactionType, +} + +impl std::fmt::Debug for TxMetadata { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut dbg_struct = f.debug_struct("TxMetadata"); + if let Some(deposit_outpoint) = self.deposit_outpoint { + dbg_struct.field("deposit_outpoint", &deposit_outpoint); + } + if let Some(operator_xonly_pk) = self.operator_xonly_pk { + dbg_struct.field("operator_xonly_pk", &operator_xonly_pk); + } + if let Some(round_idx) = self.round_idx { + dbg_struct.field("round_idx", &round_idx); + } + if let Some(kickoff_idx) = self.kickoff_idx { + dbg_struct.field("kickoff_idx", &kickoff_idx); + } + dbg_struct.field("tx_type", &self.tx_type); + dbg_struct.finish() + } +} + +/// Specifies the fee bumping strategy used for a transaction. +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord, sqlx::Type)] +#[sqlx(type_name = "fee_paying_type", rename_all = "lowercase")] +pub enum FeePayingType { + /// Child-Pays-For-Parent: A new "child" transaction is created, spending an output + /// from the original "parent" transaction. The child pays a high fee, sufficient + /// to cover both its own cost and the parent's fee deficit, incentivizing miners + /// to confirm both together. Specifically, we utilize "fee payer" UTXOs. + CPFP, + /// Replace-By-Fee: The original unconfirmed transaction is replaced with a new + /// version that includes a higher fee. The original transaction must signal + /// RBF enablement (e.g., via nSequence). Bitcoin Core's `bumpfee` RPC is often used. + RBF, + /// The transaction has already been funded and no fee is needed. + /// Currently used for disprove tx as it has operator's collateral as input. 
+ NoFunding, +} + +/// Information to re-sign an RBF transaction. +/// Specifically the merkle root of the taproot to keyspend with and the output index of the utxo to be +/// re-signed. +/// +/// - Not needed for SinglePlusAnyoneCanPay RBF txs. +/// - Not needed for CPFP. +/// - Only signs for a keypath spend +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct RbfSigningInfo { + pub vout: u32, + pub tweak_merkle_root: Option, + #[cfg(test)] + pub annex: Option>, + #[cfg(test)] + pub additional_taproot_output_count: Option, +} +pub trait Last20Bytes { + fn last_20_bytes(&self) -> [u8; 20]; +} + +pub trait TryLast20Bytes { + fn try_last_20_bytes(self) -> Result<[u8; 20], BridgeError>; +} + +impl Last20Bytes for [u8; 32] { + fn last_20_bytes(&self) -> [u8; 20] { + self.try_last_20_bytes().expect("will not happen") + } +} + +pub trait ScriptBufExt { + fn try_get_taproot_pk(&self) -> Result; +} + +impl ScriptBufExt for ScriptBuf { + fn try_get_taproot_pk(&self) -> Result { + if !self.is_p2tr() { + return Err(eyre::eyre!("Script is not a valid P2TR script (not 34 bytes)").into()); + } + + Ok(XOnlyPublicKey::from_slice(&self.as_bytes()[2..34]) + .wrap_err("Failed to parse XOnlyPublicKey from script")?) + } +} + +impl TryLast20Bytes for &[u8] { + fn try_last_20_bytes(self) -> Result<[u8; 20], BridgeError> { + if self.len() < 20 { + return Err(eyre::eyre!("Input is too short to contain 20 bytes").into()); + } + let mut result = [0u8; 20]; + + result.copy_from_slice(&self[self.len() - 20..]); + Ok(result) + } +} + +/// Wraps a future with a timeout, returning a `Status::deadline_exceeded` gRPC error +/// if the future does not complete within the specified duration. +/// +/// This is useful for enforcing timeouts on individual asynchronous operations, +/// especially those involving network requests, to prevent them from hanging indefinitely. +/// +/// # Arguments +/// +/// * `duration`: The maximum `Duration` to wait for the future to complete. 
+/// * `description`: A string slice describing the operation, used in the timeout error message. +/// * `future`: The `Future` to execute. The future should return a `Result`. +/// +/// # Returns +/// +/// Returns `Ok(T)` if the future completes successfully within the time limit. +/// Returns `Err(BridgeError)` if the future returns an error or if it times out. +/// A timeout results in a `BridgeError` that wraps a `tonic::Status::deadline_exceeded`. +pub async fn timed_request( + duration: Duration, + description: &str, + future: F, +) -> Result +where + F: Future>, +{ + timed_request_base(duration, description, future) + .await + .map_err(|_| Status::deadline_exceeded(format!("{} timed out", description)))? +} + +/// Wraps a future with a timeout and adds a debug span with the description. +/// +/// # Arguments +/// +/// * `duration`: The maximum `Duration` to wait for the future to complete. +/// * `description`: A string slice describing the operation, used in the timeout error message. +/// * `future`: The `Future` to execute. The future should return a `Result`. +/// +/// # Returns +/// +/// Returns `Ok(Ok(T))` if the future completes successfully within the time limit, returns `Ok(Err(e))` +/// if the future returns an error, returns `Err(Elapsed)` if the request times out. +pub async fn timed_request_base( + duration: Duration, + description: &str, + future: F, +) -> Result, Elapsed> +where + F: Future>, +{ + timeout(duration, future) + .instrument(debug_span!("timed_request", description = description)) + .await +} + +/// Concurrently executes a collection of futures, applying a timeout to each one individually. +/// If any future fails or times out, the entire operation is aborted and an error is returned. +/// +/// This utility is an extension of `futures::future::try_join_all` with added per-future +/// timeout logic and improved error reporting using optional IDs. +/// +/// # Type Parameters +/// +/// * `I`: An iterator that yields futures. 
+/// * `T`: The success type of the futures. +/// * `D`: A type that can be displayed, used for identifying futures in error messages. +/// +/// # Arguments +/// +/// * `duration`: The timeout `Duration` applied to each individual future in the iterator. +/// * `description`: A string slice describing the collective operation, used in timeout error messages. +/// * `ids`: An optional `Vec` of identifiers corresponding to each future. If provided, +/// these IDs are used in error messages to specify which future failed or timed out. +/// * `iter`: An iterator producing the futures to be executed. +/// +/// # Returns +/// +/// Returns `Ok(Vec)` containing the results of all futures if they all complete successfully. +/// Returns `Err(BridgeError)` if any future returns an error or times out. +/// The error will be contextualized with the operation description and the specific future's ID if available. +pub async fn timed_try_join_all( + duration: Duration, + description: &str, + ids: Option>, + iter: I, +) -> Result, BridgeError> +where + D: Display, + I: IntoIterator, + I::Item: Future>, +{ + let ids = Arc::new(ids); + try_join_all(iter.into_iter().enumerate().map(|item| { + let ids = ids.clone(); + async move { + let id = Option::as_ref(&ids).and_then(|ids| ids.get(item.0)); + + timeout(duration, item.1) + .await + .map_err(|_| { + Status::deadline_exceeded(format!( + "{} (id: {}) timed out", + description, + id.map(|id| id.to_string()) + .unwrap_or_else(|| "n/a".to_string()) + )) + })? + // Add the id to the error chain for easier debugging for other errors. 
+ .wrap_err_with(|| { + format!( + "Failed to join {}", + id.map(ToString::to_string).unwrap_or_else(|| "n/a".into()) + ) + }) + .map_err(Into::into) + } + })) + .instrument(debug_span!("timed_try_join_all", description = description)) + .await +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use std::io::Read; + use tempfile::NamedTempFile; + use tracing::level_filters::LevelFilter; + + #[test] + #[ignore = "This test changes environment variables so it should not be run in CI since it might affect other tests."] + fn test_ci_logging_setup() { + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let temp_path = temp_file.path().to_string_lossy().to_string(); + + std::env::set_var("CI", "true"); + std::env::set_var("INFO_LOG_FILE", &temp_path); + + let result = initialize_logger(Some(LevelFilter::DEBUG)); + assert!(result.is_ok(), "Logger initialization should succeed"); + + tracing::error!("Test error message"); + tracing::warn!("Test warn message"); + tracing::info!("Test info message"); + tracing::debug!(target: "ci", "Test CI debug message"); + tracing::debug!("Test debug message"); + + std::thread::sleep(std::time::Duration::from_millis(100)); + + let mut file_contents = String::new(); + let mut file = fs::File::open(&temp_path).expect("Failed to open log file"); + file.read_to_string(&mut file_contents) + .expect("Failed to read log file"); + + assert!( + file_contents.contains("Test error message"), + "Error message should be in file" + ); + assert!( + file_contents.contains("Test warn message"), + "Warn message should be in file" + ); + assert!( + file_contents.contains("Test info message"), + "Info message should be in file" + ); + + assert!( + file_contents.contains("Test CI debug message"), + "Debug message for CI should be in file" + ); + + assert!( + !file_contents.contains("Test debug message"), + "Debug message should not be in file" + ); + + std::env::remove_var("CI"); + 
std::env::remove_var("INFO_LOG_FILE"); + } +} diff --git a/core/src/verifier.rs b/core/src/verifier.rs new file mode 100644 index 000000000..c816021cb --- /dev/null +++ b/core/src/verifier.rs @@ -0,0 +1,3178 @@ +use crate::actor::{verify_schnorr, Actor, TweakCache, WinternitzDerivationPath}; +use crate::bitcoin_syncer::BitcoinSyncer; +use crate::bitvm_client::{ClementineBitVMPublicKeys, REPLACE_SCRIPTS_LOCK}; +use crate::builder::address::{create_taproot_address, taproot_builder_with_scripts}; +use crate::builder::block_cache; +use crate::builder::script::{ + extract_winternitz_commits, extract_winternitz_commits_with_sigs, SpendableScript, + TimelockScript, WinternitzCommit, +}; +use crate::builder::sighash::{ + create_nofn_sighash_stream, create_operator_sighash_stream, PartialSignatureInfo, SignatureInfo, +}; +use crate::builder::transaction::deposit_signature_owner::EntityType; +use crate::builder::transaction::input::UtxoVout; +use crate::builder::transaction::sign::{create_and_sign_txs, TransactionRequestData}; +use crate::builder::transaction::{ + create_emergency_stop_txhandler, create_move_to_vault_txhandler, + create_optimistic_payout_txhandler, ContractContext, TransactionType, TxHandler, +}; +use crate::builder::transaction::{create_round_txhandlers, KickoffWinternitzKeys}; +use crate::citrea::CitreaClientT; +use crate::config::protocol::ProtocolParamset; +use crate::config::BridgeConfig; +use crate::constants::{ + self, MAX_ALL_SESSIONS_BYTES, MAX_NUM_SESSIONS, NON_EPHEMERAL_ANCHOR_AMOUNT, NUM_NONCES_LIMIT, + TEN_MINUTES_IN_SECS, +}; +use crate::database::{Database, DatabaseTransaction}; +use crate::deposit::{DepositData, KickoffData, OperatorData}; +use crate::errors::{BridgeError, TxError}; +use crate::extended_bitcoin_rpc::ExtendedBitcoinRpc; +use crate::header_chain_prover::HeaderChainProver; +use crate::metrics::L1SyncStatusProvider; +use crate::operator::RoundIndex; +use crate::rpc::clementine::{EntityStatus, NormalSignatureKind, OperatorKeys, 
TaggedSignature}; +use crate::rpc::ecdsa_verification_sig::{ + recover_address_from_ecdsa_signature, OptimisticPayoutMessage, +}; +#[cfg(feature = "automation")] +use crate::states::StateManager; +use crate::task::entity_metric_publisher::{ + EntityMetricPublisher, ENTITY_METRIC_PUBLISHER_INTERVAL, +}; +use crate::task::manager::BackgroundTaskManager; +use crate::task::{IntoTask, TaskExt}; +#[cfg(feature = "automation")] +use crate::tx_sender::{TxSender, TxSenderClient}; +use crate::utils::TxMetadata; +use crate::utils::{monitor_standalone_task, NamedEntity}; +use crate::{musig2, UTXO}; +use alloy::primitives::PrimitiveSignature; +use bitcoin::hashes::Hash; +use bitcoin::key::rand::Rng; +use bitcoin::key::Secp256k1; +use bitcoin::script::Instruction; +use bitcoin::secp256k1::schnorr::Signature; +use bitcoin::secp256k1::Message; +use bitcoin::taproot::TaprootBuilder; +use bitcoin::{Address, Amount, ScriptBuf, Witness, XOnlyPublicKey}; +use bitcoin::{OutPoint, TxOut}; +use bitcoin_script::builder::StructuredScript; +use bitvm::chunk::api::validate_assertions; +use bitvm::clementine::additional_disprove::{ + replace_placeholders_in_script, validate_assertions_for_additional_script, +}; +use bitvm::signatures::winternitz; +#[cfg(feature = "automation")] +use circuits_lib::bridge_circuit::groth16::CircuitGroth16Proof; +use circuits_lib::bridge_circuit::transaction::CircuitTransaction; +use circuits_lib::bridge_circuit::{ + deposit_constant, get_first_op_return_output, parse_op_return_data, +}; +use eyre::{Context, ContextCompat, OptionExt, Result}; +use secp256k1::ffi::MUSIG_SECNONCE_LEN; +use secp256k1::musig::{AggregatedNonce, PartialSignature, PublicNonce, SecretNonce}; +#[cfg(feature = "automation")] +use std::collections::BTreeMap; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::pin::pin; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::mpsc; +use tokio_stream::StreamExt; + +#[derive(Debug)] +pub struct NonceSession { + /// Nonces 
used for a deposit session (last nonce is for the movetx signature) + pub nonces: Vec, +} + +#[derive(Debug)] +pub struct AllSessions { + sessions: HashMap, + session_queue: VecDeque, +} + +impl AllSessions { + pub fn new() -> Self { + Self { + sessions: HashMap::new(), + session_queue: VecDeque::new(), + } + } + + /// Adds a new session to the AllSessions with the given id.. + /// If the current byte size of all sessions exceeds MAX_ALL_SESSIONS_BYTES, the oldest session is removed until the byte size is under the limit. + pub fn add_new_session_with_id( + &mut self, + new_nonce_session: NonceSession, + id: u128, + ) -> Result<(), eyre::Report> { + if new_nonce_session.nonces.is_empty() { + // empty session, return error + return Err(eyre::eyre!("Empty session attempted to be added")); + } + + let mut total_needed = Self::session_bytes(&new_nonce_session)? + .checked_add(self.total_sessions_byte_size()?) + .ok_or_else(|| eyre::eyre!("Session size calculation overflow in add_new_session"))?; + + loop { + // check byte size and session count, if session count is already at the limit or byte size is higher than limit + // we remove the oldest session until the conditions are met + if total_needed <= MAX_ALL_SESSIONS_BYTES && self.sessions.len() < MAX_NUM_SESSIONS { + break; + } + total_needed = total_needed + .checked_sub(self.remove_oldest_session()?) + .ok_or_else(|| eyre::eyre!("Session size calculation overflow"))?; + } + + // save the session to the HashMap and the session id queue + self.sessions.insert(id, new_nonce_session); + self.session_queue.push_back(id); + Ok(()) + } + + /// Adds a new session to the AllSessions with a random id. + /// Returns the id of the added session. 
+ pub fn add_new_session_with_random_id( + &mut self, + new_nonce_session: NonceSession, + ) -> Result { + // generate unused id + let random_id = self.get_new_unused_id(); + self.add_new_session_with_id(new_nonce_session, random_id)?; + Ok(random_id) + } + + /// Removes a session from the AllSessions with the given id. + /// Also removes it from the session queue, because we might add the session with the same id later + /// (as in [`deposit_sign`]). + /// Returns the removed session. + pub fn remove_session_with_id(&mut self, id: u128) -> Result { + let session = self.sessions.remove(&id).ok_or_eyre("Session not found")?; + // remove the id from the session queue + self.session_queue.retain(|x| *x != id); + Ok(session) + } + + /// Generates a new unused id for a nonce session. + /// The important thing it that the id not easily predictable. + fn get_new_unused_id(&mut self) -> u128 { + let mut random_id = bitcoin::secp256k1::rand::thread_rng().gen_range(0..=u128::MAX); + while self.sessions.contains_key(&random_id) { + random_id = bitcoin::secp256k1::rand::thread_rng().gen_range(0..=u128::MAX); + } + random_id + } + + /// Removes the oldest session from the AllSessions. + /// Returns the number of bytes removed. + fn remove_oldest_session(&mut self) -> Result { + match self.session_queue.pop_front() { + Some(oldest_id) => { + let removed_session = self.sessions.remove(&oldest_id); + match removed_session { + Some(session) => Ok(Self::session_bytes(&session)?), + None => Ok(0), + } + } + None => Err(eyre::eyre!("No session to remove")), + } + } + + fn session_bytes(session: &NonceSession) -> Result { + // 132 bytes per nonce + session + .nonces + .len() + .checked_mul(MUSIG_SECNONCE_LEN) + .ok_or_eyre("Calculation overflow in session_bytes") + } + + /// Returns the total byte size of all secnonces in the AllSessions. 
+ pub fn total_sessions_byte_size(&self) -> Result { + // Should never overflow as it counts bytes in usize + let mut total_bytes: usize = 0; + + for (_, session) in self.sessions.iter() { + total_bytes = total_bytes + .checked_add(Self::session_bytes(session)?) + .ok_or_eyre("Calculation overflow in total_byte_size")?; + } + + Ok(total_bytes) + } +} + +impl Default for AllSessions { + fn default() -> Self { + Self::new() + } +} + +pub struct VerifierServer { + pub verifier: Verifier, + background_tasks: BackgroundTaskManager, +} + +impl VerifierServer +where + C: CitreaClientT, +{ + pub async fn new(config: BridgeConfig) -> Result { + let verifier = Verifier::new(config.clone()).await?; + let background_tasks = BackgroundTaskManager::default(); + + Ok(VerifierServer { + verifier, + background_tasks, + }) + } + + /// Starts the background tasks for the verifier. + /// If called multiple times, it will restart only the tasks that are not already running. + pub async fn start_background_tasks(&self) -> Result<(), BridgeError> { + let rpc = ExtendedBitcoinRpc::connect( + self.verifier.config.bitcoin_rpc_url.clone(), + self.verifier.config.bitcoin_rpc_user.clone(), + self.verifier.config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + // initialize and run automation features + #[cfg(feature = "automation")] + { + let tx_sender = TxSender::new( + self.verifier.signer.clone(), + rpc.clone(), + self.verifier.db.clone(), + Verifier::::TX_SENDER_CONSUMER_ID.to_string(), + self.verifier.config.protocol_paramset(), + self.verifier.config.mempool_api_host.clone(), + self.verifier.config.mempool_api_endpoint.clone(), + ); + + self.background_tasks + .ensure_task_looping(tx_sender.into_task()) + .await; + let state_manager = StateManager::new( + self.verifier.db.clone(), + self.verifier.clone(), + self.verifier.config.protocol_paramset(), + ) + .await?; + + let should_run_state_mgr = { + #[cfg(test)] + { + self.verifier.config.test_params.should_run_state_manager + } + 
#[cfg(not(test))] + { + true + } + }; + + if should_run_state_mgr { + // start tracking operators if they exist in the db + let operators = self.verifier.db.get_operators(None).await?; + if !operators.is_empty() { + let mut dbtx = self.verifier.db.begin_transaction().await?; + for operator in operators { + StateManager::>::dispatch_new_round_machine( + self.verifier.db.clone(), + &mut dbtx, + OperatorData { + xonly_pk: operator.0, + reimburse_addr: operator.1, + collateral_funding_outpoint: operator.2, + }, + ) + .await?; + } + dbtx.commit().await?; + } + self.background_tasks + .ensure_task_looping(state_manager.block_fetcher_task().await?) + .await; + self.background_tasks + .ensure_task_looping(state_manager.into_task()) + .await; + } + } + #[cfg(not(feature = "automation"))] + { + // get the next finalized block height to start from + let next_height = self + .verifier + .db + .get_next_finalized_block_height_for_consumer( + None, + Verifier::::FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION, + self.verifier.config.protocol_paramset(), + ) + .await?; + + self.background_tasks + .ensure_task_looping( + crate::bitcoin_syncer::FinalizedBlockFetcherTask::new( + self.verifier.db.clone(), + Verifier::::FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION.to_string(), + self.verifier.config.protocol_paramset(), + next_height, + self.verifier.clone(), + ) + .into_buffered_errors(50) + .with_delay(crate::bitcoin_syncer::BTC_SYNCER_POLL_DELAY), + ) + .await; + } + + let syncer = BitcoinSyncer::new( + self.verifier.db.clone(), + rpc.clone(), + self.verifier.config.protocol_paramset(), + ) + .await?; + + self.background_tasks + .ensure_task_looping(syncer.into_task()) + .await; + + self.background_tasks + .ensure_task_looping( + EntityMetricPublisher::>::new(self.verifier.db.clone(), rpc.clone()) + .with_delay(ENTITY_METRIC_PUBLISHER_INTERVAL), + ) + .await; + + Ok(()) + } + + pub async fn get_current_status(&self) -> Result { + let stopped_tasks = 
self.background_tasks.get_stopped_tasks().await?; + // Determine if automation is enabled + let automation_enabled = cfg!(feature = "automation"); + + let l1_sync_status = + Verifier::::get_l1_status(&self.verifier.db, &self.verifier.rpc).await?; + + Ok(EntityStatus { + automation: automation_enabled, + wallet_balance: l1_sync_status + .wallet_balance + .map(|balance| format!("{} BTC", balance.to_btc())), + tx_sender_synced_height: l1_sync_status.tx_sender_synced_height, + finalized_synced_height: l1_sync_status.finalized_synced_height, + hcp_last_proven_height: l1_sync_status.hcp_last_proven_height, + rpc_tip_height: l1_sync_status.rpc_tip_height, + bitcoin_syncer_synced_height: l1_sync_status.btc_syncer_synced_height, + stopped_tasks: Some(stopped_tasks), + state_manager_next_height: l1_sync_status.state_manager_next_height, + }) + } + + pub async fn shutdown(&mut self) { + self.background_tasks.graceful_shutdown().await; + } +} + +#[derive(Debug, Clone)] +pub struct Verifier { + rpc: ExtendedBitcoinRpc, + + pub(crate) signer: Actor, + pub(crate) db: Database, + pub(crate) config: BridgeConfig, + pub(crate) nonces: Arc>, + #[cfg(feature = "automation")] + pub tx_sender: TxSenderClient, + #[cfg(feature = "automation")] + pub header_chain_prover: HeaderChainProver, + pub citrea_client: C, +} + +impl Verifier +where + C: CitreaClientT, +{ + pub async fn new(config: BridgeConfig) -> Result { + let signer = Actor::new( + config.secret_key, + config.winternitz_secret_key, + config.protocol_paramset().network, + ); + + let rpc = ExtendedBitcoinRpc::connect( + config.bitcoin_rpc_url.clone(), + config.bitcoin_rpc_user.clone(), + config.bitcoin_rpc_password.clone(), + None, + ) + .await?; + + let db = Database::new(&config).await?; + + let citrea_client = C::new( + config.citrea_rpc_url.clone(), + config.citrea_light_client_prover_url.clone(), + config.citrea_chain_id, + None, + config.citrea_request_timeout, + ) + .await?; + + let all_sessions = AllSessions::new(); + + 
#[cfg(feature = "automation")] + let tx_sender = TxSenderClient::new(db.clone(), Self::TX_SENDER_CONSUMER_ID.to_string()); + + #[cfg(feature = "automation")] + let header_chain_prover = HeaderChainProver::new(&config, rpc.clone()).await?; + + let verifier = Verifier { + rpc, + signer, + db: db.clone(), + config: config.clone(), + nonces: Arc::new(tokio::sync::Mutex::new(all_sessions)), + #[cfg(feature = "automation")] + tx_sender, + #[cfg(feature = "automation")] + header_chain_prover, + citrea_client, + }; + Ok(verifier) + } + + /// Verifies all unspent kickoff signatures sent by the operator, converts them to TaggedSignature + /// as they will be saved as TaggedSignatures to the db. + fn verify_unspent_kickoff_sigs( + &self, + collateral_funding_outpoint: OutPoint, + operator_xonly_pk: XOnlyPublicKey, + wallet_reimburse_address: Address, + unspent_kickoff_sigs: Vec, + kickoff_wpks: &KickoffWinternitzKeys, + ) -> Result, BridgeError> { + let mut tweak_cache = TweakCache::default(); + let mut tagged_sigs = Vec::with_capacity(unspent_kickoff_sigs.len()); + let mut prev_ready_to_reimburse: Option = None; + let operator_data = OperatorData { + xonly_pk: operator_xonly_pk, + collateral_funding_outpoint, + reimburse_addr: wallet_reimburse_address.clone(), + }; + let mut cur_sig_index = 0; + for round_idx in RoundIndex::iter_rounds(self.config.protocol_paramset().num_round_txs) { + let txhandlers = create_round_txhandlers( + self.config.protocol_paramset(), + round_idx, + &operator_data, + kickoff_wpks, + prev_ready_to_reimburse.as_ref(), + )?; + for txhandler in txhandlers { + if let TransactionType::UnspentKickoff(kickoff_idx) = + txhandler.get_transaction_type() + { + let partial = PartialSignatureInfo { + operator_idx: 0, // dummy value + round_idx, + kickoff_utxo_idx: kickoff_idx, + }; + let sighashes = txhandler + .calculate_shared_txins_sighash(EntityType::OperatorSetup, partial)?; + for sighash in sighashes { + let message = 
Message::from_digest(sighash.0.to_byte_array()); + verify_schnorr( + &unspent_kickoff_sigs[cur_sig_index], + &message, + operator_xonly_pk, + sighash.1.tweak_data, + Some(&mut tweak_cache), + ) + .map_err(|e| { + eyre::eyre!( + "Verifier{}: Unspent kickoff signature verification failed for num sig {}: {}", + self.signer.xonly_public_key.to_string(), + cur_sig_index + 1, + e + ) + })?; + tagged_sigs.push(TaggedSignature { + signature: unspent_kickoff_sigs[cur_sig_index].serialize().to_vec(), + signature_id: Some(sighash.1.signature_id), + }); + cur_sig_index += 1; + } + } else if let TransactionType::ReadyToReimburse = txhandler.get_transaction_type() { + prev_ready_to_reimburse = Some(txhandler); + } + } + } + + Ok(tagged_sigs) + } + + /// Checks if all operators in verifier's db that are still in protocol are in the deposit. + /// Checks if all operators in the deposit data from aggregator are in the verifier's DB. + /// Afterwards, it checks if the given deposit outpoint is valid. First it checks if the tx exists on chain, + /// then it checks if the amount in TxOut is equal to bridge_amount and if the script is correct. + /// + /// # Arguments + /// * `deposit_data` - The deposit data to check. + /// + /// # Returns + /// * `()` if the deposit is valid, `BridgeError::InvalidDeposit` if the deposit is invalid. 
+ async fn is_deposit_valid(&self, deposit_data: &mut DepositData) -> Result<(), BridgeError> { + // check if security council is the same as in our config + if deposit_data.security_council != self.config.security_council { + let reason = format!( + "Security council in deposit is not the same as in the config, expected {:?}, got {:?}", + self.config.security_council, + deposit_data.security_council + ); + tracing::error!("{reason}"); + return Err(BridgeError::InvalidDeposit(reason)); + } + let operators_in_deposit_data = deposit_data.get_operators(); + // check if all operators that still have collateral are in the deposit + let operators_in_db = self.db.get_operators(None).await?; + for (xonly_pk, reimburse_addr, collateral_funding_outpoint) in operators_in_db.iter() { + let operator_data = OperatorData { + xonly_pk: *xonly_pk, + collateral_funding_outpoint: *collateral_funding_outpoint, + reimburse_addr: reimburse_addr.clone(), + }; + let kickoff_winternitz_pks = self + .db + .get_operator_kickoff_winternitz_public_keys(None, *xonly_pk) + .await?; + let kickoff_wpks = KickoffWinternitzKeys::new( + kickoff_winternitz_pks, + self.config.protocol_paramset().num_kickoffs_per_round, + self.config.protocol_paramset().num_round_txs, + ); + let is_collateral_usable = self + .rpc + .collateral_check( + &operator_data, + &kickoff_wpks, + self.config.protocol_paramset(), + ) + .await?; + // if operator is not in deposit but its collateral is still on chain, return false + if !operators_in_deposit_data.contains(xonly_pk) && is_collateral_usable { + let reason = format!( + "Operator {:?} is is still in protocol but not in the deposit data from aggregator", + xonly_pk + ); + tracing::error!("{reason}"); + return Err(BridgeError::InvalidDeposit(reason)); + } + // if operator is in deposit, but the collateral is not usable, return false + if operators_in_deposit_data.contains(xonly_pk) && !is_collateral_usable { + let reason = format!( + "Operator {:?} is in the deposit data 
from aggregator but its collateral is spent, operator cannot fulfill withdrawals anymore", + xonly_pk + ); + tracing::error!("{reason}"); + return Err(BridgeError::InvalidDeposit(reason)); + } + } + // check if there are any operators in the deposit that are not in the DB. + for operator_xonly_pk in operators_in_deposit_data { + if !operators_in_db + .iter() + .any(|(xonly_pk, _, _)| xonly_pk == &operator_xonly_pk) + { + let reason = format!( + "Operator {:?} is in the deposit data from aggregator but not in the verifier's DB, cannot sign deposit", + operator_xonly_pk + ); + tracing::error!("{reason}"); + return Err(BridgeError::InvalidDeposit(reason)); + } + } + // check if deposit script in deposit_outpoint is valid + let deposit_scripts: Vec = deposit_data + .get_deposit_scripts(self.config.protocol_paramset())? + .into_iter() + .map(|s| s.to_script_buf()) + .collect(); + // what the deposit scriptpubkey is in the deposit_outpoint should be according to the deposit data + let expected_scriptpubkey = create_taproot_address( + &deposit_scripts, + None, + self.config.protocol_paramset().network, + ) + .0 + .script_pubkey(); + let deposit_outpoint = deposit_data.get_deposit_outpoint(); + let deposit_txid = deposit_outpoint.txid; + let deposit_tx = self + .rpc + .get_tx_of_txid(&deposit_txid) + .await + .wrap_err("Deposit tx could not be found on chain")?; + let deposit_txout_in_chain = deposit_tx + .output + .get(deposit_outpoint.vout as usize) + .ok_or(eyre::eyre!( + "Deposit vout not found in tx {}, vout: {}", + deposit_txid, + deposit_outpoint.vout + ))?; + if deposit_txout_in_chain.value != self.config.protocol_paramset().bridge_amount { + let reason = format!( + "Deposit amount is not correct, expected {}, got {}", + self.config.protocol_paramset().bridge_amount, + deposit_txout_in_chain.value + ); + tracing::error!("{reason}"); + return Err(BridgeError::InvalidDeposit(reason)); + } + if deposit_txout_in_chain.script_pubkey != expected_scriptpubkey { + let 
reason = format!( + "Deposit script pubkey in deposit outpoint does not match the deposit data, expected {:?}, got {:?}", + expected_scriptpubkey, + deposit_txout_in_chain.script_pubkey + ); + tracing::error!("{reason}"); + return Err(BridgeError::InvalidDeposit(reason)); + } + Ok(()) + } + + pub async fn set_operator( + &self, + collateral_funding_outpoint: OutPoint, + operator_xonly_pk: XOnlyPublicKey, + wallet_reimburse_address: Address, + operator_winternitz_public_keys: Vec, + unspent_kickoff_sigs: Vec, + ) -> Result<(), BridgeError> { + tracing::info!("Setting operator: {:?}", operator_xonly_pk); + let operator_data = OperatorData { + xonly_pk: operator_xonly_pk, + collateral_funding_outpoint, + reimburse_addr: wallet_reimburse_address, + }; + + let kickoff_wpks = KickoffWinternitzKeys::new( + operator_winternitz_public_keys, + self.config.protocol_paramset().num_kickoffs_per_round, + self.config.protocol_paramset().num_round_txs, + ); + + if !self + .rpc + .collateral_check( + &operator_data, + &kickoff_wpks, + self.config.protocol_paramset(), + ) + .await? 
+ { + return Err(eyre::eyre!( + "Collateral utxo of operator {:?} does not exist or is not usable in bitcoin, cannot set operator", + operator_xonly_pk, + ) + .into()); + } + + let tagged_sigs = self.verify_unspent_kickoff_sigs( + collateral_funding_outpoint, + operator_xonly_pk, + operator_data.reimburse_addr.clone(), + unspent_kickoff_sigs, + &kickoff_wpks, + )?; + + let operator_winternitz_public_keys = kickoff_wpks.keys; + let mut dbtx = self.db.begin_transaction().await?; + // Save the operator details to the db + self.db + .insert_operator_if_not_exists( + Some(&mut dbtx), + operator_xonly_pk, + &operator_data.reimburse_addr, + collateral_funding_outpoint, + ) + .await?; + + self.db + .insert_operator_kickoff_winternitz_public_keys_if_not_exist( + Some(&mut dbtx), + operator_xonly_pk, + operator_winternitz_public_keys, + ) + .await?; + + let sigs_per_round = self.config.get_num_unspent_kickoff_sigs() + / self.config.protocol_paramset().num_round_txs; + let tagged_sigs_per_round: Vec> = tagged_sigs + .chunks(sigs_per_round) + .map(|chunk| chunk.to_vec()) + .collect(); + + for (round_idx, sigs) in tagged_sigs_per_round.into_iter().enumerate() { + self.db + .insert_unspent_kickoff_sigs_if_not_exist( + Some(&mut dbtx), + operator_xonly_pk, + RoundIndex::Round(round_idx), + sigs, + ) + .await?; + } + + #[cfg(feature = "automation")] + { + StateManager::::dispatch_new_round_machine( + self.db.clone(), + &mut dbtx, + operator_data, + ) + .await?; + } + dbtx.commit().await?; + tracing::info!("Operator: {:?} set successfully", operator_xonly_pk); + Ok(()) + } + + pub async fn nonce_gen( + &self, + num_nonces: u32, + ) -> Result<(u128, Vec), BridgeError> { + // reject if too many nonces are requested + if num_nonces > NUM_NONCES_LIMIT { + return Err(eyre::eyre!( + "Number of nonces requested is too high, max allowed is {}, requested: {}", + NUM_NONCES_LIMIT, + num_nonces + ) + .into()); + } + if num_nonces == 0 { + return Err( + eyre::eyre!("Number of nonces requested 
is 0, cannot generate nonces").into(), + ); + } + let (sec_nonces, pub_nonces): (Vec, Vec) = (0..num_nonces) + .map(|_| { + // nonce pair needs keypair and a rng + let (sec_nonce, pub_nonce) = musig2::nonce_pair(&self.signer.keypair)?; + Ok((sec_nonce, pub_nonce)) + }) + .collect::, BridgeError>>()? + .into_iter() + .unzip(); + + let session = NonceSession { nonces: sec_nonces }; + + // save the session + let session_id = { + let all_sessions = &mut *self.nonces.lock().await; + all_sessions.add_new_session_with_random_id(session)? + }; + + Ok((session_id, pub_nonces)) + } + + pub async fn deposit_sign( + &self, + mut deposit_data: DepositData, + session_id: u128, + mut agg_nonce_rx: mpsc::Receiver, + ) -> Result, BridgeError> { + self.citrea_client + .check_nofn_correctness(deposit_data.get_nofn_xonly_pk()?) + .await?; + + self.is_deposit_valid(&mut deposit_data).await?; + + // set deposit data to db before starting to sign, ensures that if the deposit data already exists in db, it matches the one + // given by the aggregator currently. We do not want to sign 2 different deposits for same deposit_outpoint + self.db + .insert_deposit_data_if_not_exists( + None, + &mut deposit_data, + self.config.protocol_paramset(), + ) + .await?; + + let verifier = self.clone(); + let (partial_sig_tx, partial_sig_rx) = mpsc::channel(constants::DEFAULT_CHANNEL_SIZE); + let verifier_index = deposit_data.get_verifier_index(&self.signer.public_key)?; + let verifiers_public_keys = deposit_data.get_verifiers(); + + let deposit_blockhash = self + .rpc + .get_blockhash_of_tx(&deposit_data.get_deposit_outpoint().txid) + .await?; + + let handle = tokio::spawn(async move { + // Take the lock and extract the session before entering the async block + // Extract the session and remove it from the map to release the lock early + let mut session = { + let mut session_map = verifier.nonces.lock().await; + session_map.remove_session_with_id(session_id)? 
+ }; + session.nonces.reverse(); + + let mut nonce_idx: usize = 0; + + let mut sighash_stream = Box::pin(create_nofn_sighash_stream( + verifier.db.clone(), + verifier.config.clone(), + deposit_data.clone(), + deposit_blockhash, + false, + )); + let num_required_sigs = verifier.config.get_num_required_nofn_sigs(&deposit_data); + + assert_eq!( + num_required_sigs + 2, + session.nonces.len(), + "Expected nonce count to be num_required_sigs + 2 (movetx & emergency stop)" + ); + + while let Some(agg_nonce) = agg_nonce_rx.recv().await { + let sighash = sighash_stream + .next() + .await + .ok_or(eyre::eyre!("No sighash received"))??; + tracing::debug!("Verifier {} found sighash: {:?}", verifier_index, sighash); + + let nonce = session + .nonces + .pop() + .ok_or(eyre::eyre!("No nonce available"))?; + + let partial_sig = musig2::partial_sign( + verifiers_public_keys.clone(), + None, + nonce, + agg_nonce, + verifier.signer.keypair, + Message::from_digest(*sighash.0.as_byte_array()), + )?; + + partial_sig_tx + .send(partial_sig) + .await + .wrap_err("Failed to send partial signature")?; + + nonce_idx += 1; + tracing::debug!( + "Verifier {} signed and sent sighash {} of {}", + verifier_index, + nonce_idx, + num_required_sigs + ); + if nonce_idx == num_required_sigs { + break; + } + } + + if session.nonces.len() != 2 { + return Err(eyre::eyre!( + "Expected 2 nonces remaining in session, one for move tx and one for emergency stop, got {}", + session.nonces.len() + ).into()); + } + + let mut session_map = verifier.nonces.lock().await; + session_map.add_new_session_with_id(session, session_id)?; + + Ok::<(), BridgeError>(()) + }); + monitor_standalone_task(handle, "Verifier deposit_sign"); + + Ok(partial_sig_rx) + } + + pub async fn deposit_finalize( + &self, + deposit_data: &mut DepositData, + session_id: u128, + mut sig_receiver: mpsc::Receiver, + mut agg_nonce_receiver: mpsc::Receiver, + mut operator_sig_receiver: mpsc::Receiver, + ) -> Result<(PartialSignature, 
PartialSignature), BridgeError> { + self.citrea_client + .check_nofn_correctness(deposit_data.get_nofn_xonly_pk()?) + .await?; + + self.is_deposit_valid(deposit_data).await?; + + let mut tweak_cache = TweakCache::default(); + let deposit_blockhash = self + .rpc + .get_blockhash_of_tx(&deposit_data.get_deposit_outpoint().txid) + .await?; + + let mut sighash_stream = pin!(create_nofn_sighash_stream( + self.db.clone(), + self.config.clone(), + deposit_data.clone(), + deposit_blockhash, + true, + )); + + let num_required_nofn_sigs = self.config.get_num_required_nofn_sigs(deposit_data); + let num_required_nofn_sigs_per_kickoff = self + .config + .get_num_required_nofn_sigs_per_kickoff(deposit_data); + let num_required_op_sigs = self.config.get_num_required_operator_sigs(deposit_data); + let num_required_op_sigs_per_kickoff = self + .config + .get_num_required_operator_sigs_per_kickoff(deposit_data); + + let operator_xonly_pks = deposit_data.get_operators(); + let num_operators = deposit_data.get_num_operators(); + + let ProtocolParamset { + num_round_txs, + num_kickoffs_per_round, + .. 
+ } = *self.config.protocol_paramset(); + + let mut verified_sigs = vec![ + vec![ + vec![ + Vec::::with_capacity( + num_required_nofn_sigs_per_kickoff + num_required_op_sigs_per_kickoff + ); + num_kickoffs_per_round + ]; + num_round_txs + 1 + ]; + num_operators + ]; + + let mut kickoff_txids = vec![vec![vec![]; num_round_txs + 1]; num_operators]; + + // ------ N-of-N SIGNATURES VERIFICATION ------ + + let mut nonce_idx: usize = 0; + + while let Some(sighash) = sighash_stream.next().await { + let typed_sighash = sighash.wrap_err("Failed to read from sighash stream")?; + + let &SignatureInfo { + operator_idx, + round_idx, + kickoff_utxo_idx, + signature_id, + tweak_data, + kickoff_txid, + } = &typed_sighash.1; + + if signature_id == NormalSignatureKind::YieldKickoffTxid.into() { + kickoff_txids[operator_idx][round_idx.to_index()] + .push((kickoff_txid, kickoff_utxo_idx)); + continue; + } + + let sig = sig_receiver + .recv() + .await + .ok_or_eyre("No signature received")?; + + tracing::debug!("Verifying Final nofn Signature {}", nonce_idx + 1); + + verify_schnorr( + &sig, + &Message::from(typed_sighash.0), + deposit_data.get_nofn_xonly_pk()?, + tweak_data, + Some(&mut tweak_cache), + ) + .wrap_err_with(|| { + format!( + "Failed to verify nofn signature {} with signature info {:?}", + nonce_idx + 1, + typed_sighash.1 + ) + })?; + + let tagged_sig = TaggedSignature { + signature: sig.serialize().to_vec(), + signature_id: Some(signature_id), + }; + verified_sigs[operator_idx][round_idx.to_index()][kickoff_utxo_idx].push(tagged_sig); + + tracing::debug!("Final Signature Verified"); + + nonce_idx += 1; + } + + if nonce_idx != num_required_nofn_sigs { + return Err(eyre::eyre!( + "Did not receive enough nofn signatures. 
Needed: {}, received: {}", + num_required_nofn_sigs, + nonce_idx + ) + .into()); + } + + tracing::info!( + "Verifier{} Finished verifying final signatures of NofN", + self.signer.xonly_public_key.to_string() + ); + + let move_tx_agg_nonce = agg_nonce_receiver + .recv() + .await + .ok_or(eyre::eyre!("Aggregated nonces channel ended prematurely"))?; + + let emergency_stop_agg_nonce = agg_nonce_receiver + .recv() + .await + .ok_or(eyre::eyre!("Aggregated nonces channel ended prematurely"))?; + + tracing::info!( + "Verifier{} Received move tx and emergency stop aggregated nonces", + self.signer.xonly_public_key.to_string() + ); + // ------ OPERATOR SIGNATURES VERIFICATION ------ + + let num_required_total_op_sigs = num_required_op_sigs * deposit_data.get_num_operators(); + let mut total_op_sig_count = 0; + + // get operator data + let operators_data = deposit_data.get_operators(); + + // get signatures of operators and verify them + for (operator_idx, &op_xonly_pk) in operators_data.iter().enumerate() { + let mut op_sig_count = 0; + // generate the sighash stream for operator + let mut sighash_stream = pin!(create_operator_sighash_stream( + self.db.clone(), + op_xonly_pk, + self.config.clone(), + deposit_data.clone(), + deposit_blockhash, + )); + while let Some(operator_sig) = operator_sig_receiver.recv().await { + let typed_sighash = sighash_stream + .next() + .await + .ok_or_eyre("Operator sighash stream ended prematurely")??; + + tracing::debug!( + "Verifying Final operator signature {} for operator {}, signature info {:?}", + op_sig_count + 1, + operator_idx, + typed_sighash.1 + ); + + let &SignatureInfo { + operator_idx, + round_idx, + kickoff_utxo_idx, + signature_id, + kickoff_txid: _, + tweak_data, + } = &typed_sighash.1; + + verify_schnorr( + &operator_sig, + &Message::from(typed_sighash.0), + op_xonly_pk, + tweak_data, + Some(&mut tweak_cache), + ) + .wrap_err_with(|| { + format!( + "Operator {} Signature {}: verification failed. 
Signature info: {:?}.", + operator_idx, + op_sig_count + 1, + typed_sighash.1 + ) + })?; + + let tagged_sig = TaggedSignature { + signature: operator_sig.serialize().to_vec(), + signature_id: Some(signature_id), + }; + verified_sigs[operator_idx][round_idx.to_index()][kickoff_utxo_idx] + .push(tagged_sig); + + op_sig_count += 1; + total_op_sig_count += 1; + if op_sig_count == num_required_op_sigs { + break; + } + } + } + + if total_op_sig_count != num_required_total_op_sigs { + return Err(eyre::eyre!( + "Did not receive enough operator signatures. Needed: {}, received: {}", + num_required_total_op_sigs, + total_op_sig_count + ) + .into()); + } + + tracing::info!( + "Verifier{} Finished verifying final signatures of operators", + self.signer.xonly_public_key.to_string() + ); + // ----- MOVE TX SIGNING + + // Generate partial signature for move transaction + let move_txhandler = + create_move_to_vault_txhandler(deposit_data, self.config.protocol_paramset())?; + + let move_tx_sighash = move_txhandler.calculate_script_spend_sighash_indexed( + 0, + 0, + bitcoin::TapSighashType::Default, + )?; + + let movetx_secnonce = { + let mut session_map = self.nonces.lock().await; + let session = session_map + .sessions + .get_mut(&session_id) + .ok_or_else(|| eyre::eyre!("Could not find session id {session_id}"))?; + session + .nonces + .pop() + .ok_or_eyre("No move tx secnonce in session")? + }; + + let emergency_stop_secnonce = { + let mut session_map = self.nonces.lock().await; + let session = session_map + .sessions + .get_mut(&session_id) + .ok_or_else(|| eyre::eyre!("Could not find session id {session_id}"))?; + session + .nonces + .pop() + .ok_or_eyre("No emergency stop secnonce in session")? 
+ }; + + // sign move tx and save everything to db if everything is correct + let move_tx_partial_sig = musig2::partial_sign( + deposit_data.get_verifiers(), + None, + movetx_secnonce, + move_tx_agg_nonce, + self.signer.keypair, + Message::from_digest(move_tx_sighash.to_byte_array()), + )?; + + tracing::info!( + "Verifier{} Finished signing move tx", + self.signer.xonly_public_key.to_string() + ); + + let emergency_stop_txhandler = create_emergency_stop_txhandler( + deposit_data, + &move_txhandler, + self.config.protocol_paramset(), + )?; + + let emergency_stop_sighash = emergency_stop_txhandler + .calculate_script_spend_sighash_indexed( + 0, + 0, + bitcoin::TapSighashType::SinglePlusAnyoneCanPay, + )?; + + let emergency_stop_partial_sig = musig2::partial_sign( + deposit_data.get_verifiers(), + None, + emergency_stop_secnonce, + emergency_stop_agg_nonce, + self.signer.keypair, + Message::from_digest(emergency_stop_sighash.to_byte_array()), + )?; + + tracing::info!( + "Verifier{} Finished signing emergency stop tx", + self.signer.xonly_public_key.to_string() + ); + + // Save signatures to db + let mut dbtx = self.db.begin_transaction().await?; + // Deposit is not actually finalized here, its only finalized after the aggregator gets all the partial sigs and checks the aggregated sig + for (operator_idx, (operator_xonly_pk, operator_sigs)) in operator_xonly_pks + .into_iter() + .zip(verified_sigs.into_iter()) + .enumerate() + { + // skip indexes until round 0 (currently 0th index corresponds to collateral, which doesn't have any sigs) + for (round_idx, mut op_round_sigs) in operator_sigs + .into_iter() + .enumerate() + .skip(RoundIndex::Round(0).to_index()) + { + if kickoff_txids[operator_idx][round_idx].len() + != self.config.protocol_paramset().num_signed_kickoffs + { + return Err(eyre::eyre!( + "Number of signed kickoff utxos for operator: {}, round: {} is wrong. 
Expected: {}, got: {}", + operator_xonly_pk, round_idx, self.config.protocol_paramset().num_signed_kickoffs, kickoff_txids[operator_idx][round_idx].len() + ).into()); + } + for (kickoff_txid, kickoff_idx) in &kickoff_txids[operator_idx][round_idx] { + if kickoff_txid.is_none() { + return Err(eyre::eyre!( + "Kickoff txid not found for {}, {}, {}", + operator_xonly_pk, + round_idx, // rounds start from 1 + kickoff_idx + ) + .into()); + } + + tracing::trace!( + "Setting deposit signatures for {:?}, {:?}, {:?} {:?}", + operator_xonly_pk, + round_idx, // rounds start from 1 + kickoff_idx, + kickoff_txid + ); + + self.db + .insert_deposit_signatures_if_not_exist( + Some(&mut dbtx), + deposit_data.get_deposit_outpoint(), + operator_xonly_pk, + RoundIndex::from_index(round_idx), + *kickoff_idx, + kickoff_txid.expect("Kickoff txid must be Some"), + std::mem::take(&mut op_round_sigs[*kickoff_idx]), + ) + .await?; + } + } + } + dbtx.commit().await?; + + Ok((move_tx_partial_sig, emergency_stop_partial_sig)) + } + + #[allow(clippy::too_many_arguments)] + pub async fn sign_optimistic_payout( + &self, + nonce_session_id: u128, + agg_nonce: AggregatedNonce, + deposit_id: u32, + input_signature: Signature, + input_outpoint: OutPoint, + output_script_pubkey: ScriptBuf, + output_amount: Amount, + verification_signature: Option, + ) -> Result { + // if the withdrawal utxo is spent, no reason to sign optimistic payout + if self.rpc.is_utxo_spent(&input_outpoint).await? 
{ + return Err( + eyre::eyre!("Withdrawal utxo {:?} is already spent", input_outpoint).into(), + ); + } + // if verification address is set in config, check if verification signature is valid + if let Some(address_in_config) = self.config.aggregator_verification_address { + // check if verification signature is provided by aggregator + if let Some(verification_signature) = verification_signature { + let address_from_sig = + recover_address_from_ecdsa_signature::( + deposit_id, + input_signature, + input_outpoint, + output_script_pubkey.clone(), + output_amount, + verification_signature, + )?; + + // check if verification signature is signed by the address in config + if address_from_sig != address_in_config { + return Err(BridgeError::InvalidECDSAVerificationSignature); + } + } else { + // if verification signature is not provided, but verification address is set in config, return error + return Err(BridgeError::ECDSAVerificationSignatureMissing); + } + } + + // check if withdrawal is valid first + let move_txid = self + .db + .get_move_to_vault_txid_from_citrea_deposit(None, deposit_id) + .await? 
+ .ok_or_else(|| { + BridgeError::from(eyre::eyre!("Deposit not found for id: {}", deposit_id)) + })?; + + // amount in move_tx is exactly the bridge amount + if output_amount + > self.config.protocol_paramset().bridge_amount - NON_EPHEMERAL_ANCHOR_AMOUNT + { + return Err(eyre::eyre!( + "Output amount is greater than the bridge amount: {} > {}", + output_amount, + self.config.protocol_paramset().bridge_amount + - self.config.protocol_paramset().anchor_amount() + - NON_EPHEMERAL_ANCHOR_AMOUNT + ) + .into()); + } + + // check if withdrawal utxo is correct + let withdrawal_utxo = self + .db + .get_withdrawal_utxo_from_citrea_withdrawal(None, deposit_id) + .await?; + + if withdrawal_utxo != input_outpoint { + return Err(eyre::eyre!( + "Withdrawal utxo is not correct: {:?} != {:?}", + withdrawal_utxo, + input_outpoint + ) + .into()); + } + + let mut deposit_data = self + .db + .get_deposit_data_with_move_tx(None, move_txid) + .await? + .ok_or_eyre("Deposit data corresponding to move txid not found")?; + + let withdrawal_prevout = self.rpc.get_txout_from_outpoint(&input_outpoint).await?; + let withdrawal_utxo = UTXO { + outpoint: input_outpoint, + txout: withdrawal_prevout, + }; + let output_txout = TxOut { + value: output_amount, + script_pubkey: output_script_pubkey, + }; + + let opt_payout_txhandler = create_optimistic_payout_txhandler( + &mut deposit_data, + withdrawal_utxo, + output_txout, + input_signature, + self.config.protocol_paramset(), + )?; + // txin at index 1 is deposited utxo in movetx + let sighash = opt_payout_txhandler.calculate_script_spend_sighash_indexed( + 1, + 0, + bitcoin::TapSighashType::Default, + )?; + + let opt_payout_secnonce = { + let mut session_map = self.nonces.lock().await; + let session = session_map + .sessions + .get_mut(&nonce_session_id) + .ok_or_else(|| eyre::eyre!("Could not find session id {nonce_session_id}"))?; + session + .nonces + .pop() + .ok_or_eyre("No move tx secnonce in session")? 
+ }; + + let opt_payout_partial_sig = musig2::partial_sign( + deposit_data.get_verifiers(), + None, + opt_payout_secnonce, + agg_nonce, + self.signer.keypair, + Message::from_digest(sighash.to_byte_array()), + )?; + + Ok(opt_payout_partial_sig) + } + + pub async fn set_operator_keys( + &self, + mut deposit_data: DepositData, + keys: OperatorKeys, + operator_xonly_pk: XOnlyPublicKey, + ) -> Result<(), BridgeError> { + self.citrea_client + .check_nofn_correctness(deposit_data.get_nofn_xonly_pk()?) + .await?; + + self.is_deposit_valid(&mut deposit_data).await?; + + self.db + .insert_deposit_data_if_not_exists( + None, + &mut deposit_data, + self.config.protocol_paramset(), + ) + .await?; + + let hashes: Vec<[u8; 20]> = keys + .challenge_ack_digests + .into_iter() + .map(|x| { + x.hash.try_into().map_err(|e: Vec| { + eyre::eyre!("Invalid hash length, expected 20 bytes, got {}", e.len()) + }) + }) + .collect::, eyre::Report>>()?; + + if hashes.len() != self.config.get_num_challenge_ack_hashes(&deposit_data) { + return Err(eyre::eyre!( + "Invalid number of challenge ack hashes received from operator {:?}: got: {} expected: {}", + operator_xonly_pk, + hashes.len(), + self.config.get_num_challenge_ack_hashes(&deposit_data) + ).into()); + } + + let operator_data = self + .db + .get_operator(None, operator_xonly_pk) + .await? 
+ .ok_or(BridgeError::OperatorNotFound(operator_xonly_pk))?; + + self.db + .insert_operator_challenge_ack_hashes_if_not_exist( + None, + operator_xonly_pk, + deposit_data.get_deposit_outpoint(), + &hashes, + ) + .await?; + + if keys.winternitz_pubkeys.len() != ClementineBitVMPublicKeys::number_of_flattened_wpks() { + tracing::error!( + "Invalid number of winternitz keys received from operator {:?}: got: {} expected: {}", + operator_xonly_pk, + keys.winternitz_pubkeys.len(), + ClementineBitVMPublicKeys::number_of_flattened_wpks() + ); + return Err(eyre::eyre!( + "Invalid number of winternitz keys received from operator {:?}: got: {} expected: {}", + operator_xonly_pk, + keys.winternitz_pubkeys.len(), + ClementineBitVMPublicKeys::number_of_flattened_wpks() + ) + .into()); + } + + let winternitz_keys: Vec = keys + .winternitz_pubkeys + .into_iter() + .map(|x| x.try_into()) + .collect::>()?; + + let bitvm_pks = ClementineBitVMPublicKeys::from_flattened_vec(&winternitz_keys); + + let assert_tx_addrs = bitvm_pks + .get_assert_taproot_leaf_hashes(operator_data.xonly_pk) + .iter() + .map(|x| x.to_byte_array()) + .collect::>(); + + // wrap around a mutex lock to avoid OOM + let guard = REPLACE_SCRIPTS_LOCK.lock().await; + let start = std::time::Instant::now(); + let scripts: Vec = bitvm_pks.get_g16_verifier_disprove_scripts()?; + + let taproot_builder = taproot_builder_with_scripts(scripts); + + let root_hash = taproot_builder + .try_into_taptree() + .expect("taproot builder always builds a full taptree") + .root_hash() + .to_byte_array(); + + // bitvm scripts are dropped, release the lock + drop(guard); + tracing::debug!("Built taproot tree in {:?}", start.elapsed()); + + let latest_blockhash_wots = bitvm_pks.latest_blockhash_pk.to_vec(); + + let latest_blockhash_script = WinternitzCommit::new( + vec![(latest_blockhash_wots, 40)], + operator_data.xonly_pk, + self.config.protocol_paramset().winternitz_log_d, + ) + .to_script_buf(); + + let latest_blockhash_root_hash = 
taproot_builder_with_scripts(&[latest_blockhash_script]) + .try_into_taptree() + .expect("taproot builder always builds a full taptree") + .root_hash() + .to_raw_hash() + .to_byte_array(); + + self.db + .insert_operator_bitvm_keys_if_not_exist( + None, + operator_xonly_pk, + deposit_data.get_deposit_outpoint(), + bitvm_pks.to_flattened_vec(), + ) + .await?; + // Save the public input wots to db along with the root hash + self.db + .insert_bitvm_setup_if_not_exists( + None, + operator_xonly_pk, + deposit_data.get_deposit_outpoint(), + &assert_tx_addrs, + &root_hash, + &latest_blockhash_root_hash, + ) + .await?; + + Ok(()) + } + + /// Checks if the operator who sent the kickoff matches the payout data saved in our db + /// Payout data in db is updated during citrea sync. + async fn is_kickoff_malicious( + &self, + kickoff_witness: Witness, + deposit_data: &mut DepositData, + kickoff_data: KickoffData, + dbtx: Option>, + ) -> Result { + let move_txid = + create_move_to_vault_txhandler(deposit_data, self.config.protocol_paramset())? 
+ .get_cached_tx() + .compute_txid(); + let payout_info = self + .db + .get_payout_info_from_move_txid(dbtx, move_txid) + .await; + if let Err(e) = &payout_info { + tracing::warn!( + "Couldn't retrieve payout info from db {}, assuming malicious", + e + ); + return Ok(true); + } + let payout_info = payout_info?; + let Some((operator_xonly_pk_opt, payout_blockhash, _, _)) = payout_info else { + tracing::warn!("No payout info found in db, assuming malicious"); + return Ok(true); + }; + + let Some(operator_xonly_pk) = operator_xonly_pk_opt else { + tracing::warn!("No operator xonly pk found in payout tx OP_RETURN, assuming malicious"); + return Ok(true); + }; + + if operator_xonly_pk != kickoff_data.operator_xonly_pk { + tracing::warn!("Operator xonly pk for the payout does not match with the kickoff_data"); + return Ok(true); + } + + let wt_derive_path = WinternitzDerivationPath::Kickoff( + kickoff_data.round_idx, + kickoff_data.kickoff_idx, + self.config.protocol_paramset(), + ); + let commits = extract_winternitz_commits( + kickoff_witness, + &[wt_derive_path], + self.config.protocol_paramset(), + )?; + let blockhash_data = commits.first(); + // only last 20 bytes of the blockhash is committed + let truncated_blockhash = &payout_blockhash[12..]; + if let Some(committed_blockhash) = blockhash_data { + if committed_blockhash != truncated_blockhash { + tracing::warn!("Payout blockhash does not match committed hash: committed: {:?}, truncated payout blockhash: {:?}", + blockhash_data, truncated_blockhash); + return Ok(true); + } + } else { + return Err(eyre::eyre!("Couldn't retrieve committed data from witness").into()); + } + Ok(false) + } + + /// Checks if the kickoff is malicious and sends the appropriate txs if it is. + /// Returns true if the kickoff is malicious. 
+ pub async fn handle_kickoff<'a>( + &'a self, + dbtx: DatabaseTransaction<'a, '_>, + kickoff_witness: Witness, + mut deposit_data: DepositData, + kickoff_data: KickoffData, + challenged_before: bool, + ) -> Result { + let is_malicious = self + .is_kickoff_malicious(kickoff_witness, &mut deposit_data, kickoff_data, Some(dbtx)) + .await?; + if !is_malicious { + return Ok(false); + } + + tracing::warn!( + "Malicious kickoff {:?} for deposit {:?}", + kickoff_data, + deposit_data + ); + + let context = ContractContext::new_context_with_signer( + kickoff_data, + deposit_data.clone(), + self.config.protocol_paramset(), + self.signer.clone(), + ); + + let signed_txs = create_and_sign_txs( + self.db.clone(), + &self.signer, + self.config.clone(), + context, + None, // No need, verifier will not send kickoff tx + Some(dbtx), + ) + .await?; + + let tx_metadata = Some(TxMetadata { + tx_type: TransactionType::Dummy, // will be replaced in add_tx_to_queue + operator_xonly_pk: Some(kickoff_data.operator_xonly_pk), + round_idx: Some(kickoff_data.round_idx), + kickoff_idx: Some(kickoff_data.kickoff_idx), + deposit_outpoint: Some(deposit_data.get_deposit_outpoint()), + }); + + // try to send them + for (tx_type, signed_tx) in &signed_txs { + if *tx_type == TransactionType::Challenge && challenged_before { + // do not send challenge tx operator was already challenged in the same round + tracing::warn!( + "Operator {:?} was already challenged in the same round, skipping challenge tx", + kickoff_data.operator_xonly_pk + ); + continue; + } + match *tx_type { + TransactionType::Challenge + | TransactionType::AssertTimeout(_) + | TransactionType::KickoffNotFinalized + | TransactionType::LatestBlockhashTimeout + | TransactionType::OperatorChallengeNack(_) => { + #[cfg(feature = "automation")] + self.tx_sender + .add_tx_to_queue( + dbtx, + *tx_type, + signed_tx, + &signed_txs, + tx_metadata, + &self.config, + None, + ) + .await?; + } + _ => {} + } + } + + Ok(true) + } + + #[cfg(feature = 
"automation")] + async fn send_watchtower_challenge( + &self, + kickoff_data: KickoffData, + deposit_data: DepositData, + dbtx: Option>, + ) -> Result<(), BridgeError> { + let current_tip_hcp = self + .header_chain_prover + .get_tip_header_chain_proof() + .await?; + + let (work_only_proof, work_output) = self + .header_chain_prover + .prove_work_only(current_tip_hcp.0)?; + + let g16: [u8; 256] = work_only_proof + .inner + .groth16() + .wrap_err("Work only receipt is not groth16")? + .seal + .to_owned() + .try_into() + .map_err(|e: Vec| { + eyre::eyre!( + "Invalid g16 proof length, expected 256 bytes, got {}", + e.len() + ) + })?; + + let g16_proof = CircuitGroth16Proof::from_seal(&g16); + let mut commit_data: Vec = g16_proof + .to_compressed() + .wrap_err("Couldn't compress g16 proof")? + .to_vec(); + + let total_work = + borsh::to_vec(&work_output.work_u128).wrap_err("Couldn't serialize total work")?; + + #[cfg(test)] + { + let wt_ind = self + .config + .test_params + .all_verifiers_secret_keys + .iter() + .position(|x| x == &self.config.secret_key) + .ok_or_else(|| eyre::eyre!("Verifier secret key not found in test params"))?; + + self.config + .test_params + .maybe_disrupt_commit_data_for_total_work(&mut commit_data, wt_ind); + } + + commit_data.extend_from_slice(&total_work); + + tracing::info!("Watchtower prepared commit data, trying to send watchtower challenge"); + + self.queue_watchtower_challenge(kickoff_data, deposit_data, commit_data, dbtx) + .await + } + + async fn queue_watchtower_challenge( + &self, + kickoff_data: KickoffData, + deposit_data: DepositData, + commit_data: Vec, + dbtx: Option>, + ) -> Result<(), BridgeError> { + let (tx_type, challenge_tx, rbf_info) = self + .create_watchtower_challenge( + TransactionRequestData { + deposit_outpoint: deposit_data.get_deposit_outpoint(), + kickoff_data, + }, + &commit_data, + dbtx, + ) + .await?; + + #[cfg(test)] + let mut challenge_tx = challenge_tx; + + #[cfg(test)] + { + if let Some(annex_bytes) = 
rbf_info.annex.clone() { + challenge_tx.input[0].witness.push(annex_bytes); + } + } + + #[cfg(feature = "automation")] + { + let mut dbtx = self.db.begin_transaction().await?; + + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + tx_type, + &challenge_tx, + &[], + Some(TxMetadata { + tx_type, + operator_xonly_pk: Some(kickoff_data.operator_xonly_pk), + round_idx: Some(kickoff_data.round_idx), + kickoff_idx: Some(kickoff_data.kickoff_idx), + deposit_outpoint: Some(deposit_data.get_deposit_outpoint()), + }), + &self.config, + Some(rbf_info), + ) + .await?; + + dbtx.commit().await?; + tracing::info!( + "Committed watchtower challenge, commit data: {:?}", + commit_data + ); + } + + Ok(()) + } + + #[tracing::instrument(skip(self, dbtx))] + async fn update_citrea_deposit_and_withdrawals( + &self, + dbtx: &mut DatabaseTransaction<'_, '_>, + l2_height_start: u64, + l2_height_end: u64, + block_height: u32, + ) -> Result<(), BridgeError> { + let last_deposit_idx = self.db.get_last_deposit_idx(None).await?; + tracing::debug!("Last Citrea deposit idx: {:?}", last_deposit_idx); + + let last_withdrawal_idx = self.db.get_last_withdrawal_idx(None).await?; + tracing::debug!("Last Citrea withdrawal idx: {:?}", last_withdrawal_idx); + + let new_deposits = self + .citrea_client + .collect_deposit_move_txids(last_deposit_idx, l2_height_end) + .await?; + tracing::debug!("New deposits received from Citrea: {:?}", new_deposits); + + let new_withdrawals = self + .citrea_client + .collect_withdrawal_utxos(last_withdrawal_idx, l2_height_end) + .await?; + tracing::debug!( + "New withdrawals received from Citrea: {:?}", + new_withdrawals + ); + + for (idx, move_to_vault_txid) in new_deposits { + tracing::info!( + "Saving move to vault txid {:?} with index {} for Citrea deposits", + move_to_vault_txid, + idx + ); + self.db + .upsert_move_to_vault_txid_from_citrea_deposit( + Some(dbtx), + idx as u32, + &move_to_vault_txid, + ) + .await?; + } + + for (idx, withdrawal_utxo_outpoint) in 
new_withdrawals { + tracing::info!( + "Saving withdrawal utxo {:?} with index {} for Citrea withdrawals", + withdrawal_utxo_outpoint, + idx + ); + self.db + .update_withdrawal_utxo_from_citrea_withdrawal( + Some(dbtx), + idx as u32, + withdrawal_utxo_outpoint, + block_height, + ) + .await?; + } + + let replacement_move_txids = self + .citrea_client + .get_replacement_deposit_move_txids(l2_height_start + 1, l2_height_end) + .await?; + + for (idx, new_move_txid) in replacement_move_txids { + tracing::info!( + "Setting replacement move txid: {:?} -> {:?}", + idx, + new_move_txid + ); + self.db + .update_replacement_deposit_move_txid(dbtx, idx, new_move_txid) + .await?; + } + + Ok(()) + } + + async fn update_finalized_payouts( + &self, + dbtx: &mut DatabaseTransaction<'_, '_>, + block_id: u32, + block_cache: &block_cache::BlockCache, + ) -> Result<(), BridgeError> { + let payout_txids = self + .db + .get_payout_txs_for_withdrawal_utxos(Some(dbtx), block_id) + .await?; + + let block = block_cache + .block + .as_ref() + .ok_or(eyre::eyre!("Block not found"))?; + + let block_hash = block.block_hash(); + + let mut payout_txs_and_payer_operator_idx = vec![]; + for (idx, payout_txid) in payout_txids { + let payout_tx_idx = block_cache.txids.get(&payout_txid); + if payout_tx_idx.is_none() { + tracing::error!( + "Payout tx not found in block cache: {:?} and in block: {:?}", + payout_txid, + block_id + ); + tracing::error!("Block cache: {:?}", block_cache); + return Err(eyre::eyre!("Payout tx not found in block cache").into()); + } + let payout_tx_idx = payout_tx_idx.expect("Payout tx not found in block cache"); + let payout_tx = &block.txdata[*payout_tx_idx]; + // Find the first output that contains OP_RETURN + let circuit_payout_tx = CircuitTransaction::from(payout_tx.clone()); + let op_return_output = get_first_op_return_output(&circuit_payout_tx); + + // If OP_RETURN doesn't exist in any outputs, or the data in OP_RETURN is not a valid xonly_pubkey, + // operator_xonly_pk 
will be set to None, and the corresponding column in DB set to NULL. + // This can happen if optimistic payout is used, or an operator constructs the payout tx wrong. + let operator_xonly_pk = op_return_output + .and_then(|output| parse_op_return_data(&output.script_pubkey)) + .and_then(|bytes| XOnlyPublicKey::from_slice(bytes).ok()); + + if operator_xonly_pk.is_none() { + tracing::info!( + "No valid operator xonly pk found in payout tx {:?} OP_RETURN. Either it is an optimistic payout or the operator constructed the payout tx wrong", + payout_txid + ); + } + + tracing::info!( + "A new payout tx detected for withdrawal {}, payout txid: {:?}, operator xonly pk: {:?}", + idx, + payout_txid, + operator_xonly_pk + ); + + payout_txs_and_payer_operator_idx.push(( + idx, + payout_txid, + operator_xonly_pk, + block_hash, + )); + } + + self.db + .update_payout_txs_and_payer_operator_xonly_pk( + Some(dbtx), + payout_txs_and_payer_operator_idx, + ) + .await?; + + Ok(()) + } + + async fn send_unspent_kickoff_connectors( + &self, + round_idx: RoundIndex, + operator_xonly_pk: XOnlyPublicKey, + used_kickoffs: HashSet, + ) -> Result<(), BridgeError> { + if used_kickoffs.len() == self.config.protocol_paramset().num_kickoffs_per_round { + // ok, every kickoff spent + return Ok(()); + } + + let unspent_kickoff_txs = self + .create_and_sign_unspent_kickoff_connector_txs(round_idx, operator_xonly_pk, None) + .await?; + let mut dbtx = self.db.begin_transaction().await?; + for (tx_type, tx) in unspent_kickoff_txs { + if let TransactionType::UnspentKickoff(kickoff_idx) = tx_type { + if used_kickoffs.contains(&kickoff_idx) { + continue; + } + #[cfg(feature = "automation")] + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + tx_type, + &tx, + &[], + Some(TxMetadata { + tx_type, + operator_xonly_pk: Some(operator_xonly_pk), + round_idx: Some(round_idx), + kickoff_idx: Some(kickoff_idx as u32), + deposit_outpoint: None, + }), + &self.config, + None, + ) + .await?; + } + } + 
dbtx.commit().await?; + Ok(()) + } + + /// Verifies the conditions required to disprove an operator's actions using the "additional" disprove path. + /// + /// This function handles specific, non-Groth16 challenges. It reconstructs a unique challenge script + /// based on on-chain data and constants (`deposit_constant`). It then validates the operator's + /// provided assertions (`operator_asserts`) and acknowledgements (`operator_acks`) against this script. + /// The goal is to produce a spendable witness for the disprove transaction if the operator is found to be at fault. + /// + /// # Arguments + /// * `deposit_data` - Mutable data for the specific deposit being challenged. + /// * `kickoff_data` - Information about the kickoff transaction that initiated this challenge. + /// * `latest_blockhash` - The witness containing Winternitz signature for the latest Bitcoin blockhash. + /// * `payout_blockhash` - The witness containing Winternitz signature for the payout transaction's blockhash. + /// * `operator_asserts` - A map of witnesses from the operator, containing their assertions (claims). + /// * `operator_acks` - A map of witnesses from the operator, containing their acknowledgements of watchtower challenges. + /// * `txhandlers` - A map of transaction builders, used here to retrieve TXIDs of dependent transactions. + /// + /// # Returns + /// - `Ok(Some(bitcoin::Witness))` if the operator's claims are successfully proven false, returning the complete witness needed to spend the disprove script path. + /// - `Ok(None)` if the operator's claims are valid under this specific challenge, and no disprove is possible. + /// - `Err(BridgeError)` if any error occurs during script reconstruction or validation. 
+ #[cfg(feature = "automation")] + #[allow(clippy::too_many_arguments)] + async fn verify_additional_disprove_conditions( + &self, + deposit_data: &mut DepositData, + kickoff_data: &KickoffData, + latest_blockhash: &Witness, + payout_blockhash: &Witness, + operator_asserts: &HashMap, + operator_acks: &HashMap, + txhandlers: &BTreeMap, + dbtx: Option>, + ) -> Result, BridgeError> { + use bitvm::clementine::additional_disprove::debug_assertions_for_additional_script; + + use crate::builder::transaction::ReimburseDbCache; + + let mut reimburse_db_cache = ReimburseDbCache::new_for_deposit( + self.db.clone(), + kickoff_data.operator_xonly_pk, + deposit_data.get_deposit_outpoint(), + self.config.protocol_paramset(), + dbtx, + ); + + let nofn_key = deposit_data.get_nofn_xonly_pk().inspect_err(|e| { + tracing::error!("Error getting nofn xonly pk: {:?}", e); + })?; + + let move_txid = txhandlers + .get(&TransactionType::MoveToVault) + .ok_or(TxError::TxHandlerNotFound(TransactionType::MoveToVault))? + .get_txid() + .to_byte_array(); + + let round_txid = txhandlers + .get(&TransactionType::Round) + .ok_or(TxError::TxHandlerNotFound(TransactionType::Round))? 
+ .get_txid() + .to_byte_array(); + + let vout = UtxoVout::Kickoff(kickoff_data.kickoff_idx as usize).get_vout(); + + let watchtower_challenge_start_idx = + u16::try_from(UtxoVout::WatchtowerChallenge(0).get_vout()) + .wrap_err("Watchtower challenge start index overflow")?; + + let secp = Secp256k1::verification_only(); + + let watchtower_xonly_pk = deposit_data.get_watchtowers(); + let watchtower_pubkeys = watchtower_xonly_pk + .iter() + .map(|xonly_pk| { + // Create timelock script that this watchtower key will commit to + let nofn_2week = Arc::new(TimelockScript::new( + Some(nofn_key), + self.config + .protocol_paramset + .watchtower_challenge_timeout_timelock, + )); + + let builder = TaprootBuilder::new(); + let tweaked = builder + .add_leaf(0, nofn_2week.to_script_buf()) + .expect("Valid script leaf") + .finalize(&secp, *xonly_pk) + .expect("taproot finalize must succeed"); + + tweaked.output_key().serialize() + }) + .collect::>(); + + let deposit_constant = deposit_constant( + kickoff_data.operator_xonly_pk.serialize(), + watchtower_challenge_start_idx, + &watchtower_pubkeys, + move_txid, + round_txid, + vout, + self.config.protocol_paramset.genesis_chain_state_hash, + ); + + tracing::debug!("Deposit constant: {:?}", deposit_constant); + + let kickoff_winternitz_keys = reimburse_db_cache + .get_kickoff_winternitz_keys() + .await? + .clone(); + + let payout_tx_blockhash_pk = kickoff_winternitz_keys + .get_keys_for_round(kickoff_data.round_idx)? + .get(kickoff_data.kickoff_idx as usize) + .ok_or(TxError::IndexOverflow)? + .clone(); + + let replaceable_additional_disprove_script = reimburse_db_cache + .get_replaceable_additional_disprove_script() + .await?; + + let additional_disprove_script = replace_placeholders_in_script( + replaceable_additional_disprove_script.clone(), + payout_tx_blockhash_pk, + deposit_constant.0, + ); + + let witness = operator_asserts + .get(&0) + .wrap_err("No witness found in operator asserts")? 
+ .clone(); + + let deposit_outpoint = deposit_data.get_deposit_outpoint(); + let paramset = self.config.protocol_paramset(); + + let commits = extract_winternitz_commits_with_sigs( + witness, + &ClementineBitVMPublicKeys::mini_assert_derivations_0(deposit_outpoint, paramset), + self.config.protocol_paramset(), + )?; + + let mut challenge_sending_watchtowers_signature = Witness::new(); + let len = commits.len(); + + for elem in commits[len - 1].iter() { + challenge_sending_watchtowers_signature.push(elem); + } + + let mut g16_public_input_signature = Witness::new(); + + for elem in commits[len - 2].iter() { + g16_public_input_signature.push(elem); + } + + let num_of_watchtowers = deposit_data.get_num_watchtowers(); + + let mut operator_acks_vec: Vec> = vec![None; num_of_watchtowers]; + + for (idx, witness) in operator_acks.iter() { + tracing::debug!( + "Processing operator ack for idx: {}, witness: {:?}", + idx, + witness + ); + + let pre_image: [u8; 20] = witness + .nth(1) + .wrap_err("No pre-image found in operator ack witness")? 
+ .try_into() + .wrap_err("Invalid pre-image length, expected 20 bytes")?; + if *idx >= operator_acks_vec.len() { + return Err(eyre::eyre!( + "Operator ack index {} out of bounds for vec of length {}", + idx, + operator_acks_vec.len() + ) + .into()); + } + operator_acks_vec[*idx] = Some(pre_image); + + tracing::debug!(target: "ci", "Operator ack for idx {}", idx); + } + + let latest_blockhash: Vec> = latest_blockhash + .iter() + .skip(1) + .take(88) + .map(|x| x.to_vec()) + .collect(); + + let mut latest_blockhash_new = Witness::new(); + for element in latest_blockhash { + latest_blockhash_new.push(element); + } + + let payout_blockhash: Vec> = payout_blockhash + .iter() + .skip(1) + .take(88) + .map(|x| x.to_vec()) + .collect(); + + let mut payout_blockhash_new = Witness::new(); + for element in payout_blockhash { + payout_blockhash_new.push(element); + } + + tracing::debug!( + target: "ci", + "Verify additional disprove conditions - Genesis height: {:?}, operator_xonly_pk: {:?}, move_txid: {:?}, round_txid: {:?}, vout: {:?}, watchtower_challenge_start_idx: {:?}, genesis_chain_state_hash: {:?}, deposit_constant: {:?}", + self.config.protocol_paramset.genesis_height, + kickoff_data.operator_xonly_pk, + move_txid, + round_txid, + vout, + watchtower_challenge_start_idx, + self.config.protocol_paramset.genesis_chain_state_hash, + deposit_constant + ); + + tracing::debug!( + target: "ci", + "Payout blockhash: {:?}\nLatest blockhash: {:?}\nChallenge sending watchtowers signature: {:?}\nG16 public input signature: {:?}", + payout_blockhash_new, + latest_blockhash_new, + challenge_sending_watchtowers_signature, + g16_public_input_signature + ); + + let additional_disprove_witness = validate_assertions_for_additional_script( + additional_disprove_script.clone(), + g16_public_input_signature.clone(), + payout_blockhash_new.clone(), + latest_blockhash_new.clone(), + challenge_sending_watchtowers_signature.clone(), + operator_acks_vec.clone(), + ); + + let 
debug_additional_disprove_script = debug_assertions_for_additional_script( + additional_disprove_script.clone(), + g16_public_input_signature.clone(), + payout_blockhash_new.clone(), + latest_blockhash_new.clone(), + challenge_sending_watchtowers_signature.clone(), + operator_acks_vec, + ); + + tracing::info!( + "Debug additional disprove script: {:?}", + debug_additional_disprove_script + ); + + tracing::info!( + "Additional disprove witness: {:?}", + additional_disprove_witness + ); + + Ok(additional_disprove_witness) + } + + /// Constructs, signs, and broadcasts the "additional" disprove transaction. + /// + /// This function is called after `verify_additional_disprove_conditions` successfully returns a witness. + /// It takes this witness, places it into the disprove transaction's script spend path, adds the required + /// operator and verifier signatures, and broadcasts the finalized transaction to the Bitcoin network. + /// + /// # Arguments + /// * `txhandlers` - A map containing the pre-built `Disprove` transaction handler. + /// * `kickoff_data` - Contextual data from the kickoff transaction. + /// * `deposit_data` - Contextual data for the deposit being challenged. + /// * `additional_disprove_witness` - The witness generated by `verify_additional_disprove_conditions`, proving the operator's fault. + /// + /// # Returns + /// - `Ok(())` on successful broadcast of the transaction. + /// - `Err(BridgeError)` if signing or broadcasting fails. + #[cfg(feature = "automation")] + async fn send_disprove_tx_additional( + &self, + txhandlers: &BTreeMap, + kickoff_data: KickoffData, + deposit_data: DepositData, + additional_disprove_witness: Witness, + ) -> Result<(), BridgeError> { + let verifier_xonly_pk = self.signer.xonly_public_key; + + let mut disprove_txhandler = txhandlers + .get(&TransactionType::Disprove) + .wrap_err("Disprove txhandler not found in txhandlers")? 
+ .clone(); + + let disprove_input = additional_disprove_witness + .iter() + .map(|x| x.to_vec()) + .collect::>(); + + disprove_txhandler + .set_p2tr_script_spend_witness(&disprove_input, 0, 1) + .inspect_err(|e| { + tracing::error!("Error setting disprove input witness: {:?}", e); + })?; + + let operators_sig = self + .db + .get_deposit_signatures( + None, + deposit_data.get_deposit_outpoint(), + kickoff_data.operator_xonly_pk, + kickoff_data.round_idx, + kickoff_data.kickoff_idx as usize, + ) + .await? + .ok_or_eyre("No operator signature found for the disprove tx")?; + + let mut tweak_cache = TweakCache::default(); + + self.signer + .tx_sign_and_fill_sigs( + &mut disprove_txhandler, + operators_sig.as_ref(), + Some(&mut tweak_cache), + ) + .inspect_err(|e| { + tracing::error!( + "Error signing disprove tx for verifier {:?}: {:?}", + verifier_xonly_pk, + e + ); + })?; + + let disprove_tx = disprove_txhandler.get_cached_tx().clone(); + + tracing::debug!("Disprove txid: {:?}", disprove_tx.compute_txid()); + + tracing::warn!( + "Additional disprove tx created for verifier {:?} with kickoff_data: {:?}, deposit_data: {:?}", + verifier_xonly_pk, + kickoff_data, + deposit_data + ); + + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + TransactionType::Disprove, + &disprove_tx, + &[], + Some(TxMetadata { + tx_type: TransactionType::Disprove, + deposit_outpoint: Some(deposit_data.get_deposit_outpoint()), + operator_xonly_pk: Some(kickoff_data.operator_xonly_pk), + round_idx: Some(kickoff_data.round_idx), + kickoff_idx: Some(kickoff_data.kickoff_idx), + }), + &self.config, + None, + ) + .await?; + dbtx.commit().await?; + Ok(()) + } + + /// Performs the primary G16 proof verification to disprove an operator's claim. + /// + /// This is a complex function that aggregates all of the operator's assertions, which are commitments + /// from a Winternitz one-time signature scheme. 
It meticulously parses and reorganizes these commitments + /// into the precise input format required by the underlying Groth16 SNARK verifier (`validate_assertions`). + /// It then invokes the verifier to check for a faulty computation. + /// + /// # Arguments + /// * `deposit_data` - Mutable data for the specific deposit being challenged. + /// * `operator_asserts` - A map containing all 33 required operator assertion witnesses. + /// + /// # Returns + /// - `Ok(Some((index, script)))` if the ZK proof is faulty. The tuple contains the `StructuredScript` + /// that can be executed on-chain and its `index` in the Taproot tree. + /// - `Ok(None)` if the ZK proof is valid. + /// - `Err(BridgeError)` if any error occurs during data processing or ZK proof verification. + #[cfg(feature = "automation")] + async fn verify_disprove_conditions( + &self, + deposit_data: &mut DepositData, + operator_asserts: &HashMap, + ) -> Result, BridgeError> { + use bridge_circuit_host::utils::get_verifying_key; + + let bitvm_pks = self.signer.generate_bitvm_pks_for_deposit( + deposit_data.get_deposit_outpoint(), + self.config.protocol_paramset, + )?; + let disprove_scripts = bitvm_pks.get_g16_verifier_disprove_scripts()?; + + let deposit_outpoint = deposit_data.get_deposit_outpoint(); + let paramset = self.config.protocol_paramset(); + + // Pre-allocate commit vectors. Initializing with known sizes or empty vectors + // is slightly more efficient as it can prevent reallocations. 
+ let mut g16_public_input_commit: Vec>> = vec![vec![vec![]]; 1]; + let mut num_u256_commits: Vec>> = vec![vec![vec![]]; 14]; + let mut intermediate_value_commits: Vec>> = vec![vec![vec![]]; 363]; + + tracing::info!("Number of operator asserts: {}", operator_asserts.len()); + + if operator_asserts.len() != ClementineBitVMPublicKeys::number_of_assert_txs() { + return Err(eyre::eyre!( + "Expected exactly {} operator asserts, got {}", + ClementineBitVMPublicKeys::number_of_assert_txs(), + operator_asserts.len() + ) + .into()); + } + + for i in 0..operator_asserts.len() { + let witness = operator_asserts + .get(&i) + .expect("indexed from 0 to 32") + .clone(); + + let mut commits = extract_winternitz_commits_with_sigs( + witness, + &ClementineBitVMPublicKeys::get_assert_derivations(i, deposit_outpoint, paramset), + self.config.protocol_paramset(), + )?; + + // Similar to the original operator asserts ordering, here we reorder into the format that BitVM expects. + // For the first transaction, we have specific commits that need to be assigned to their respective arrays. + // It includes the g16 public input commit, the last 2 num_u256 commits, and the last 3 intermediate value commits. + // The rest of the commits are assigned to the num_u256_commits and intermediate_value_commits arrays. + match i { + 0 => { + // Remove the last commit, which is for challenge-sending watchtowers + commits.pop(); + let len = commits.len(); + + // Assign specific commits to their respective arrays by removing from the end. + // This is slightly more efficient than removing from arbitrary indices. 
+ g16_public_input_commit[0] = commits.remove(len - 1); + num_u256_commits[12] = commits.remove(len - 2); + num_u256_commits[13] = commits.remove(len - 3); + intermediate_value_commits[360] = commits.remove(len - 4); + intermediate_value_commits[361] = commits.remove(len - 5); + intermediate_value_commits[362] = commits.remove(len - 6); + } + 1 | 2 => { + // Handles i = 1 and i = 2 + for j in 0..6 { + num_u256_commits[6 * (i - 1) + j] = commits + .pop() + .expect("Should not panic: `num_u256_commits` index out of bounds"); + } + } + 3..=32 => { + // Handles i from 3 to 32 + for j in 0..12 { + intermediate_value_commits[12 * (i - 3) + j] = commits.pop().expect( + "Should not panic: `intermediate_value_commits` index out of bounds", + ); + } + } + _ => { + // Catch-all for any other 'i' values + panic!("Unexpected operator assert index: {}; expected 0 to 32.", i); + } + } + } + + tracing::info!("Converting assert commits to required format"); + tracing::info!( + "g16_public_input_commit[0]: {:?}", + g16_public_input_commit[0] + ); + + // Helper closure to parse commit data into the ([u8; 20], u8) format. + // This avoids code repetition and improves readability. + let fill_from_commits = |source: &Vec>, + target: &mut [[u8; 21]]| + -> Result<(), BridgeError> { + // We iterate over chunks of 2 `Vec` elements at a time. 
+ for (i, chunk) in source.chunks_exact(2).enumerate() { + let mut sig_array: [u8; 21] = [0; 21]; + let sig: [u8; 20] = <[u8; 20]>::try_from(chunk[0].as_slice()).map_err(|_| { + eyre::eyre!( + "Invalid signature length, expected 20 bytes, got {}", + chunk[0].len() + ) + })?; + + sig_array[..20].copy_from_slice(&sig); + + let u8_part: u8 = *chunk[1].first().unwrap_or(&0); + sig_array[20] = u8_part; + + target[i] = sig_array; + } + Ok(()) + }; + + let mut first_box = Box::new([[[0u8; 21]; 68]; 1]); + fill_from_commits(&g16_public_input_commit[0], &mut first_box[0])?; + + let mut second_box = Box::new([[[0u8; 21]; 68]; 14]); + for i in 0..14 { + fill_from_commits(&num_u256_commits[i], &mut second_box[i])?; + } + + let mut third_box = Box::new([[[0u8; 21]; 36]; 363]); + for i in 0..363 { + fill_from_commits(&intermediate_value_commits[i], &mut third_box[i])?; + } + + tracing::info!("Boxes created"); + + let vk = get_verifying_key(); + + let res = tokio::task::spawn_blocking(move || { + validate_assertions( + &vk, + (first_box, second_box, third_box), + bitvm_pks.bitvm_pks, + disprove_scripts + .as_slice() + .try_into() + .expect("static bitvm_cache contains exactly 364 disprove scripts"), + ) + }) + .await + .wrap_err("Validate assertions thread failed with error")?; + + tracing::info!("Disprove validation result: {:?}", res); + + match res { + None => { + tracing::info!("No disprove witness found"); + Ok(None) + } + Some((index, disprove_script)) => { + tracing::info!("Disprove witness found"); + Ok(Some((index, disprove_script))) + } + } + } + + /// Constructs, signs, and broadcasts the primary disprove transaction based on the operator assertions. + /// + /// This function takes the `StructuredScript` and its `index` returned by `verify_disprove_conditions`. + /// It compiles the script, extracts the witness data (the push-only elements), and places it into the correct + /// script path (`index`) of the disprove transaction. 
It then adds the necessary operator and verifier + /// signatures before broadcasting the transaction to the Bitcoin network. + /// + /// # Arguments + /// * `txhandlers` - A map containing the pre-built `Disprove` transaction handler. + /// * `kickoff_data` - Contextual data from the kickoff transaction. + /// * `deposit_data` - Contextual data for the deposit being challenged. + /// * `disprove_script` - A tuple containing the executable `StructuredScript` and its Taproot leaf `index`, as returned by `verify_disprove_conditions`. + /// + /// # Returns + /// - `Ok(())` on successful broadcast of the transaction. + /// - `Err(BridgeError)` if signing or broadcasting fails. + #[cfg(feature = "automation")] + async fn send_disprove_tx( + &self, + txhandlers: &BTreeMap, + kickoff_data: KickoffData, + deposit_data: DepositData, + disprove_script: (usize, StructuredScript), + ) -> Result<(), BridgeError> { + let verifier_xonly_pk = self.signer.xonly_public_key; + + let mut disprove_txhandler = txhandlers + .get(&TransactionType::Disprove) + .wrap_err("Disprove txhandler not found in txhandlers")? + .clone(); + + let disprove_inputs: Vec> = disprove_script + .1 + .compile() + .instructions() + .filter_map(|ins_res| match ins_res { + Ok(Instruction::PushBytes(bytes)) => Some(bytes.as_bytes().to_vec()), + _ => None, + }) + .collect(); + + disprove_txhandler + .set_p2tr_script_spend_witness(&disprove_inputs, 0, disprove_script.0 + 2) + .inspect_err(|e| { + tracing::error!("Error setting disprove input witness: {:?}", e); + })?; + + let operators_sig = self + .db + .get_deposit_signatures( + None, + deposit_data.get_deposit_outpoint(), + kickoff_data.operator_xonly_pk, + kickoff_data.round_idx, + kickoff_data.kickoff_idx as usize, + ) + .await? 
+ .ok_or_eyre("No operator signature found for the disprove tx")?; + + let mut tweak_cache = TweakCache::default(); + + self.signer + .tx_sign_and_fill_sigs( + &mut disprove_txhandler, + operators_sig.as_ref(), + Some(&mut tweak_cache), + ) + .inspect_err(|e| { + tracing::error!( + "Error signing disprove tx for verifier {:?}: {:?}", + verifier_xonly_pk, + e + ); + })?; + + let disprove_tx = disprove_txhandler.get_cached_tx().clone(); + + tracing::debug!("Disprove txid: {:?}", disprove_tx.compute_txid()); + + tracing::warn!( + "BitVM disprove tx created for verifier {:?} with kickoff_data: {:?}, deposit_data: {:?}", + verifier_xonly_pk, + kickoff_data, + deposit_data + ); + + let mut dbtx = self.db.begin_transaction().await?; + self.tx_sender + .add_tx_to_queue( + &mut dbtx, + TransactionType::Disprove, + &disprove_tx, + &[], + Some(TxMetadata { + tx_type: TransactionType::Disprove, + deposit_outpoint: Some(deposit_data.get_deposit_outpoint()), + operator_xonly_pk: Some(kickoff_data.operator_xonly_pk), + round_idx: Some(kickoff_data.round_idx), + kickoff_idx: Some(kickoff_data.kickoff_idx), + }), + &self.config, + None, + ) + .await?; + dbtx.commit().await?; + Ok(()) + } + + async fn handle_finalized_block( + &self, + mut dbtx: DatabaseTransaction<'_, '_>, + block_id: u32, + block_height: u32, + block_cache: Arc, + light_client_proof_wait_interval_secs: Option, + ) -> Result<(), BridgeError> { + tracing::info!("Verifier handling finalized block height: {}", block_height); + + // before a certain number of blocks, citrea doesn't produce proofs (defined in citrea config) + let max_attempts = light_client_proof_wait_interval_secs.unwrap_or(TEN_MINUTES_IN_SECS); + let timeout = Duration::from_secs(max_attempts as u64); + + let (l2_height_start, l2_height_end) = self + .citrea_client + .get_citrea_l2_height_range( + block_height.into(), + timeout, + self.config.protocol_paramset(), + ) + .await + .inspect_err(|e| tracing::error!("Error getting citrea l2 height range: 
{:?}", e))?; + + tracing::debug!( + "l2_height_start: {:?}, l2_height_end: {:?}, collecting deposits and withdrawals...", + l2_height_start, + l2_height_end + ); + self.update_citrea_deposit_and_withdrawals( + &mut dbtx, + l2_height_start, + l2_height_end, + block_height, + ) + .await?; + + self.update_finalized_payouts(&mut dbtx, block_id, &block_cache) + .await?; + + #[cfg(feature = "automation")] + { + // Save unproven block cache to the database + self.header_chain_prover + .save_unproven_block_cache(Some(&mut dbtx), &block_cache) + .await?; + while (self.header_chain_prover.prove_if_ready().await?).is_some() { + // Continue until prove_if_ready returns None + // If it doesn't return None, it means next batch_size amount of blocks were proven + } + } + + Ok(()) + } +} + +// This implementation is only relevant for non-automation mode, where the verifier is run as a standalone process +#[cfg(not(feature = "automation"))] +#[async_trait::async_trait] +impl crate::bitcoin_syncer::BlockHandler for Verifier +where + C: CitreaClientT, +{ + async fn handle_new_block( + &mut self, + dbtx: DatabaseTransaction<'_, '_>, + block_id: u32, + block: bitcoin::Block, + height: u32, + ) -> Result<(), BridgeError> { + self.handle_finalized_block( + dbtx, + block_id, + height, + Arc::new(block_cache::BlockCache::from_block(&block, height)), + None, + ) + .await + } +} + +impl NamedEntity for Verifier +where + C: CitreaClientT, +{ + const ENTITY_NAME: &'static str = "verifier"; + const TX_SENDER_CONSUMER_ID: &'static str = "verifier_tx_sender"; + const FINALIZED_BLOCK_CONSUMER_ID_AUTOMATION: &'static str = + "verifier_finalized_block_fetcher_automation"; + const FINALIZED_BLOCK_CONSUMER_ID_NO_AUTOMATION: &'static str = + "verifier_finalized_block_fetcher_no_automation"; +} + +#[cfg(feature = "automation")] +mod states { + use super::*; + use crate::builder::transaction::{ + create_txhandlers, ContractContext, ReimburseDbCache, TxHandlerCache, + }; + use 
crate::states::context::DutyResult; + use crate::states::{block_cache, Duty, Owner}; + use std::collections::BTreeMap; + use tonic::async_trait; + + #[async_trait] + impl Owner for Verifier + where + C: CitreaClientT, + { + async fn handle_duty(&self, duty: Duty) -> Result { + let verifier_xonly_pk = &self.signer.xonly_public_key; + match duty { + Duty::NewReadyToReimburse { + round_idx, + operator_xonly_pk, + used_kickoffs, + } => { + tracing::info!( + "Verifier {:?} called new ready to reimburse with round_idx: {:?}, operator_idx: {}, used_kickoffs: {:?}", + verifier_xonly_pk, round_idx, operator_xonly_pk, used_kickoffs + ); + self.send_unspent_kickoff_connectors( + round_idx, + operator_xonly_pk, + used_kickoffs, + ) + .await?; + Ok(DutyResult::Handled) + } + Duty::WatchtowerChallenge { + kickoff_data, + deposit_data, + } => { + tracing::warn!( + "Verifier {:?} called watchtower challenge with kickoff_data: {:?}, deposit_data: {:?}", + verifier_xonly_pk, kickoff_data, deposit_data + ); + self.send_watchtower_challenge(kickoff_data, deposit_data, None) + .await?; + + tracing::info!("Verifier sent watchtower challenge",); + + Ok(DutyResult::Handled) + } + Duty::SendOperatorAsserts { .. } => Ok(DutyResult::Handled), + Duty::VerifierDisprove { + kickoff_data, + mut deposit_data, + operator_asserts, + operator_acks, + payout_blockhash, + latest_blockhash, + } => { + #[cfg(test)] + { + if !self + .config + .test_params + .should_disprove(&self.signer.public_key, &deposit_data)? 
+ { + return Ok(DutyResult::Handled); + } + } + let context = ContractContext::new_context_with_signer( + kickoff_data, + deposit_data.clone(), + self.config.protocol_paramset(), + self.signer.clone(), + ); + let mut db_cache = + ReimburseDbCache::from_context(self.db.clone(), &context, None); + + let txhandlers = create_txhandlers( + TransactionType::Disprove, + context, + &mut TxHandlerCache::new(), + &mut db_cache, + ) + .await?; + + // Attempt to find an additional disprove witness first + if let Some(additional_disprove_witness) = self + .verify_additional_disprove_conditions( + &mut deposit_data, + &kickoff_data, + &latest_blockhash, + &payout_blockhash, + &operator_asserts, + &operator_acks, + &txhandlers, + None, + ) + .await? + { + tracing::info!( + "The additional public inputs for the bridge proof provided by operator {:?} for the deposit are incorrect.", + kickoff_data.operator_xonly_pk + ); + self.send_disprove_tx_additional( + &txhandlers, + kickoff_data, + deposit_data, + additional_disprove_witness, + ) + .await?; + } else { + tracing::info!( + "The additional public inputs for the bridge proof provided by operator {:?} for the deposit are correct.", + kickoff_data.operator_xonly_pk + ); + + // If no additional witness, try to find a standard disprove witness + match self + .verify_disprove_conditions(&mut deposit_data, &operator_asserts) + .await? + { + Some((index, disprove_script)) => { + tracing::info!( + "The public inputs for the bridge proof provided by operator {:?} for the deposit are incorrect.", + kickoff_data.operator_xonly_pk + ); + + self.send_disprove_tx( + &txhandlers, + kickoff_data, + deposit_data, + (index, disprove_script), + ) + .await?; + } + None => { + tracing::info!( + "The public inputs for the bridge proof provided by operator {:?} for the deposit are correct.", + kickoff_data.operator_xonly_pk + ); + } + } + } + + Ok(DutyResult::Handled) + } + Duty::SendLatestBlockhash { .. 
} => Ok(DutyResult::Handled), + Duty::CheckIfKickoff { + txid, + block_height, + witness, + challenged_before, + } => { + tracing::debug!( + "Verifier {:?} called check if kickoff with txid: {:?}, block_height: {:?}", + verifier_xonly_pk, + txid, + block_height, + ); + let db_kickoff_data = self + .db + .get_deposit_data_with_kickoff_txid(None, txid) + .await?; + let mut challenged = false; + if let Some((deposit_data, kickoff_data)) = db_kickoff_data { + tracing::debug!( + "New kickoff found {:?}, for deposit: {:?}", + kickoff_data, + deposit_data.get_deposit_outpoint() + ); + let mut dbtx = self.db.begin_transaction().await?; + // add kickoff machine if there is a new kickoff + // do not add if kickoff finalizer is already spent => kickoff is finished + // this can happen if we are resyncing + StateManager::::dispatch_new_kickoff_machine( + self.db.clone(), + &mut dbtx, + kickoff_data, + block_height, + deposit_data.clone(), + witness.clone(), + ) + .await?; + challenged = self + .handle_kickoff( + &mut dbtx, + witness, + deposit_data, + kickoff_data, + challenged_before, + ) + .await?; + dbtx.commit().await?; + } + Ok(DutyResult::CheckIfKickoff { challenged }) + } + } + } + + async fn create_txhandlers( + &self, + tx_type: TransactionType, + contract_context: ContractContext, + ) -> Result, BridgeError> { + let mut db_cache = + ReimburseDbCache::from_context(self.db.clone(), &contract_context, None); + let txhandlers = create_txhandlers( + tx_type, + contract_context, + &mut TxHandlerCache::new(), + &mut db_cache, + ) + .await?; + Ok(txhandlers) + } + + async fn handle_finalized_block( + &self, + dbtx: DatabaseTransaction<'_, '_>, + block_id: u32, + block_height: u32, + block_cache: Arc, + light_client_proof_wait_interval_secs: Option, + ) -> Result<(), BridgeError> { + self.handle_finalized_block( + dbtx, + block_id, + block_height, + block_cache, + light_client_proof_wait_interval_secs, + ) + .await + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use 
crate::rpc::ecdsa_verification_sig::OperatorWithdrawalMessage; + use crate::test::common::citrea::MockCitreaClient; + use crate::test::common::*; + use bitcoin::Block; + use std::str::FromStr; + use std::sync::Arc; + + #[tokio::test] + #[ignore] + async fn test_handle_finalized_block_idempotency() { + let mut config = create_test_config_with_thread_name().await; + let _regtest = create_regtest_rpc(&mut config).await; + + let verifier = Verifier::::new(config.clone()) + .await + .unwrap(); + + // Create test block data + let block_id = 1u32; + let block_height = 100u32; + let test_block = Block { + header: bitcoin::block::Header { + version: bitcoin::block::Version::ONE, + prev_blockhash: bitcoin::BlockHash::all_zeros(), + merkle_root: bitcoin::TxMerkleNode::all_zeros(), + time: 1234567890, + bits: bitcoin::CompactTarget::from_consensus(0x207fffff), + nonce: 12345, + }, + txdata: vec![], // empty transactions + }; + let block_cache = Arc::new(block_cache::BlockCache::from_block( + &test_block, + block_height, + )); + + // First call to handle_finalized_block + let mut dbtx1 = verifier.db.begin_transaction().await.unwrap(); + let result1 = verifier + .handle_finalized_block( + &mut dbtx1, + block_id, + block_height, + block_cache.clone(), + None, + ) + .await; + // Should succeed or fail gracefully - testing idempotency, not functionality + tracing::info!("First call result: {:?}", result1); + + // Commit the first transaction + dbtx1.commit().await.unwrap(); + + // Second call with identical parameters should also succeed (idempotent) + let mut dbtx2 = verifier.db.begin_transaction().await.unwrap(); + let result2 = verifier + .handle_finalized_block( + &mut dbtx2, + block_id, + block_height, + block_cache.clone(), + None, + ) + .await; + // Should succeed or fail gracefully - testing idempotency, not functionality + tracing::info!("Second call result: {:?}", result2); + + // Commit the second transaction + dbtx2.commit().await.unwrap(); + + // Both calls should have 
same outcome (both succeed or both fail with same error type) + assert_eq!( + result1.is_ok(), + result2.is_ok(), + "Both calls should have the same outcome" + ); + } + + #[tokio::test] + #[cfg(feature = "automation")] + async fn test_database_operations_idempotency() { + let mut config = create_test_config_with_thread_name().await; + let _regtest = create_regtest_rpc(&mut config).await; + + let verifier = Verifier::::new(config.clone()) + .await + .unwrap(); + + // Test header chain prover save operation idempotency + let test_block = Block { + header: bitcoin::block::Header { + version: bitcoin::block::Version::ONE, + prev_blockhash: bitcoin::BlockHash::all_zeros(), + merkle_root: bitcoin::TxMerkleNode::all_zeros(), + time: 1234567890, + bits: bitcoin::CompactTarget::from_consensus(0x207fffff), + nonce: 12345, + }, + txdata: vec![], // empty transactions + }; + let block_cache = block_cache::BlockCache::from_block(&test_block, 100u32); + + // First save + let mut dbtx1 = verifier.db.begin_transaction().await.unwrap(); + let result1 = verifier + .header_chain_prover + .save_unproven_block_cache(Some(&mut dbtx1), &block_cache) + .await; + assert!(result1.is_ok(), "First save should succeed"); + dbtx1.commit().await.unwrap(); + + // Second save with same data should be idempotent + let mut dbtx2 = verifier.db.begin_transaction().await.unwrap(); + let result2 = verifier + .header_chain_prover + .save_unproven_block_cache(Some(&mut dbtx2), &block_cache) + .await; + assert!(result2.is_ok(), "Second save should succeed (idempotent)"); + dbtx2.commit().await.unwrap(); + } + + #[tokio::test] + async fn test_recover_address_from_signature() { + let input_signature = Signature::from_str("e8b82defd5e7745731737d210ad3f649541fd1e3173424fe6f9152b11cf8a1f9e24a176690c2ab243fb80ccc43369b2aba095b011d7a3a7c2a6953ef6b102643") + .unwrap(); + let input_outpoint = OutPoint::from_str( + "0000000000000000000000000000000000000000000000000000000000000000:0", + ) + .unwrap(); + let 
output_script_pubkey = + ScriptBuf::from_hex("0000000000000000000000000000000000000000000000000000000000000000") + .unwrap(); + let output_amount = Amount::from_sat(1000000000000000000); + let deposit_id = 1; + + let opt_payout_sig = PrimitiveSignature::from_str("0x165b7303ffe40149e297be9f1112c1484fcbd464bec26036e5a6142da92249ed7de398295ecac9e41943e326d44037073643a89049177b43c4a09f98787eafa91b") + .unwrap(); + let address = recover_address_from_ecdsa_signature::( + deposit_id, + input_signature, + input_outpoint, + output_script_pubkey.clone(), + output_amount, + opt_payout_sig, + ) + .unwrap(); + assert_eq!( + address, + alloy::primitives::Address::from_str("0x281df03154e98484B786EDEf7EfF592a270F1Fb1") + .unwrap() + ); + + let op_withdrawal_sig = PrimitiveSignature::from_str("0xe540662d2ea0aeb29adeeb81a824bcb00e3d2a51d2c28e3eab6305168904e4cb7549e5abe78a91e58238a3986a5faf2ca9bbaaa79e0d0489a96ee275f7db9b111c") + .unwrap(); + let address = recover_address_from_ecdsa_signature::( + deposit_id, + input_signature, + input_outpoint, + output_script_pubkey.clone(), + output_amount, + op_withdrawal_sig, + ) + .unwrap(); + assert_eq!( + address, + alloy::primitives::Address::from_str("0x281df03154e98484B786EDEf7EfF592a270F1Fb1") + .unwrap() + ); + + // using OperatorWithdrawalMessage signature for OptimisticPayoutMessage should fail + let address = recover_address_from_ecdsa_signature::( + deposit_id, + input_signature, + input_outpoint, + output_script_pubkey, + output_amount, + op_withdrawal_sig, + ) + .unwrap(); + assert_ne!( + address, + alloy::primitives::Address::from_str("0x281df03154e98484B786EDEf7EfF592a270F1Fb1") + .unwrap() + ); + } +} diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..915705fef --- /dev/null +++ b/docs/README.md @@ -0,0 +1,14 @@ +# Clementine Documentation + +Welcome to the Clementine documentation. This is not a code documentation, +rather a higher view of the Clementine. 
To view code documentation, please visit +[chainwayxyz.github.io/clementine/clementine_core](https://chainwayxyz.github.io/clementine/clementine_core/). + +- [The Design of Clementine](design.md): Design and technicals of Clementine +- [Usage](usage.md): How to use Clementine + +## Circuits + +- [Header Chain Circuit](header-chain-circuit.md) +- [Work Only Circuit](work-only-circuit.md) +- [Bridge Circuit](bridge-circuit.md) diff --git a/docs/bridge-circuit.md b/docs/bridge-circuit.md new file mode 100644 index 000000000..ef6f862a8 --- /dev/null +++ b/docs/bridge-circuit.md @@ -0,0 +1,102 @@ +# Bridge Circuit Documentation + +## The Role of the Bridge Circuit +The Bridge Circuit is a core component of Clementine, the trust-minimized two-way peg mechanism for the Citrea L2 rollup. The bridge circuit's primary role is to ensure the secure and verifiable transfer of assets between the Bitcoin blockchain and the Citrea L2. It acts as an automated verifier, enforcing the rules of the protocol and enabling users to confidently move their assets between the two chains. By leveraging zero-knowledge proofs, the bridge circuit allows for this cross-chain functionality without requiring users to trust a centralized intermediary, which is a significant improvement in security and decentralization compared to traditional bridge designs. This trust-minimized and automated verification is made possible through a mechanism that emulates covenants by having a committee of n signers pre-sign a specific set of transactions during the Peg-In phase. This pre-signing creates a deterministic and enforceable set of rules for how the deposited funds can be moved, effectively automating the BitVM2 challenge-response process and ensuring the Bridge Circuit receives the correct inputs for verification. 
Here's how these pre-signed transactions relate to the Bridge Circuit:
+- Setting the Stage for Verification: During the peg-in process, the signer committee pre-signs a whole suite of potential transactions that will govern any future peg-out attempt related to that deposit. This creates a predictable and unalterable "game tree" for the BitVM2 protocol. The Operator and Challengers are not creating new rules as they go; they are simply choosing which pre-written script to execute.
+- Automating the Challenge Flow: The pre-signed transactions directly map to the inputs and state transitions that the Bridge Circuit is designed to verify.
+- KickOff and its Connectors: When an Operator initiates a peg-out with a KickOff transaction, they are activating a set of pre-signed "connector" outputs while also committing the payout block hash data on-chain via Winternitz One-Time Signature (WOTS). Each connector can only be spent by the specific, pre-signed transactions.
+- Watchtower Challenge and OperatorChallengeACK: The circuit needs to know which Watchtowers have challenged the Operator. This is enforced through pre-signed transactions. A Watchtower spends a small amount of personal funds to send the Watchtower Challenge transaction to submit its Work Only Proof (WOP). The Operator is then forced to acknowledge this by spending the corresponding Acknowledge Connector with the pre-signed OperatorChallengeACK transaction. If the Operator fails to do so, a Challenger can use the pre-signed OperatorChallengeNACK to slash them. This mechanism guarantees that the watchtower_sent_challenge boolean array fed into the Bridge Circuit is accurate and cannot be manipulated by the Operator.
+- Assert and Disprove: The Operator posts the results of their off-chain execution of the Bridge Circuit via Assert transactions. If a Challenger finds an error, they use a pre-signed Disprove transaction to execute a single step of the Bridge or Clementine Disprove Scripts on-chain.
The logic for these disprove scripts, which includes verifying the Groth16 proof of the main Bridge Circuit, is encoded into the pre-signed Disprove Connector output of the KickOff transaction. + +## BitVM2 Integration +The Bridge Circuit integrates with the BitVM2 design to enable the verification of complex computations on the Bitcoin blockchain without requiring any changes to Bitcoin's underlying protocol. Since the Bridge Circuit's verification logic (a zk-SNARK verifier) is too large to be executed directly within a Bitcoin transaction, it is implemented as a BitVM2 program. This integration works as follows: +- Off-Chain Execution: The Operator executes the fronting (payout) transaction for a withdrawal request. Then commits the hash of the Bitcoin block that includes this transaction in the Witness of the Kickoff transaction (via WOTS). +- On-Chain Challenge: Any Challenger who thinks the Operator is malicious can contest the Operator's claim by spending the Challenge connector, which starts the BitVM2 process. The BitVM2 protocol facilitates a "dissection game" that narrows down the disagreement to a single computational step. In this game, after a challenge, the Operator is forced to prove the correctness of their on-chain operations using Bridge Circuit, then post a series of Assert transactions, which reveal the intermediate results of the generated Groth16 proof verification computation (via WOTS). If a Verifier finds an incorrect step in this revealed trace, they can post a Disprove transaction, which pinpoints the specific faulty computation to be executed on-chain. +- On-Chain Verification: This single, incorrect step of the Bridge Circuit's execution is then verified on-chain within a Bitcoin Script. Because only one small part of the computation is verified, it fits within the size and operational limits of Bitcoin's scripting language. 
+ +By using BitVM2, Clementine can enforce the complex rules of the Bridge Circuit in a trust-minimized way, leveraging Bitcoin's security for dispute resolution while keeping the vast majority of the computational work off-chain. + +### The Disprove Process +If a Challenger finds an error with the output of the Operator's off-chain execution of the bridge program, they can post a Disprove transaction. This transaction pinpoints the specific step of the program where the Operator's computation was incorrect and executes that step on-chain. If the on-chain execution confirms the Operator's error, the Challenger is able to burn the Operator's collateral. There are two types of scripts that can be executed in a Disprove transaction: +- BridgeDisproveScript: This script verifies the main Bridge Circuit. It uses a Groth16 proof to check several critical conditions related to bridge operations. +- ClementineDisproveScript: This script ensures that the inputs provided to the Bridge Circuit are consistent with the on-chain state of the relevant data, such as Watchtower challenges and block hashes (committed via WOTS). It verifies that the Operator has not censored or ignored any valid challenges from the Watchtowers, and did use the data they committed on-chain. + +## TL;DR +* **Header Chain Proof (HCP) Verification:** + * Verifies that the HCP's `method_id` is correct. + * Verifies the HCP's Groth16 proof using the `zkvm_guest`. + +* **Watchtower Challenge Processing:** + * Verifies the Schnorr signature on each Watchtower's challenge transaction and if verification is successful, sets the corresponding bit. + * Sorts Watchtower challenges that passed the Schnorr signature verification by their `total_work` in descending order. + * Verifies the Groth16 proofs of the Watchtowers until the first valid proof. This will be the highest valid `total_work`, hence the name `max_total_work`. 
+ * Asserts that the Operator's `total_work` from their HCP is greater than the `max_total_work` from the Watchtowers. + +* **Simple Payment Verification (SPV):** + * Verifies the inclusion of the payout transaction within the claimed Bitcoin block using a Merkle tree proof based on `mid_state_txid`. + * Verifies the inclusion of the block header in the (Merkle Mountain Range) MMR of the canonical chain. + +* **Light Client Proof (LCP) Verification:** + * Verifies the `LightClientProof` by calling `env::verify` with the correct `LC_IMAGE_ID`. + * Performs a check to ensure the L1 block hash from the LCP output matches the payout transaction's block hash. + +* **EVM Storage Proof Verification:** + * Verifies the storage proof for the deposit UTXO using the state root from the verified LCP. + * Verifies the storage proof for the withdrawal data. + +* **Other Checks:** + * Checks that the data on the contract matches with the payout transaction data. + * Checks that the payout transaction is included in the HCP. + +## High-Level Overview +> [!WARNING] +> Before reading this document, please read the [header chain circuit](header-chain-circuit.md) and [work only circuit](work-only-circuit.md) documentations. + +The bridge circuit in Clementine serves as a critical component that enables secure and (optimistically) verifiable cross-chain interactions between Bitcoin and the Citrea L2 rollup. Its primary function is to allow Operators, when challenged, to prove the correctness of their operations and the validity of state transitions. + +At a high level, the circuit performs several key verifications: + +* **Header Chain Verification**: In the circuit, the Operator verifies their own Header Chain Proof (HCP). + +* **Watchtower Challenge Processing**: In the circuit, the Operator processes and validates challenges from watchtowers, who monitor operator behavior and provide their own Work Only Proof (WOP) as a Groth16 proof. 
+ This verification is done as follows: + For each Watchtower, the signature that is for spending the connector UTXO for the challenge-sending transaction is verified. If the signature is verified, the corresponding bit flag to that Watchtower will be set to 1. + Then the `Work`s provided by the Watchtowers are sorted in a descending order. Then, until the first Groth16 proof is verified, they are looped. This way, the Operator obtains the maximum valid amount of Work + provided by the Watchtowers. The Operator must provide a HCP with more work compared to the WOP with maximum Work. This is necessary, since the canonical Bitcoin blockchain is determined by the total Work done. If the Operator fails to do so, this automatically means that the Operator did not follow the canonical chain; therefore, is already malicious. + +* **Simple Payment Verification (SPV)**: In the circuit, the Operator verifies the Simplified Payment Verification (SPV) proof of their payout (fronting the withdrawal) transaction. + +* **Light Client Proof (LCP) Verification**: In the circuit, the Operator verifies the Light Client Proof (LCP) (which is a Groth16 proof). This proof comes from a recursive Risc0 circuit that verifies the previous LCP each time, and verifies the Batch Proofs for Citrea, and then generates the new Light Client Proof. This recursion happens per Bitcoin block. Therefore, there exists an LCP for the Bitcoin block that includes the payout transaction of the Operator. The Operator uses the state root from the LCP output to verify the storage proof of their payout transaction data for the specific deposit that corresponds to that withdrawal operation on the Bridge Contract. + +After all of the verification steps above, the specific constants from the setup are calculated, and with withdrawal specific data, the output data is generated and committed. 
+ +--- + +### Key Files (RISC Zero Implementation) + +* **`risc0-circuits/bridge-circuit/guest/src/main.rs`** + * This is the entry point for the **RISC Zero guest application**. + * It initializes the `Risc0Guest` environment and makes the crucial call to `circuits_lib::bridge_circuit::bridge_circuit`, passing the `zkvm_guest` and the `WORK_ONLY_IMAGE_ID` as parameters. + * **`WORK_ONLY_IMAGE_ID: [u8; 32]`**: This is a static constant that dynamically resolves to the expected method ID (image ID) of the "Work-Only" circuit. Its value is determined at compile time based on the `BITCOIN_NETWORK` environment variable (e.g., `mainnet`, `testnet4`, `signet`, `regtest`), ensuring the Bridge Circuit verifies proofs from the correct "Work-Only" circuit version for the specified network. + +* **`risc0-circuits/bridge-circuit/guest/Cargo.toml`** + * The package manifest for the guest-side code. + * It defines the `bridge-circuit-guest` package and its dependencies, notably linking to `circuits-lib` which contains the core circuit logic. + * It also includes a `use-test-vk` feature, which can be enabled to use a test verification key. + +* **`risc0-circuits/bridge-circuit/src/lib.rs`** + * This file includes the `methods.rs` generated by the build script, which contains information about the guest methods (ELF image and method ID) that the host can use to prove the guest's execution. Because we use hard-coded method IDs and ELFs that are relocated by build.rs, we rely on those instead. + +* **`risc0-circuits/bridge-circuit/build.rs`** + * This is the **build script** for the host-side. It is responsible for: + * Compiling the `bridge-circuit-guest` code into a RISC Zero ELF binary. + * Computing the unique **method ID** for the compiled guest program. + * Handling environment variables (like `BITCOIN_NETWORK` and `REPR_GUEST_BUILD`) to configure the build process, including optional Docker usage for guest builds. 
+ * Copying the generated ELF binary to a designated `elfs` folder. The destination filename incorporates a `test-` prefix if the `use-test-vk` feature is enabled. + +* **`risc0-circuits/bridge-circuit/Cargo.toml`** + * The package manifest for the host-side crate. + * It defines the `bridge-circuit` package and its build-time dependencies, including `risc0-build` for the RISC Zero toolchain integration and `risc0-binfmt`. + * It also defines the `use-test-vk` feature, which can influence the build process as seen in `build.rs`. + +--- \ No newline at end of file diff --git a/docs/design.md b/docs/design.md new file mode 100644 index 000000000..722a42a14 --- /dev/null +++ b/docs/design.md @@ -0,0 +1,46 @@ +# The Design Of Clementine + +Our bridge leverages BitVM for a trust minimized BTC <> Citrea bridge. The +[whitepaper](https://citrea.xyz/clementine_whitepaper.pdf) explains technicals. +Also, [https://bitvm.org/bitvm_bridge.pdf](https://bitvm.org/bitvm_bridge.pdf) +and [http://bitvm.org/bitvm2](http://bitvm.org/bitvm2) can be checked to learn +more about BitVM. + +![Clementine Tx Graph](images/clementine_diagram.png) + +## Depositing + +Aggregator is responsible for helping verifiers to finalize deposits, using +[musig2](https://github.com/bitcoin-core/secp256k1/blob/master/doc/musig.md#signing). +It has 3 steps: + +1. Nonce aggregation +2. Signature aggregation +3. Move tx creation + +![Move TX creation](images/move_tx_creation.png) + +1. In the first step, aggregator will collect nonces from all the verifiers, + soon to be aggregated and send back to the verifiers. Aggregation is done by + musig2. +2. At the second step, partial signatures will be requested from verifiers for + the provided aggregated nonce. They will be aggregated using musig2, just + like nonces. Final Schnorr signature will be sent to verifiers. +3. Aggregated signatures are used by verifiers to finalize deposit. 
Then, + verifiers will return move tx partial signatures, which will later be + aggregated. Finally, aggregator will create a move tx. + +## FAQ + +### Why do the bridge funds stay in N-of-N, not in M-of-N? + +It is important to distinguish this N-of-N arrangement from a traditional multisignature wallet; instead, it functions as a key deletion covenant. A covenant is a mechanism that restricts how a UTXO can be spent. One potential concern is that if any of the signers refuse to sign, new deposits could be blocked, as the required N-of-N signatures could not be collected. However, this isn't a problem, because the Bridge Contract also maintains a separate M-of-N multisig, which has the authority to update the N-of-N set. While this may appear similar to simply holding bridge funds in an M-of-N multisig, it is fundamentally different. Funds already deposited and secured by the N-of-N covenant remain safe, and updates to the N-of-N set can be subject to time restrictions (for example, allowing one month for updates). This gives participants the opportunity to exit the system if they do not trust the new set of signers. The bridge is designed to remain secure as long as at least one of the N signers' keys is still secure, and to remain operational as long as at least one operator continues to participate. + +### Why is the bridge denomination 10 BTC, not 1 BTC? + +This is still an open research question. But from current observations, 1 BTC doesn’t seem feasible. The reason is that every round tx has a limited number of kickoff connectors, thus limiting the withdrawal throughput. + +### Why We Use Winternitz One-Time Signatures + +In Bitcoin script, with native opcodes, one can only verify Schnorr signatures that sign the transaction. In other words, one cannot verify a Schnorr signature that signs a random message. 
However, Winternitz and Lamport signatures can be verified just by taking hashes and checking for equality (Winternitz involves some additional mathematical operations where Bitcoin has those). +This way, we can use Winternitz to propagate state across UTXOs. For example, in BitVM, the prover signs intermediate steps; later, the same signatures can be used to disprove an incorrect proof. diff --git a/docs/header-chain-circuit.md b/docs/header-chain-circuit.md new file mode 100644 index 000000000..d39136c67 --- /dev/null +++ b/docs/header-chain-circuit.md @@ -0,0 +1,52 @@ +# Header Chain Circuit Documentation + +This document describes the logic and structure of the header chain circuit, which implements Bitcoin header chain verification logic. This circuit is designed to operate within a zero-knowledge virtual machine (zkVM) environment, with some components also supporting native execution. Its primary purpose is to verify sequences of Bitcoin block headers, ensuring the integrity and continuity of the chain state. + +### Merkle Mountain Range (MMR) Implementations + +The circuit's core logic is supported by two distinct Merkle Mountain Range (MMR) implementations. The [`mmr_guest.rs`](../circuits-lib/src/header_chain/mmr_guest.rs) module provides an MMR tailored for the constrained environment of a zkVM. This `MMRGuest` implementation is designed to efficiently verify inclusion proofs for block hashes, confirming that a given block header is part of the verified chain without needing to store the entire chain history. + +In contrast, the [`mmr_native.rs`](../circuits-lib/src/header_chain/mmr_native.rs) module provides a native MMR implementation for use outside the zkVM. This `MMRNative` is used to build the MMR from a sequence of block headers and generate inclusion proofs. These proofs can then be passed to the `MMRGuest` within the zkVM for verification, bridging the gap between off-chain data preparation and on-chain verification. 
+ +### Core Circuit Logic + +The central component of the circuit is the main logic module, found in [`mod.rs`](../circuits-lib/src/header_chain/mod.rs). This module orchestrates the entire verification process, from handling input and output to performing all necessary cryptographic and protocol-level checks. + +The circuit's entry point function, `header_chain_circuit`, takes a list of block headers as input and applies them to a `ChainState` data structure. This `ChainState` keeps track of essential chain parameters such as block height, accumulated proof of work, and the current difficulty target. For each block header, the circuit performs a series of critical validations: + +* **Method ID Consistency**: It first ensures that the `method_id` of the input matches that of any previous proof, preventing the use of different circuit versions. +* **Chain Continuity**: It verifies that each block's `prev_block_hash` matches the `best_block_hash` from the preceding state, guaranteeing a continuous chain. +* **Proof of Work**: It calculates the double SHA256 hash of the block header and checks that it is less than or equal to the current difficulty target. +* **Difficulty Adjustment**: It validates the `bits` field in the header and, at the end of each 2016-block epoch, calculates and validates the new difficulty target based on the time elapsed. +* **Timestamp Validation**: It ensures that the block's timestamp is greater than the median of the previous 11 block timestamps, preventing timestamp manipulation. +* **MMR Proof Verification**: It verifies the integrity of the block header's inclusion proof using the `MMRGuest`. + +Upon successful execution, the circuit outputs an updated `ChainState` and a hash of the initial state. This output can then be used as input for a subsequent circuit run, enabling the verification of a continuous, long chain of headers across multiple proofs. 
The entire process is designed to `panic!` on any validation failure, ensuring that only cryptographically sound and protocol-compliant proofs are generated. + +### Key Files (RISC Zero Implementation) + + * **`risc0-circuits/header-chain/guest/src/main.rs`** + + * This is the entry point for the **RISC Zero guest application**. It initializes the `Risc0Guest` environment and makes the crucial call to `circuits_lib::header_chain::header_chain_circuit` to execute the Bitcoin header chain verification logic inside the zkVM. + + * **`risc0-circuits/header-chain/guest/Cargo.toml`** + + * The package manifest for the guest-side code. It defines the `header-chain-guest` package and its dependencies, notably linking to the `circuits-lib` which contains the actual circuit logic. + + * **`risc0-circuits/header-chain/src/lib.rs`** + + * This file includes the `methods.rs` generated by the build script, which contains information about the guest methods (ELF image and method ID) that the host can use to prove the guest's execution. Because we use hard-coded method IDs and ELFs that are relocated by build.rs, we rely on those instead. + + * **`risc0-circuits/header-chain/build.rs`** + + * This is the **build script** for the host-side. It is responsible for: + * Compiling the `header-chain-guest` code into a RISC Zero ELF binary. + * Computing the unique **method ID** for the compiled guest program. + * Handling environment variables (like `BITCOIN_NETWORK`) to configure the build. + * Optionally using Docker for guest builds and copying the generated ELF binary to a designated `elfs` folder. + + * **`risc0-circuits/header-chain/Cargo.toml`** + + * The package manifest for the host-side crate. It defines the `header-chain` package and its build-time dependencies, including `risc0-build` for the RISC Zero toolchain integration. 
+ +--- \ No newline at end of file diff --git a/docs/images/clementine_diagram.png b/docs/images/clementine_diagram.png new file mode 100644 index 000000000..735ddd2f5 Binary files /dev/null and b/docs/images/clementine_diagram.png differ diff --git a/docs/images/move_tx_creation.png b/docs/images/move_tx_creation.png new file mode 100644 index 000000000..06c13c8c8 Binary files /dev/null and b/docs/images/move_tx_creation.png differ diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 000000000..f9d318425 --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,313 @@ +# Usage + +Clementine provides a single binary that can act as 3 different actor +services: + +- Verifier (we sometimes call this the signer) +- Operator +- Aggregator + +These services communicate via gRPC and use a PostgreSQL database. They can be +configured to share the same database. + +An entity can choose to run these services on a single host to be a part of the +peg-in and peg-out process. All the services that are run by a single entity +should ideally share the same database. Typical entities are: + +- Operator entity + - Runs both an operator and a verifier service +- Verifier entity + - Runs a verifier service +- Aggregator entity + - Runs both an aggregator and a verifier service + +## Prerequisites + +Before compiling Clementine: + +1. Install Rust: [rustup.rs](https://rustup.rs/) +2. Install RiscZero (2.1.0): [dev.risczero.com/api/zkvm/install](https://dev.risczero.com/api/zkvm/install) + + ```sh + curl -L https://risczero.com/install | bash + rzup install cargo-risczero 2.1.0 # Or v2.1.0 + rzup install r0vm 2.1.0 + rzup install rust 1.85.0 + ``` + +3. If on Mac, install XCode and its app from AppStore (if `xcrun metal` gives an error): + + ```sh + xcode-select --install + ``` + +4. If on Ubuntu, install these packages: + + ```sh + sudo apt install build-essential libssl-dev pkg-config + ``` + +Before running Clementine: + +1.
Install and configure a Bitcoin node (at least v29.0) +2. Install and configure PostgreSQL. Using docker: + + ```sh + docker run --name clementine-test-db \ + -e POSTGRES_USER=clementine \ + -e POSTGRES_PASSWORD=clementine \ + -e POSTGRES_DB=clementine \ + -p 5432:5432 \ + --restart always \ + -d postgres:15 \ + bash -c "exec docker-entrypoint.sh postgres -c 'max_connections=1000'" + ``` + +3. Install RISC Zero toolchain: + + ```sh + cargo install cargo-risczero + ``` + +4. [Optional] TLS certificates required to start and connect to a Clementine + server. For tests, these are automatically generated, if not present. Please + check [RPC Authentication](#rpc-authentication) and [Security Considerations](#security-considerations) + sections when generating certificates for a deployment. + + ```sh + ./scripts/generate_certs.sh + ``` + +5. Set `RISC0_DEV_MODE` environment variable if tests are going to be run or + deployment that requires it: + + ```sh + export RISC0_DEV_MODE=1 + ``` + +6. [Optional] Download pre-generated BitVM cache. If not downloaded, it will be + generated automatically. + + ```sh + wget https://static.testnet.citrea.xyz/common/bitvm_cache_v3.bin -O bitvm_cache.bin + wget https://static.testnet.citrea.xyz/common/bitvm_cache_dev.bin -O bitvm_cache_dev.bin + export BITVM_CACHE_PATH=/path/to/bitvm_cache.bin # If RISC0_DEV_MODE is not set + export BITVM_CACHE_PATH=/path/to/bitvm_cache_dev.bin # If RISC0_DEV_MODE is set + ``` + +7. Set `RUST_MIN_STACK` environment variable to at least `33554432` + + ```sh + # On Unix-like systems: + export RUST_MIN_STACK=33554432 + ``` + +## Configure Clementine + +Clementine can be configured to enable automation at build-time via the +`automation` feature. The automation feature enables the State Manager and +Transaction Sender which automatically fulfills the duties of +verifier/operator/aggregator entities. 
It also enables automatic sending and +management of transactions to the Bitcoin network via Transaction Sender. + +```bash +cargo build --release --features automation +``` + +Clementine supports two runtime primary configuration methods: + +1. **Configuration Files**: Specify main configuration and protocol parameters via TOML files +2. **Environment Variables**: Configure the application entirely through environment variables + +### Configuration Files + +Running the binary as a verifier, aggregator, or operator requires a +configuration file. An example configuration file is located at +[`core/src/test/data/bridge_config.toml`](../core/src/test/data/bridge_config.toml) and can +be taken as reference. Please copy that configuration file to another location +and modify fields to your local configuration. + +Additionally, Clementine requires protocol parameters, that are either specified +by a file or from the environment. You can specify a separate protocol +parameters file using the `--protocol-params` option. This file contains +protocol-specific settings that affect transactions in the contract. + +### Environment Variables + +It is also possible to use environment variables instead of configuration files. +The `.env.example` file can be taken as a reference for this matter. + +### Configuration Source Selection + +Clementine uses the following logic to determine the configuration source: + +1. **Main Configuration**: + + - If `READ_CONFIG_FROM_ENV=1` or `READ_CONFIG_FROM_ENV=on`, configuration is read from environment variables + - If `READ_CONFIG_FROM_ENV=0` or `READ_CONFIG_FROM_ENV=off` or not set, configuration is read from the specified config file + +2. 
**Protocol Parameters**: + - If `READ_PARAMSET_FROM_ENV=1` or `READ_PARAMSET_FROM_ENV=on`, protocol parameters are read from environment variables + - If `READ_PARAMSET_FROM_ENV=0` or `READ_PARAMSET_FROM_ENV=off` or not set, protocol parameters are read from the specified protocol parameters file + +You can mix these approaches - for example, reading main configuration from a file but protocol parameters from environment variables. + +## RPC Authentication + +Clementine uses mutual TLS (mTLS) to secure gRPC communications between entities +and to authenticate clients. Client certificates are verified and filtered by +the verifier/operator to ensure that: + +1. Verifier/Operator methods can only be called by the aggregator (using + aggregator's client certificate `aggregator_cert_path`) +2. Internal methods can only be called by the entity's own client certificate + (using the entity's client certificate `client_cert_path`) + +The aggregator does not enforce client certificates but does use TLS for encryption. + +### Certificate Setup for Tests + +Before running the servers, you need to generate certificates. 
A script is provided for this purpose: + +```bash +# Run from the project root +./scripts/generate_certs.sh +``` + +This will create certificates in the following structure: + +```text +certs/ +โ”œโ”€โ”€ ca/ +โ”‚ โ”œโ”€โ”€ ca.key # CA private key +โ”‚ โ””โ”€โ”€ ca.pem # CA certificate +โ”œโ”€โ”€ server/ +โ”‚ โ”œโ”€โ”€ ca.pem # Copy of CA certificate (for convenience) +โ”‚ โ”œโ”€โ”€ server.key # Server private key +โ”‚ โ””โ”€โ”€ server.pem # Server certificate +โ”œโ”€โ”€ client/ +โ”‚ โ”œโ”€โ”€ ca.pem # Copy of CA certificate (for convenience) +โ”‚ โ”œโ”€โ”€ client.key # Client private key +โ”‚ โ””โ”€โ”€ client.pem # Client certificate +โ””โ”€โ”€ aggregator/ + โ”œโ”€โ”€ ca.pem # Copy of CA certificate (for convenience) + โ”œโ”€โ”€ aggregator.key # Aggregator private key + โ””โ”€โ”€ aggregator.pem # Aggregator certificate +``` + +> [!NOTE] +> For production use, you should use certificates signed by a trusted CA rather than self-signed ones. + +## Starting a Server + +Clementine is designed to be run multiple times for every actor that an entity +requires. An actor's server can be started using its corresponding argument. +Please follow instruction steps before trying to start a server. 
+ +### Compiling Manually + +```sh +# Build the binary (with optional automation) +cargo build --release [--features automation] + +# Run binary with configuration file +./target/release/clementine-core verifier --config /path/to/config.toml +./target/release/clementine-core operator --config /path/to/config.toml +./target/release/clementine-core aggregator --config /path/to/config.toml + +# Run with both configuration and protocol parameter files +./target/release/clementine-core verifier --config /path/to/config.toml --protocol-params /path/to/params.toml + +# Run with environment variables +READ_CONFIG_FROM_ENV=1 READ_PARAMSET_FROM_ENV=1 ./target/release/clementine-core verifier + +# Mixing configuration sources +READ_CONFIG_FROM_ENV=0 READ_PARAMSET_FROM_ENV=1 ./target/release/clementine-core verifier --config /path/to/config.toml +``` + +A server's log level can be specified with `--verbose` flag: + +```sh +./target/release/clementine-core operator --config /path/to/config.toml --verbose 5 # Logs everything +``` + +Setting `RUST_LIB_BACKTRACE` to `full` will enable full backtraces for errors + +```sh +RUST_LIB_BACKTRACE=full ./target/release/clementine-core operator --config /path/to/config.toml +``` + +For more information, use `--help` flag: + +```sh +./target/release/clementine-core --help +``` + +### Using Docker + +A docker image is provided in +[Docker Hub](https://hub.docker.com/r/chainwayxyz/clementine). It can also be +locally built with: + +```bash +docker build -f scripts/docker/Dockerfile -t clementine:latest . +``` + +Also, there are multiple Docker compose files located at [scripts/docker/](../scripts/docker/) +which can be used to start Bitcoin, PostgreSQL, Citrea and Clementine. Config +files for these compose files can be found at [scripts/docker/configs/](../scripts/docker/configs/). +They are configured for a typical deployment and needs modification before deployment. 
+**Please note that**, apart from regtest, the newly created wallet won't have +any funds and users are responsible for configuring their own address. + +```sh +docker compose -f scripts/docker/docker-compose.verifier.testnet4.yml up +docker compose -f scripts/docker/docker-compose.full.regtest.yml up +``` + +## Running Tests + +To run all tests: + +```sh +cargo test --all-features +``` + +Also, due to the test directory hierarchy, unit and integration tests can be +run separately: + +```sh +cargo test_unit +cargo test_integration +``` + +## Helper Scripts + +There are a handful of scripts in the [scripts/](../scripts/) directory. Most of +them are for testing but can still be used for setting up the environment. They +can change quite frequently, so please check for useful ones. + +Each script should have a name and a comment inside that explains its purpose. + +## Debugging Tokio Tasks (`tokio-console`) + +To debug tokio tasks, you can uncomment the `console-subscriber` dependency in `Cargo.toml` and the `console_subscriber::init();` line in `src/utils.rs`. Then, rebuild the project with `cargo build_console`, which is an alias defined with the necessary flags. 
+ +```sh +cargo build_console +``` + +After running Clementine, you can access the console by running the following command: + +```sh +tokio-console +``` + +## Security Considerations + +### TLS Certificates + +- Keep private keys (\*.key) secure and don't commit them to version control +- In production, use properly signed certificates from a trusted CA +- Rotate certificates regularly +- Consider using distinct client certificates for different clients/services diff --git a/docs/work-only-circuit.md b/docs/work-only-circuit.md new file mode 100644 index 000000000..7cbe40fae --- /dev/null +++ b/docs/work-only-circuit.md @@ -0,0 +1,43 @@ +# Work-Only Circuit Documentation + +This document details the **Work-Only Circuit**, a specialized zero-knowledge virtual machine (zkVM) circuit designed to verify and commit the accumulated proof of work from a previously executed Bitcoin header chain circuit. Its primary purpose is to extract the `total_work` value and `genesis_state_hash` from a Header Chain Circuit proof and present them as a new, concise proof output. + +### Core Logic + +The core logic for the work-only circuit is contained within the [`mod.rs`](../circuits-lib/src/work_only/mod.rs) module. The main entry point, `work_only_circuit`, receives a `WorkOnlyCircuitInput` which includes the full output of a Header Chain Circuit execution. + +The circuit begins by performing two crucial validation steps. First, it checks for method ID consistency, ensuring that the input proof's method ID matches a compile-time constant, `HEADER_CHAIN_METHOD_ID`, which is specific to the intended Bitcoin network. This prevents the circuit from verifying proofs from an incorrect or incompatible version of the Header Chain Circuit. Second, it uses the `env::verify()` function to cryptographically validate the entire `header_chain_circuit_output`. This step is a zero-knowledge check that proves the integrity and correctness of the preceding circuit's execution. 
+ +After successful verification, the circuit extracts the `total_work` and `genesis_state_hash` from the verified output. A private helper function, `work_conversion`, is then used to convert the 256-bit `total_work` value into a 128-bit representation, effectively truncating it to its lower 128 bits. The final output, a `WorkOnlyCircuitOutput`, contains this 128-bit work value and the genesis state hash, which are then committed to the zkVM's output. + +## RISC Zero Implementation (`risc0-circuits/work-only`) + +This section provides an overview of the `risc0-circuits/work-only` module, which defines the RISC Zero specific implementation and guest environment for the Work-Only Circuit. + +### Key Files (RISC Zero Implementation) + + * **`risc0-circuits/work-only/guest/src/main.rs`** + + * This is the entry point for the **RISC Zero guest application**. It initializes the `Risc0Guest` environment and calls `circuits_lib::work_only::work_only_circuit` to execute the work verification logic inside the zkVM. + + * **`risc0-circuits/work-only/guest/Cargo.toml`** + + * The package manifest for the guest-side code. It defines the `work-only-guest` package and its dependencies, notably linking to `circuits-lib` which contains the core circuit logic. + + * **`risc0-circuits/work-only/src/lib.rs`** + + * This file includes the `methods.rs` generated by the build script, which contains information about the guest methods (ELF image and method ID) that the host can use to prove the guest's execution. Because we use hard-coded method IDs and ELFs that are relocated by build.rs, we rely on those instead. + + * **`risc0-circuits/work-only/build.rs`** + + * This is the **build script** for the host-side. It is responsible for: + * Compiling the `work-only-guest` code into a RISC Zero ELF binary. + * Computing the unique **method ID** for the compiled guest program. 
+ * Handling environment variables (like `BITCOIN_NETWORK` and `REPR_GUEST_BUILD`) to configure the build process, including optional Docker usage. + * Copying the generated ELF binary to a designated `elfs` folder. + + * **`risc0-circuits/work-only/Cargo.toml`** + + * The package manifest for the host-side crate. It defines the `work-only` package and its build-time dependencies, including `risc0-build` for the RISC Zero toolchain integration. + +----- \ No newline at end of file diff --git a/risc0-circuits/README.md b/risc0-circuits/README.md new file mode 100644 index 000000000..3a626d52c --- /dev/null +++ b/risc0-circuits/README.md @@ -0,0 +1,34 @@ +# Clementine risc0 Circuits + +## Description +This package contains the Risc0 guest programs and their entrypoints. You can find the libraries for them in the `./circuits-lib` directory. +`header-chain` is used to prove the block headers and their chaining. It calculates the total work done up to the last input block. +`work-only` is used by the Watchtowers to generate a compact proof of the work that is produced by the Bitcoin chain they are following. +`bridge-circuit` is used by the Operators to prove that they have the right to reimburse a withdrawal from the bridge vault. + +## Build +- To build `header-chain`, use +```bash +cd risc0-circuits/header-chain/ +REPR_GUEST_BUILD=1 BITCOIN_NETWORK= cargo build -p header-chain --release +``` + +- To build `work-only`, use +```bash +cd risc0-circuits/work-only/ +REPR_GUEST_BUILD=1 BITCOIN_NETWORK= cargo build -p work-only --release +``` + +- To build `bridge-circuit`, use +```bash +cd risc0-circuits/bridge-circuit/ +REPR_GUEST_BUILD=1 BITCOIN_NETWORK= BRIDGE_CIRCUIT_MODE= cargo build -p bridge-circuit --release +``` +where `BITCOIN_NETWORK` can be `mainnet`, `testnet4`, `signet`, or `regtest`. 
+ +- For testing purposes, also build the `bridge-circuit` with the `test-vk` (use Groth16 Verification Key for testing purposes) on `regtest`: +```bash +REPR_GUEST_BUILD=1 BITCOIN_NETWORK=regtest cargo build -p bridge-circuit --features use-test-vk --release +``` + + diff --git a/risc0-circuits/bridge-circuit/Cargo.lock b/risc0-circuits/bridge-circuit/Cargo.lock new file mode 100644 index 000000000..4e30ad21f --- /dev/null +++ b/risc0-circuits/bridge-circuit/Cargo.lock @@ -0,0 +1,1385 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block" +version = "0.1.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "bridge-circuit" +version = "0.1.0" +dependencies = [ + "risc0-binfmt", + "risc0-build", +] + +[[package]] +name = "bytemuck" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + 
"serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.12", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies 
= [ + "derive_builder_core", + "syn", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "docker-generate" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf673e0848ef09fa4aeeba78e681cf651c0c7d35f76ee38cec8e55bc32fa111" + +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" 
+version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + 
+[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "include_bytes_aligned" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" + +[[package]] +name = "indexmap" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +dependencies = [ + "equivalent", + "hashbrown 0.15.4", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libredox" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" +dependencies = [ + "bitflags 2.9.1", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "metal" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21" +dependencies = [ + "bitflags 2.9.1", + "block", + "core-graphics-types", + "foreign-types", + "log", + "objc", + "paste", +] + +[[package]] +name = "no_std_strings" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b0c77c1b780822bc749a33e39aeb2c07584ab93332303babeabb645298a76e" + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "postcard" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1de96e20f51df24ca73cafcc4690e044854d803259db27a00a461cb3b9d17a" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "risc0-binfmt" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62eb7025356a233c1bc267c458a2ce56fcfc89b136d813c8a77be14ef1eaf2b1" +dependencies = [ + "anyhow", + "borsh", + 
"derive_more", + "elf", + "lazy_static", + "postcard", + "risc0-zkp", + "risc0-zkvm-platform", + "semver", + "serde", + "tracing", +] + +[[package]] +name = "risc0-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ffc0f135e6c1e9851e7e19438d03ff41a9d49199ee4f6c17b8bb30b4f83910" +dependencies = [ + "anyhow", + "cargo_metadata", + "derive_builder", + "dirs", + "docker-generate", + "hex", + "risc0-binfmt", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rzup", + "semver", + "serde", + "serde_json", + "stability", + "tempfile", +] + +[[package]] +name = "risc0-core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317bbf70a8750b64d4fd7a2bdc9d7d5f30d8bb305cae486962c797ef35c8d08e" +dependencies = [ + "bytemuck", + "bytemuck_derive", + "rand_core", +] + +[[package]] +name = "risc0-zkos-v1compat" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76c479b69d1987cb54ac72dcc017197296fdcd6daf78fafc10cbbd3a167a7de" +dependencies = [ + "include_bytes_aligned", + "no_std_strings", +] + +[[package]] +name = "risc0-zkp" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287e9cd6d7b3b38eeb49c62090c46a1935922309fbd997a9143ed8c43c8f3cb" +dependencies = [ + "anyhow", + "blake2", + "borsh", + "bytemuck", + "cfg-if", + "digest", + "hex", + "hex-literal", + "metal", + "paste", + "rand_core", + "risc0-core", + "risc0-zkvm-platform", + "serde", + "sha2", + "stability", + "tracing", +] + +[[package]] +name = "risc0-zkvm-platform" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae9cb2c2f6cab2dfa395ea6e2576713929040c7fb0c5f4150d13e1119d18686" +dependencies = [ + "cfg-if", + "stability", +] + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "rzup" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400558bf12d4292a7804093b60a437ba8b0219ea7d53716b2c010a0d31e5f4a8" +dependencies = [ + "semver", + "serde", + "strum", + "tempfile", + "thiserror 2.0.12", + "toml", + "yaml-rust2", +] + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] 
+name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", +] + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] 
+name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + 
"windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + 
"bitflags 2.9.1", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/risc0-circuits/bridge-circuit/Cargo.toml b/risc0-circuits/bridge-circuit/Cargo.toml new file mode 100644 index 000000000..8230ade33 --- /dev/null +++ b/risc0-circuits/bridge-circuit/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "bridge-circuit" +version = "0.1.0" +edition = "2021" + +[workspace] + +[build-dependencies] +risc0-build = { version = "2.3.1", features = ["unstable"] } +risc0-binfmt = {version = "2.0.2"} + +[features] +default = [] +use-test-vk = [] + +[package.metadata.risc0] +methods = ["guest"] \ No newline at end of file diff --git a/risc0-circuits/bridge-circuit/build.rs b/risc0-circuits/bridge-circuit/build.rs new file mode 100644 index 000000000..eadfa8038 --- /dev/null +++ b/risc0-circuits/bridge-circuit/build.rs @@ -0,0 +1,160 @@ +use risc0_binfmt::compute_image_id; +use risc0_build::{embed_methods_with_options, DockerOptionsBuilder, GuestOptionsBuilder}; +use std::{collections::HashMap, env, fs, path::Path}; + +fn main() { + // Build environment variables + println!("cargo:rerun-if-env-changed=SKIP_GUEST_BUILD"); + println!("cargo:rerun-if-env-changed=REPR_GUEST_BUILD"); + println!("cargo:rerun-if-env-changed=OUT_DIR"); + + // Compile time constant 
environment variables + println!("cargo:rerun-if-env-changed=BITCOIN_NETWORK"); + println!("cargo:rerun-if-env-changed=TEST_SKIP_GUEST_BUILD"); + + if std::env::var("CLIPPY_ARGS").is_ok() { + let out_dir = env::var("OUT_DIR").expect("OUT_DIR not set"); + let dummy_path = Path::new(&out_dir).join("methods.rs"); + fs::write(dummy_path, "// dummy methods.rs for Clippy\n") + .expect("Failed to write dummy methods.rs"); + println!("cargo:warning=Skipping guest build in Clippy"); + return; + } + + // Check if we should skip the guest build for tests + if let Ok("1" | "true") = env::var("TEST_SKIP_GUEST_BUILD").as_deref() { + println!("cargo:warning=Skipping guest build in test. Exiting"); + return; + } + + let network = env::var("BITCOIN_NETWORK").unwrap_or_else(|_| { + println!("cargo:warning=BITCOIN_NETWORK not set, defaulting to 'mainnet'"); + "mainnet".to_string() + }); + + let is_repr_guest_build = match env::var("REPR_GUEST_BUILD") { + Ok(value) => match value.as_str() { + "1" | "true" => { + println!("cargo:warning=REPR_GUEST_BUILD is set to true"); + true + } + "0" | "false" => { + println!("cargo:warning=REPR_GUEST_BUILD is set to false"); + false + } + _ => { + println!("cargo:warning=Invalid value for REPR_GUEST_BUILD: '{}'. Expected '0', '1', 'true', or 'false'. Defaulting to false.", value); + false + } + }, + Err(env::VarError::NotPresent) => { + println!("cargo:warning=REPR_GUEST_BUILD not set. Defaulting to false."); + false + } + Err(env::VarError::NotUnicode(_)) => { + println!( + "cargo:warning=REPR_GUEST_BUILD contains invalid Unicode. Defaulting to false." 
+ ); + false + } + }; + + println!("cargo:warning=Building for Bitcoin network: {}", network); + + // Use embed_methods_with_options with our custom options + let guest_pkg_to_options = get_guest_options(network.clone()); + embed_methods_with_options(guest_pkg_to_options); + + if is_repr_guest_build { + copy_binary_to_elfs_folder(network); + println!("cargo:warning=Copying binary to elfs folder"); + } else { + println!("cargo:warning=Not copying binary to elfs folder"); + } +} + +fn get_guest_options(network: String) -> HashMap<&'static str, risc0_build::GuestOptions> { + let mut guest_pkg_to_options = HashMap::new(); + + let opts = if env::var("REPR_GUEST_BUILD").is_ok() { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + let root_dir = format!("{manifest_dir}/../../"); + + let docker_opts = DockerOptionsBuilder::default() + .root_dir(root_dir) + .env(vec![("BITCOIN_NETWORK".to_string(), network.clone())]) + .build() + .unwrap(); + + println!( + "cargo:warning=Root dir: {}", + docker_opts.root_dir().display() + ); + + let mut guest_opts_builder = GuestOptionsBuilder::default(); + guest_opts_builder.use_docker(docker_opts); + + #[cfg(feature = "use-test-vk")] + { + guest_opts_builder.features(vec!["use-test-vk".to_string()]); + } + + guest_opts_builder.build().unwrap() + } else { + println!("cargo:warning=Guest code is not built in docker"); + GuestOptionsBuilder::default().build().unwrap() + }; + + guest_pkg_to_options.insert("bridge-circuit-guest", opts); + guest_pkg_to_options +} + +fn copy_binary_to_elfs_folder(network: String) { + let current_dir = env::current_dir().expect("Failed to get current dir"); + let base_dir = current_dir.join("../.."); + let elfs_dir = base_dir.join("risc0-circuits/elfs"); + + if !elfs_dir.exists() { + fs::create_dir_all(&elfs_dir).expect("Failed to create elfs directory"); + println!("cargo:warning=Created elfs directory at {:?}", elfs_dir); + } + + let src_path = 
current_dir.join("target/riscv-guest/bridge-circuit/bridge-circuit-guest/riscv32im-risc0-zkvm-elf/docker/bridge-circuit-guest.bin"); + if !src_path.exists() { + println!( + "cargo:warning=Source binary not found at {:?}, skipping copy", + src_path + ); + return; + } + + let prefix = if cfg!(feature = "use-test-vk") { + "test-" + } else { + "" + }; + let dest_filename = format!( + "{}{}-bridge-circuit-guest.bin", + prefix, + network.to_lowercase() + ); + + let dest_path = elfs_dir.join(&dest_filename); + + match fs::copy(&src_path, &dest_path) { + Ok(_) => println!( + "cargo:warning=Successfully copied binary to {:?}", + dest_path + ), + Err(e) => println!("cargo:warning=Failed to copy binary: {}", e), + } + + let elf_bytes: Vec = fs::read(dest_path).expect("Failed to read ELF file"); + + let method_id = compute_image_id(elf_bytes.as_slice()).unwrap(); + println!("cargo:warning=Computed method ID: {:x?}", method_id); + println!( + "cargo:warning=Computed method ID words: {:?}", + method_id.as_words() + ); +} diff --git a/risc0-circuits/bridge-circuit/guest/Cargo.lock b/risc0-circuits/bridge-circuit/guest/Cargo.lock new file mode 100644 index 000000000..31ce96b70 --- /dev/null +++ b/risc0-circuits/bridge-circuit/guest/Cargo.lock @@ -0,0 +1,4889 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "alloy-consensus" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e32ef5c74bbeb1733c37f4ac7f866f8c8af208b7b4265e21af609dcac5bd5e" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-trie", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "serde", +] + +[[package]] +name = "alloy-consensus-any" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa13b7b1e1e3fedc42f0728103bfa3b4d566d3d42b606db449504d88dbdbdcf" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-eip2124" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"675264c957689f0fd75f5993a73123c2cc3b5c235a38f5b9037fe6c826bfb2c0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "crc", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b15b13d38b366d01e818fe8e710d4d702ef7499eacd44926a06171dd9585d0c" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eips" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5591581ca2ab0b3e7226a4047f9a1bfcf431da1d0cce3752fda609fea3c27e37" +dependencies = [ + "alloy-eip2124", + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "once_cell", + "serde", + "sha2", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a00ce618ae2f78369918be0c20f620336381502c83b6ed62c2f7b2db27698b0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 2.0.1", + "foldhash", + "hashbrown 0.15.4", + "indexmap", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" 
+version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f18e68a3882f372e045ddc89eb455469347767d17878ca492cfbac81e71a111" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-engine" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83dde9fcf1ccb9b815cc0c89bba26bbbbaae5150a53ae624ed0fc63cb3676c1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "derive_more 1.0.0", + "jsonwebtoken", + "rand 0.8.5", + "serde", + "strum", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b4dbee4d82f8a22dde18c28257bed759afeae7ba73da4a1479a039fd1445d04" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.14.0", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-serde" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8732058f5ca28c1d53d241e8504620b997ef670315d7c8afab856b3e3b80d945" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] 
+name = "alloy-sol-macro" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10ae8e9a91d328ae954c22542415303919aabe976fe7a92eb06db1b68fd59f2" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83ad5da86c127751bc607c174d6c9fe9b85ef0889a9ca0c641735d77d4f98f26" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3d30f0d3f9ba3b7686f3ff1de9ee312647aac705604417a2f40c604f409a9e" +dependencies = [ + "const-hex", + "dunce", + "heck 0.5.0", + "macro-string", + "proc-macro2", + "quote", + "syn 2.0.104", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d43d5e60466a440230c07761aa67671d4719d46f43be8ea6e7ed334d8db4a9ab" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-trie" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a94854e420f07e962f7807485856cde359ab99ab6413883e15235ad996e8b" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more 1.0.0", + "nybbles", + "serde", + "smallvec", + "tracing", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-r1cs-std", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0c292754729c8a190e50414fd1a37093c786c709899f29c9f7daccecfa855e" +dependencies = [ + "ahash", + "ark-crypto-primitives-macros", + "ark-ec", + "ark-ff 0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-snark", + "ark-std 0.5.0", + "blake2", + "derivative", + "digest 0.10.7", + "fnv", + "merlin", + "sha2", +] + +[[package]] +name = "ark-crypto-primitives-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e89fe77d1f0f4fe5b96dfc940923d88d17b6a773808124f21e764dfb063c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", + "itertools 0.13.0", + "num-bigint", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ 
+ "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-groth16" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88f1d0f3a534bb54188b8dcc104307db6c56cdae574ddc3212aec0625740fc7e" +dependencies = [ + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-poly", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", +] + +[[package]] +name = "ark-r1cs-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "941551ef1df4c7a401de7068758db6503598e6f01850bdb2cfdb614a1f9dbea1" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-relations", + "ark-std 0.5.0", + "educe", + "num-bigint", + "num-integer", + "num-traits", + "tracing", +] + +[[package]] +name = "ark-relations" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec46ddc93e7af44bcab5230937635b06fb5744464dd6a7e7b083e80ebd274384" +dependencies = [ + "ark-ff 0.5.0", + "ark-std 0.5.0", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + 
+[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-snark" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d368e2848c2d4c129ce7679a7d0d2d612b6a274d3ea6a13bad4445d61b381b88" +dependencies = [ + "ark-ff 0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] + +[[package]] +name = "auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "backtrace" +version = "0.3.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base58ck" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" +dependencies = [ + "bitcoin-internals", + "bitcoin_hashes", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + +[[package]] +name = "bech32" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitcoin" +version = "0.32.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8929a18b8e33ea6b3c09297b687baaa71fb1b97353243a3f1029fad5c59c5b" +dependencies = [ + "base58ck", + "base64 0.21.7", + "bech32", + "bitcoin-internals", + "bitcoin-io", + "bitcoin-units", + "bitcoin_hashes", + "hex-conservative", + "hex_lit", + "secp256k1", + "serde", +] + +[[package]] +name = "bitcoin-internals" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" +dependencies = [ + "serde", +] + +[[package]] +name = "bitcoin-io" 
+version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" + +[[package]] +name = "bitcoin-units" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" +dependencies = [ + "bitcoin-internals", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "blake3" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blst" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "bonsai-sdk" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bce8d6acc5286a16e94c29e9c885d1869358885e08a6feeb6bc54e36fe20055" +dependencies = [ + "duplicate", + "maybe-async", + "reqwest", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "bytes", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "bridge-circuit-guest" +version = "0.1.0" +dependencies = [ + "circuits-lib", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" + +[[package]] +name = "bytemuck" 
+version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +dependencies = [ + "serde", +] + +[[package]] +name = "c-kzg" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "once_cell", + "serde", +] + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.26", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] 
+name = "cc" +version = "1.2.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "circuits-lib" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types", + "alloy-rpc-types-eth", + "ark-bn254", + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-groth16", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "bincode", + "bitcoin", + "blake3", + "borsh", + "crypto-bigint", + "derive_more 1.0.0", + "eyre", + "hex", + "hex-literal", + "itertools 0.14.0", + "jmt 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "k256", + "lazy_static", + "num-bigint", + "num-traits", + "once_cell", + "risc0-groth16", + "risc0-zkvm", + "serde", + "serde_json", + "sha2", + "sov-rollup-interface", + "tracing", +] + +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.12", +] + +[[package]] +name = "const-hex" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies 
= [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "git+https://github.com/risc0/RustCrypto-crypto-bigint?tag=v0.5.5-risczero.0#3ab63a6f1048833f7047d5a50532e4a4cc789384" +dependencies = [ + "generic-array", + "getrandom 0.2.16", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.104", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.104", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", +] + +[[package]] +name = "derive_more-impl" +version = 
"1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "docker-generate" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ccf673e0848ef09fa4aeeba78e681cf651c0c7d35f76ee38cec8e55bc32fa111" + +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "duplicate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de78e66ac9061e030587b2a2e75cc88f22304913c907b11307bca737141230cb" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "serdect", + "signature", + "spki", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pem-rfc7468", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "eyre" 
+version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 
2.0.104", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +dependencies = [ + "allocator-api2", + "foldhash", + "serde", +] + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hex_lit" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hyper" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-util" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "ics23" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b17f1a5bd7d12ad30a21445cfa5f52fd7651cb3243ba866f9916b1ec112f12" +dependencies = [ + "anyhow", + "blake2", + "blake3", + "bytes", + "hex", + "informalsystems-pbjson", + "prost", + "ripemd", + "serde", + "sha2", + "sha3", +] + +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "include_bytes_aligned" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +dependencies = [ + "equivalent", + "hashbrown 0.15.4", + "serde", +] + +[[package]] +name = "informalsystems-pbjson" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa4a0980c8379295100d70854354e78df2ee1c6ca0f96ffe89afeb3140e3a3d" +dependencies = [ + "base64 0.21.7", + "serde", +] + +[[package]] +name = "io-uring" +version = "0.7.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jmt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf2a10370b45cd850e64993ccd81d25ea2d4b5b0d0312546e7489fed82064f2e" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "jmt" 
+version = "0.11.0" +source = "git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2#550a2f20984a5c31c51715381d3f67390e138ffa" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "9.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +dependencies = [ + "base64 0.22.1", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "serdect", + "sha2", + "signature", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "lazy-regex" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60c7310b93682b36b98fa7ea4de998d3463ccbebd94d935d6b48ba5b6ffa7126" +dependencies = [ + "lazy-regex-proc_macros", + "once_cell", + 
"regex", +] + +[[package]] +name = "lazy-regex-proc_macros" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ba01db5ef81e17eb10a5e0f2109d1b3a3e29bac3070fdbd7d156bf7dbd206a1" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.104", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360e552c93fa0e8152ab463bc4c4837fce76a225df11dfaeea66c313de5e61f7" +dependencies = [ + "bitflags 2.9.1", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = 
"macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "maybe-async" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "metal" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21" +dependencies = [ + "bitflags 2.9.1", + "block", + "core-graphics-types", + "foreign-types", + "log", + "objc", + "paste", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + 
"wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + +[[package]] +name = "no_std_strings" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b0c77c1b780822bc749a33e39aeb2c07584ab93332303babeabb645298a76e" + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "nybbles" +version = "0.3.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "const-hex", + "serde", + "smallvec", +] + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "parity-scale-codec" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pem" +version = "3.0.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" +dependencies = [ + "memchr", + "thiserror 2.0.12", + "ucd-trie", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "postcard" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.9.1", + "lazy_static", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quinn" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.5.10", + "thiserror 
2.0.12", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +dependencies = [ + "bytes", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.12", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "serde", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = 
"0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + 
"aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "risc0-binfmt" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62eb7025356a233c1bc267c458a2ce56fcfc89b136d813c8a77be14ef1eaf2b1" +dependencies = [ + "anyhow", + "borsh", + "derive_more 2.0.1", + "elf", + "lazy_static", + "postcard", + "risc0-zkp", + "risc0-zkvm-platform", + "semver 1.0.26", + "serde", + "tracing", +] + +[[package]] +name = "risc0-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ffc0f135e6c1e9851e7e19438d03ff41a9d49199ee4f6c17b8bb30b4f83910" +dependencies = [ + "anyhow", + "cargo_metadata", + "derive_builder", + "dirs", + "docker-generate", + "hex", + "risc0-binfmt", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rzup", + "semver 1.0.26", + "serde", + "serde_json", + "stability", + "tempfile", +] + +[[package]] +name = "risc0-circuit-keccak" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0094af5a57b020388a03bdd3834959c7d62723f1687be81414ade25104d93263" +dependencies = [ + "anyhow", + "bytemuck", + "paste", + "risc0-binfmt", + "risc0-circuit-recursion", + "risc0-core", + "risc0-zkp", + "tracing", +] + +[[package]] +name = "risc0-circuit-recursion" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ebded45c902c2b6939924a1cddd1d06b5d1d4ad1531e8798ebfee78f9c038d" +dependencies = [ + "anyhow", + "bytemuck", + "hex", + "metal", + "risc0-core", + "risc0-zkp", + "tracing", +] + +[[package]] +name = "risc0-circuit-rv32im" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15030849f8356f01f23c74b37dbfa4283100b594eb634109993e9e005ef45f64" +dependencies = [ + "anyhow", + "bit-vec", + "bytemuck", + "derive_more 2.0.1", + "paste", + "risc0-binfmt", + "risc0-core", + "risc0-zkp", + "serde", + "tracing", +] + +[[package]] +name = "risc0-core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317bbf70a8750b64d4fd7a2bdc9d7d5f30d8bb305cae486962c797ef35c8d08e" 
+dependencies = [ + "bytemuck", + "bytemuck_derive", + "rand_core 0.6.4", +] + +[[package]] +name = "risc0-groth16" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cf5d0b673d5fc67a89147c2e9c53134707dcc8137a43d1ef06b4ff68e99b74f" +dependencies = [ + "anyhow", + "ark-bn254", + "ark-ec", + "ark-groth16", + "ark-serialize 0.5.0", + "bytemuck", + "hex", + "num-bigint", + "num-traits", + "risc0-binfmt", + "risc0-zkp", + "serde", + "stability", +] + +[[package]] +name = "risc0-zkos-v1compat" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76c479b69d1987cb54ac72dcc017197296fdcd6daf78fafc10cbbd3a167a7de" +dependencies = [ + "include_bytes_aligned", + "no_std_strings", +] + +[[package]] +name = "risc0-zkp" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287e9cd6d7b3b38eeb49c62090c46a1935922309fbd997a9143ed8c43c8f3cb" +dependencies = [ + "anyhow", + "blake2", + "borsh", + "bytemuck", + "cfg-if", + "digest 0.10.7", + "hex", + "hex-literal", + "metal", + "paste", + "rand_core 0.6.4", + "risc0-core", + "risc0-zkvm-platform", + "serde", + "sha2", + "stability", + "tracing", +] + +[[package]] +name = "risc0-zkvm" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9684b333c1c5d83f29ce2a92314ccfafd9d8cdfa6c4e19c07b97015d2f1eb9d0" +dependencies = [ + "anyhow", + "bincode", + "bonsai-sdk", + "borsh", + "bytemuck", + "bytes", + "derive_more 2.0.1", + "getrandom 0.2.16", + "hex", + "lazy-regex", + "prost", + "risc0-binfmt", + "risc0-build", + "risc0-circuit-keccak", + "risc0-circuit-recursion", + "risc0-circuit-rv32im", + "risc0-core", + "risc0-groth16", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rrs-lib", + "rzup", + "semver 1.0.26", + "serde", + "sha2", + "stability", + "tempfile", + "tracing", +] + +[[package]] +name = "risc0-zkvm-platform" +version = "2.0.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae9cb2c2f6cab2dfa395ea6e2576713929040c7fb0c5f4150d13e1119d18686" +dependencies = [ + "bytemuck", + "cfg-if", + "getrandom 0.2.16", + "getrandom 0.3.3", + "libm", + "stability", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "rrs-lib" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4382d3af3a4ebdae7f64ba6edd9114fff92c89808004c4943b393377a25d001" +dependencies = [ + "downcast-rs", + "paste", +] + +[[package]] +name = "ruint" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rand 0.9.2", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.26", +] + +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustls" +version = "0.23.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "rzup" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400558bf12d4292a7804093b60a437ba8b0219ea7d53716b2c010a0d31e5f4a8" +dependencies = [ + "semver 1.0.26", + "serde", + "strum", + "tempfile", + "thiserror 2.0.12", + "toml", + "yaml-rust2", +] + +[[package]] +name = "schemars" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.104", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" +dependencies = [ + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_json" +version = "1.0.141" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "git+https://github.com/risc0/RustCrypto-hashes?tag=sha2-v0.10.8-risczero.0#244dc3b08788f7a4ccce14c66896ae3b4f24c166" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", + "sha2-asm", +] + +[[package]] +name = "sha2-asm" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +dependencies = [ + "cc", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror 2.0.12", + "time", +] + +[[package]] +name = "slab" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "sov-keys" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "borsh", + "derive_more 1.0.0", + "digest 0.10.7", + "hex", + "k256", + "rand 0.8.5", + "schemars", + "serde", + "sha2", + "thiserror 2.0.12", +] + +[[package]] +name = "sov-rollup-interface" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "anyhow", + "borsh", + "bytes", + "digest 0.10.7", + "jmt 0.11.0 
(git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2)", + "serde", + "sha2", + "sov-keys", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.104", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn-solidity" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4560533fbd6914b94a8fb5cc803ed6801c3455668db3b810702c57612bac9412" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = 
"1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + 
+[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" +dependencies = [ + "backtrace", + "bytes", + "io-uring", + "libc", + "mio", + "pin-project-lite", + "slab", + "socket2 0.6.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + 
"static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + +[[package]] +name = "want" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 
0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = 
"windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[patch.unused]] +name = "k256" +version = "0.13.3" +source = "git+https://github.com/risc0/RustCrypto-elliptic-curves?tag=k256%2Fv0.13.3-risczero.1#ff5d67b095cfcc2569b7789f2079ed87ef2c7756" diff --git a/risc0-circuits/bridge-circuit/guest/Cargo.toml b/risc0-circuits/bridge-circuit/guest/Cargo.toml new file mode 100644 index 000000000..a4f628944 --- /dev/null +++ b/risc0-circuits/bridge-circuit/guest/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "bridge-circuit-guest" +version = "0.1.0" +edition = "2021" + +[workspace] + +[features] +use-test-vk = ["circuits-lib/use-test-vk"] + +[dependencies] +circuits-lib = { path = "../../../circuits-lib" } + +[patch.crates-io] +sha2 = { git = "https://github.com/risc0/RustCrypto-hashes", tag = "sha2-v0.10.8-risczero.0" } +crypto-bigint = { git = 
"https://github.com/risc0/RustCrypto-crypto-bigint", tag = "v0.5.5-risczero.0" } +k256 = { git = "https://github.com/risc0/RustCrypto-elliptic-curves", tag = "k256/v0.13.3-risczero.1" } + +[profile.release] +debug = 0 +lto = true +opt-level = 3 +codegen-units = 1 \ No newline at end of file diff --git a/risc0-circuits/bridge-circuit/guest/src/main.rs b/risc0-circuits/bridge-circuit/guest/src/main.rs new file mode 100644 index 000000000..a7527a999 --- /dev/null +++ b/risc0-circuits/bridge-circuit/guest/src/main.rs @@ -0,0 +1,24 @@ +use circuits_lib::{ + bridge_circuit::{ + bridge_circuit, + constants::{ + MAINNET_WORK_ONLY_METHOD_ID, REGTEST_WORK_ONLY_METHOD_ID, SIGNET_WORK_ONLY_METHOD_ID, + TESTNET4_WORK_ONLY_METHOD_ID, + }, + }, + common, +}; + +pub static WORK_ONLY_IMAGE_ID: [u8; 32] = match option_env!("BITCOIN_NETWORK") { + Some(network) if matches!(network.as_bytes(), b"mainnet") => MAINNET_WORK_ONLY_METHOD_ID, + Some(network) if matches!(network.as_bytes(), b"testnet4") => TESTNET4_WORK_ONLY_METHOD_ID, + Some(network) if matches!(network.as_bytes(), b"signet") => SIGNET_WORK_ONLY_METHOD_ID, + Some(network) if matches!(network.as_bytes(), b"regtest") => REGTEST_WORK_ONLY_METHOD_ID, + None => MAINNET_WORK_ONLY_METHOD_ID, + _ => panic!("Invalid network type"), +}; + +fn main() { + let zkvm_guest = common::zkvm::Risc0Guest::new(); + bridge_circuit(&zkvm_guest, WORK_ONLY_IMAGE_ID); +} diff --git a/risc0-circuits/bridge-circuit/src/lib.rs b/risc0-circuits/bridge-circuit/src/lib.rs new file mode 100644 index 000000000..1bdb3085f --- /dev/null +++ b/risc0-circuits/bridge-circuit/src/lib.rs @@ -0,0 +1 @@ +include!(concat!(env!("OUT_DIR"), "/methods.rs")); diff --git a/risc0-circuits/elfs/mainnet-bridge-circuit-guest.bin b/risc0-circuits/elfs/mainnet-bridge-circuit-guest.bin new file mode 100644 index 000000000..3359f229f Binary files /dev/null and b/risc0-circuits/elfs/mainnet-bridge-circuit-guest.bin differ diff --git 
a/risc0-circuits/elfs/mainnet-header-chain-guest.bin b/risc0-circuits/elfs/mainnet-header-chain-guest.bin new file mode 100644 index 000000000..2586d808c Binary files /dev/null and b/risc0-circuits/elfs/mainnet-header-chain-guest.bin differ diff --git a/risc0-circuits/elfs/mainnet-work-only-guest.bin b/risc0-circuits/elfs/mainnet-work-only-guest.bin new file mode 100644 index 000000000..e7eac1485 Binary files /dev/null and b/risc0-circuits/elfs/mainnet-work-only-guest.bin differ diff --git a/risc0-circuits/elfs/regtest-bridge-circuit-guest.bin b/risc0-circuits/elfs/regtest-bridge-circuit-guest.bin new file mode 100644 index 000000000..009b79678 Binary files /dev/null and b/risc0-circuits/elfs/regtest-bridge-circuit-guest.bin differ diff --git a/risc0-circuits/elfs/regtest-header-chain-guest.bin b/risc0-circuits/elfs/regtest-header-chain-guest.bin new file mode 100644 index 000000000..da1ffb9c0 Binary files /dev/null and b/risc0-circuits/elfs/regtest-header-chain-guest.bin differ diff --git a/risc0-circuits/elfs/regtest-work-only-guest.bin b/risc0-circuits/elfs/regtest-work-only-guest.bin new file mode 100644 index 000000000..68f7bcd79 Binary files /dev/null and b/risc0-circuits/elfs/regtest-work-only-guest.bin differ diff --git a/risc0-circuits/elfs/signet-bridge-circuit-guest.bin b/risc0-circuits/elfs/signet-bridge-circuit-guest.bin new file mode 100644 index 000000000..512087879 Binary files /dev/null and b/risc0-circuits/elfs/signet-bridge-circuit-guest.bin differ diff --git a/risc0-circuits/elfs/signet-header-chain-guest.bin b/risc0-circuits/elfs/signet-header-chain-guest.bin new file mode 100644 index 000000000..5259ab9b8 Binary files /dev/null and b/risc0-circuits/elfs/signet-header-chain-guest.bin differ diff --git a/risc0-circuits/elfs/signet-work-only-guest.bin b/risc0-circuits/elfs/signet-work-only-guest.bin new file mode 100644 index 000000000..7433363c7 Binary files /dev/null and b/risc0-circuits/elfs/signet-work-only-guest.bin differ diff --git 
a/risc0-circuits/elfs/test-regtest-bridge-circuit-guest.bin b/risc0-circuits/elfs/test-regtest-bridge-circuit-guest.bin new file mode 100644 index 000000000..4ff9634e9 Binary files /dev/null and b/risc0-circuits/elfs/test-regtest-bridge-circuit-guest.bin differ diff --git a/risc0-circuits/elfs/testnet4-bridge-circuit-guest.bin b/risc0-circuits/elfs/testnet4-bridge-circuit-guest.bin new file mode 100644 index 000000000..7a02d8e32 Binary files /dev/null and b/risc0-circuits/elfs/testnet4-bridge-circuit-guest.bin differ diff --git a/risc0-circuits/elfs/testnet4-header-chain-guest.bin b/risc0-circuits/elfs/testnet4-header-chain-guest.bin new file mode 100644 index 000000000..4b7a80eee Binary files /dev/null and b/risc0-circuits/elfs/testnet4-header-chain-guest.bin differ diff --git a/risc0-circuits/elfs/testnet4-work-only-guest.bin b/risc0-circuits/elfs/testnet4-work-only-guest.bin new file mode 100644 index 000000000..5c5d379aa Binary files /dev/null and b/risc0-circuits/elfs/testnet4-work-only-guest.bin differ diff --git a/risc0-circuits/header-chain/Cargo.lock b/risc0-circuits/header-chain/Cargo.lock new file mode 100644 index 000000000..a76b17720 --- /dev/null +++ b/risc0-circuits/header-chain/Cargo.lock @@ -0,0 +1,1385 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", 
+] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "bytemuck" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] 
+name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.12", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "docker-generate" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf673e0848ef09fa4aeeba78e681cf651c0c7d35f76ee38cec8e55bc32fa111" + +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "header-chain" +version = "0.1.0" +dependencies = [ + "risc0-binfmt", + "risc0-build", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "include_bytes_aligned" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" + +[[package]] +name = "indexmap" +version = "2.10.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +dependencies = [ + "equivalent", + "hashbrown 0.15.4", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libredox" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" +dependencies = [ + "bitflags 2.9.1", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "metal" +version = "0.29.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21" +dependencies = [ + "bitflags 2.9.1", + "block", + "core-graphics-types", + "foreign-types", + "log", + "objc", + "paste", +] + +[[package]] +name = "no_std_strings" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b0c77c1b780822bc749a33e39aeb2c07584ab93332303babeabb645298a76e" + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "postcard" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1de96e20f51df24ca73cafcc4690e044854d803259db27a00a461cb3b9d17a" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" 
+dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "risc0-binfmt" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62eb7025356a233c1bc267c458a2ce56fcfc89b136d813c8a77be14ef1eaf2b1" +dependencies = [ + "anyhow", + "borsh", + "derive_more", + "elf", + "lazy_static", + "postcard", + "risc0-zkp", + "risc0-zkvm-platform", + "semver", + "serde", + "tracing", +] + +[[package]] +name = "risc0-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ffc0f135e6c1e9851e7e19438d03ff41a9d49199ee4f6c17b8bb30b4f83910" +dependencies = [ + "anyhow", + "cargo_metadata", + "derive_builder", + "dirs", + "docker-generate", + "hex", + "risc0-binfmt", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rzup", + "semver", + "serde", + 
"serde_json", + "stability", + "tempfile", +] + +[[package]] +name = "risc0-core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317bbf70a8750b64d4fd7a2bdc9d7d5f30d8bb305cae486962c797ef35c8d08e" +dependencies = [ + "bytemuck", + "bytemuck_derive", + "rand_core", +] + +[[package]] +name = "risc0-zkos-v1compat" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76c479b69d1987cb54ac72dcc017197296fdcd6daf78fafc10cbbd3a167a7de" +dependencies = [ + "include_bytes_aligned", + "no_std_strings", +] + +[[package]] +name = "risc0-zkp" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287e9cd6d7b3b38eeb49c62090c46a1935922309fbd997a9143ed8c43c8f3cb" +dependencies = [ + "anyhow", + "blake2", + "borsh", + "bytemuck", + "cfg-if", + "digest", + "hex", + "hex-literal", + "metal", + "paste", + "rand_core", + "risc0-core", + "risc0-zkvm-platform", + "serde", + "sha2", + "stability", + "tracing", +] + +[[package]] +name = "risc0-zkvm-platform" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae9cb2c2f6cab2dfa395ea6e2576713929040c7fb0c5f4150d13e1119d18686" +dependencies = [ + "cfg-if", + "stability", +] + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + 
+[[package]] +name = "rzup" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400558bf12d4292a7804093b60a437ba8b0219ea7d53716b2c010a0d31e5f4a8" +dependencies = [ + "semver", + "serde", + "strum", + "tempfile", + "thiserror 2.0.12", + "toml", + "yaml-rust2", +] + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + 
+[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = 
"thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + 
"log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", +] + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 
0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = 
"0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/risc0-circuits/header-chain/Cargo.toml b/risc0-circuits/header-chain/Cargo.toml new file mode 100644 index 000000000..790051f0b --- /dev/null +++ b/risc0-circuits/header-chain/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "header-chain" +version = "0.1.0" +edition = "2021" + +[workspace] + +[build-dependencies] +risc0-build = { version = "2.3.1", features = ["unstable"] } +risc0-binfmt = {version = "2.0.2"} + +[package.metadata.risc0] +methods = ["guest"] \ No newline at end of file diff --git a/risc0-circuits/header-chain/build.rs b/risc0-circuits/header-chain/build.rs new file mode 100644 index 000000000..9645a4494 --- /dev/null +++ b/risc0-circuits/header-chain/build.rs @@ -0,0 +1,164 @@ +use risc0_binfmt::compute_image_id; +use risc0_build::{embed_methods_with_options, DockerOptionsBuilder, GuestOptionsBuilder}; +use std::{collections::HashMap, env, fs, path::Path}; + +fn main() { + // Build environment variables + println!("cargo:rerun-if-env-changed=SKIP_GUEST_BUILD"); + println!("cargo:rerun-if-env-changed=REPR_GUEST_BUILD"); + println!("cargo:rerun-if-env-changed=OUT_DIR"); + + // Compile time constant environment variables + println!("cargo:rerun-if-env-changed=BITCOIN_NETWORK"); + println!("cargo:rerun-if-env-changed=TEST_SKIP_GUEST_BUILD"); + + if std::env::var("CLIPPY_ARGS").is_ok() { + let out_dir = env::var("OUT_DIR").expect("OUT_DIR not set"); + let dummy_path = Path::new(&out_dir).join("methods.rs"); + fs::write(dummy_path, "// dummy methods.rs for Clippy\n") + .expect("Failed to write dummy methods.rs"); + println!("cargo:warning=Skipping guest build in Clippy"); + return; + } + + // Check if we should skip the guest build for tests + if let Ok("1" | "true") = env::var("TEST_SKIP_GUEST_BUILD").as_deref() { + println!("cargo:warning=Skipping 
guest build in test. Exiting"); + return; + } + + let network = env::var("BITCOIN_NETWORK").unwrap_or_else(|_| { + println!("cargo:warning=BITCOIN_NETWORK not set, defaulting to 'mainnet'"); + "mainnet".to_string() + }); + println!("cargo:warning=Building for Bitcoin network: {}", network); + + let is_repr_guest_build = match env::var("REPR_GUEST_BUILD") { + Ok(value) => match value.as_str() { + "1" | "true" => { + println!("cargo:warning=REPR_GUEST_BUILD is set to true"); + true + } + "0" | "false" => { + println!("cargo:warning=REPR_GUEST_BUILD is set to false"); + false + } + _ => { + println!("cargo:warning=Invalid value for REPR_GUEST_BUILD: '{}'. Expected '0', '1', 'true', or 'false'. Defaulting to false.", value); + false + } + }, + Err(env::VarError::NotPresent) => { + println!("cargo:warning=REPR_GUEST_BUILD not set. Defaulting to false."); + false + } + Err(env::VarError::NotUnicode(_)) => { + println!( + "cargo:warning=REPR_GUEST_BUILD contains invalid Unicode. Defaulting to false." 
+ ); + false + } + }; + + // Use embed_methods_with_options with our custom options + let guest_pkg_to_options = get_guest_options(network.clone()); + embed_methods_with_options(guest_pkg_to_options); + + // After the build is complete, copy the generated file to the elfs folder + if is_repr_guest_build { + println!("cargo:warning=Copying binary to elfs folder"); + copy_binary_to_elfs_folder(network); + } else { + println!("cargo:warning=Not copying binary to elfs folder"); + } +} + +fn get_guest_options(network: String) -> HashMap<&'static str, risc0_build::GuestOptions> { + let mut guest_pkg_to_options = HashMap::new(); + + let opts = if env::var("REPR_GUEST_BUILD").is_ok() { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + let root_dir = format!("{manifest_dir}/../../"); + + println!( + "cargo:warning=Using Docker for guest build with root dir: {}", + root_dir + ); + + let docker_opts = DockerOptionsBuilder::default() + .root_dir(root_dir) + .env(vec![("BITCOIN_NETWORK".to_string(), network.clone())]) + .build() + .unwrap(); + + GuestOptionsBuilder::default() + // .features(features) + .use_docker(docker_opts) + .build() + .unwrap() + } else { + println!("cargo:warning=Guest code is not built in docker"); + GuestOptionsBuilder::default() + // .features(features) + .build() + .unwrap() + }; + + guest_pkg_to_options.insert("header-chain-guest", opts); + guest_pkg_to_options +} + +fn copy_binary_to_elfs_folder(network: String) { + let current_dir = env::current_dir().expect("Failed to get current dir"); + let base_dir = current_dir.join("../.."); + + // Create elfs directory if it doesn't exist + let elfs_dir = base_dir.join("risc0-circuits/elfs"); + if !elfs_dir.exists() { + fs::create_dir_all(&elfs_dir).expect("Failed to create elfs directory"); + println!("cargo:warning=Created elfs directory at {:?}", elfs_dir); + } + + // Build source path + let src_path = 
current_dir.join("target/riscv-guest/header-chain/header-chain-guest/riscv32im-risc0-zkvm-elf/docker/header-chain-guest.bin"); + if !src_path.exists() { + println!( + "cargo:warning=Source binary not found at {:?}, skipping copy", + src_path + ); + return; + } + + // Build destination path with network prefix + let dest_filename = format!("{}-header-chain-guest.bin", network.to_lowercase()); + let dest_path = elfs_dir.join(&dest_filename); + + // Copy the file + match fs::copy(&src_path, &dest_path) { + Ok(_) => println!( + "cargo:warning=Successfully copied binary to {:?}", + dest_path + ), + Err(e) => println!("cargo:warning=Failed to copy binary: {}", e), + } + + let elf_path = match network.as_str() { + "mainnet" => "../elfs/mainnet-header-chain-guest.bin", + "testnet4" => "../elfs/testnet4-header-chain-guest.bin", + "signet" => "../elfs/signet-header-chain-guest.bin", + "regtest" => "../elfs/regtest-header-chain-guest.bin", + _ => { + println!("cargo:warning=Invalid network specified, defaulting to mainnet"); + "../elfs/mainnet-header-chain-guest.bin" + } + }; + + let elf_bytes: Vec = fs::read(Path::new(elf_path)).expect("Failed to read ELF file"); + + let method_id = compute_image_id(elf_bytes.as_slice()).unwrap(); + println!("cargo:warning=Computed method ID: {:x?}", method_id); + println!( + "cargo:warning=Computed method ID words: {:?}", + method_id.as_words() + ); +} diff --git a/risc0-circuits/header-chain/guest/Cargo.lock b/risc0-circuits/header-chain/guest/Cargo.lock new file mode 100644 index 000000000..12547ea30 --- /dev/null +++ b/risc0-circuits/header-chain/guest/Cargo.lock @@ -0,0 +1,4884 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "alloy-consensus" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e32ef5c74bbeb1733c37f4ac7f866f8c8af208b7b4265e21af609dcac5bd5e" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-trie", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "serde", +] + +[[package]] +name = "alloy-consensus-any" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa13b7b1e1e3fedc42f0728103bfa3b4d566d3d42b606db449504d88dbdbdcf" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-eip2124" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"675264c957689f0fd75f5993a73123c2cc3b5c235a38f5b9037fe6c826bfb2c0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "crc", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b15b13d38b366d01e818fe8e710d4d702ef7499eacd44926a06171dd9585d0c" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eips" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5591581ca2ab0b3e7226a4047f9a1bfcf431da1d0cce3752fda609fea3c27e37" +dependencies = [ + "alloy-eip2124", + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "once_cell", + "serde", + "sha2", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a00ce618ae2f78369918be0c20f620336381502c83b6ed62c2f7b2db27698b0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 2.0.1", + "foldhash", + "hashbrown 0.15.4", + "indexmap", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" 
+version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f18e68a3882f372e045ddc89eb455469347767d17878ca492cfbac81e71a111" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-engine" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83dde9fcf1ccb9b815cc0c89bba26bbbbaae5150a53ae624ed0fc63cb3676c1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "derive_more 1.0.0", + "jsonwebtoken", + "rand 0.8.5", + "serde", + "strum", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b4dbee4d82f8a22dde18c28257bed759afeae7ba73da4a1479a039fd1445d04" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.14.0", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-serde" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8732058f5ca28c1d53d241e8504620b997ef670315d7c8afab856b3e3b80d945" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] 
+name = "alloy-sol-macro" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10ae8e9a91d328ae954c22542415303919aabe976fe7a92eb06db1b68fd59f2" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83ad5da86c127751bc607c174d6c9fe9b85ef0889a9ca0c641735d77d4f98f26" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3d30f0d3f9ba3b7686f3ff1de9ee312647aac705604417a2f40c604f409a9e" +dependencies = [ + "const-hex", + "dunce", + "heck 0.5.0", + "macro-string", + "proc-macro2", + "quote", + "syn 2.0.104", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d43d5e60466a440230c07761aa67671d4719d46f43be8ea6e7ed334d8db4a9ab" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-trie" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a94854e420f07e962f7807485856cde359ab99ab6413883e15235ad996e8b" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more 1.0.0", + "nybbles", + "serde", + "smallvec", + "tracing", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-r1cs-std", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0c292754729c8a190e50414fd1a37093c786c709899f29c9f7daccecfa855e" +dependencies = [ + "ahash", + "ark-crypto-primitives-macros", + "ark-ec", + "ark-ff 0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-snark", + "ark-std 0.5.0", + "blake2", + "derivative", + "digest 0.10.7", + "fnv", + "merlin", + "sha2", +] + +[[package]] +name = "ark-crypto-primitives-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e89fe77d1f0f4fe5b96dfc940923d88d17b6a773808124f21e764dfb063c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", + "itertools 0.13.0", + "num-bigint", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ 
+ "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-groth16" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88f1d0f3a534bb54188b8dcc104307db6c56cdae574ddc3212aec0625740fc7e" +dependencies = [ + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-poly", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", +] + +[[package]] +name = "ark-r1cs-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "941551ef1df4c7a401de7068758db6503598e6f01850bdb2cfdb614a1f9dbea1" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-relations", + "ark-std 0.5.0", + "educe", + "num-bigint", + "num-integer", + "num-traits", + "tracing", +] + +[[package]] +name = "ark-relations" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec46ddc93e7af44bcab5230937635b06fb5744464dd6a7e7b083e80ebd274384" +dependencies = [ + "ark-ff 0.5.0", + "ark-std 0.5.0", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + 
+[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-snark" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d368e2848c2d4c129ce7679a7d0d2d612b6a274d3ea6a13bad4445d61b381b88" +dependencies = [ + "ark-ff 0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] + +[[package]] +name = "auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "backtrace" +version = "0.3.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base58ck" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" +dependencies = [ + "bitcoin-internals", + "bitcoin_hashes", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + +[[package]] +name = "bech32" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitcoin" +version = "0.32.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8929a18b8e33ea6b3c09297b687baaa71fb1b97353243a3f1029fad5c59c5b" +dependencies = [ + "base58ck", + "base64 0.21.7", + "bech32", + "bitcoin-internals", + "bitcoin-io", + "bitcoin-units", + "bitcoin_hashes", + "hex-conservative", + "hex_lit", + "secp256k1", + "serde", +] + +[[package]] +name = "bitcoin-internals" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" +dependencies = [ + "serde", +] + +[[package]] +name = "bitcoin-io" 
+version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" + +[[package]] +name = "bitcoin-units" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" +dependencies = [ + "bitcoin-internals", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "blake3" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blst" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "bonsai-sdk" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bce8d6acc5286a16e94c29e9c885d1869358885e08a6feeb6bc54e36fe20055" +dependencies = [ + "duplicate", + "maybe-async", + "reqwest", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "bytes", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" + +[[package]] +name = "bytemuck" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +dependencies = [ + "serde", +] + +[[package]] +name = "c-kzg" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "once_cell", + "serde", +] + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.26", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "cc" +version = "1.2.30" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "circuits-lib" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types", + "alloy-rpc-types-eth", + "ark-bn254", + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-groth16", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "bincode", + "bitcoin", + "blake3", + "borsh", + "crypto-bigint", + "derive_more 1.0.0", + "eyre", + "hex", + "hex-literal", + "itertools 0.14.0", + "jmt 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "k256", + "lazy_static", + "num-bigint", + "num-traits", + "once_cell", + "risc0-groth16", + "risc0-zkvm", + "serde", + "serde_json", + "sha2", + "sov-rollup-interface", + "tracing", +] + +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.12", +] + +[[package]] +name = "const-hex" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + 
+[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version 
= "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "git+https://github.com/risc0/RustCrypto-crypto-bigint?tag=v0.5.5-risczero.0#3ab63a6f1048833f7047d5a50532e4a4cc789384" +dependencies = [ + "generic-array", + "getrandom 0.2.16", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.104", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] 
+ +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.104", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "docker-generate" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf673e0848ef09fa4aeeba78e681cf651c0c7d35f76ee38cec8e55bc32fa111" + +[[package]] +name = "downcast-rs" +version = "1.2.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "duplicate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de78e66ac9061e030587b2a2e75cc88f22304913c907b11307bca737141230cb" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "serdect", + "signature", + "spki", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" 
+dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pem-rfc7468", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + 
"foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + 
+[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +dependencies = [ + "allocator-api2", + "foldhash", + "serde", +] + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "header-chain-guest" +version = "0.1.0" +dependencies = [ + "circuits-lib", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hex_lit" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hyper" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-util" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "ics23" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b17f1a5bd7d12ad30a21445cfa5f52fd7651cb3243ba866f9916b1ec112f12" +dependencies = [ + "anyhow", + "blake2", + "blake3", + "bytes", + "hex", + "informalsystems-pbjson", + "prost", + "ripemd", + "serde", + "sha2", + "sha3", +] + +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "include_bytes_aligned" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +dependencies = [ + "equivalent", + "hashbrown 0.15.4", + "serde", +] + +[[package]] +name = "informalsystems-pbjson" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa4a0980c8379295100d70854354e78df2ee1c6ca0f96ffe89afeb3140e3a3d" +dependencies = [ + "base64 0.21.7", + "serde", +] + +[[package]] +name = "io-uring" +version = "0.7.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jmt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf2a10370b45cd850e64993ccd81d25ea2d4b5b0d0312546e7489fed82064f2e" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "jmt" 
+version = "0.11.0" +source = "git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2#550a2f20984a5c31c51715381d3f67390e138ffa" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "9.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +dependencies = [ + "base64 0.22.1", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "serdect", + "sha2", + "signature", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "lazy-regex" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60c7310b93682b36b98fa7ea4de998d3463ccbebd94d935d6b48ba5b6ffa7126" +dependencies = [ + "lazy-regex-proc_macros", + "once_cell", + 
"regex", +] + +[[package]] +name = "lazy-regex-proc_macros" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ba01db5ef81e17eb10a5e0f2109d1b3a3e29bac3070fdbd7d156bf7dbd206a1" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.104", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360e552c93fa0e8152ab463bc4c4837fce76a225df11dfaeea66c313de5e61f7" +dependencies = [ + "bitflags 2.9.1", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = 
"macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "maybe-async" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "metal" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21" +dependencies = [ + "bitflags 2.9.1", + "block", + "core-graphics-types", + "foreign-types", + "log", + "objc", + "paste", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + 
"wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + +[[package]] +name = "no_std_strings" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b0c77c1b780822bc749a33e39aeb2c07584ab93332303babeabb645298a76e" + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "nybbles" +version = "0.3.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "const-hex", + "serde", + "smallvec", +] + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "parity-scale-codec" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pem" +version = "3.0.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" +dependencies = [ + "memchr", + "thiserror 2.0.12", + "ucd-trie", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "postcard" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.9.1", + "lazy_static", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quinn" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.5.10", + "thiserror 
2.0.12", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +dependencies = [ + "bytes", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.12", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "serde", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = 
"0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + 
"aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "risc0-binfmt" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62eb7025356a233c1bc267c458a2ce56fcfc89b136d813c8a77be14ef1eaf2b1" +dependencies = [ + "anyhow", + "borsh", + "derive_more 2.0.1", + "elf", + "lazy_static", + "postcard", + "risc0-zkp", + "risc0-zkvm-platform", + "semver 1.0.26", + "serde", + "tracing", +] + +[[package]] +name = "risc0-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ffc0f135e6c1e9851e7e19438d03ff41a9d49199ee4f6c17b8bb30b4f83910" +dependencies = [ + "anyhow", + "cargo_metadata", + "derive_builder", + "dirs", + "docker-generate", + "hex", + "risc0-binfmt", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rzup", + "semver 1.0.26", + "serde", + "serde_json", + "stability", + "tempfile", +] + +[[package]] +name = "risc0-circuit-keccak" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0094af5a57b020388a03bdd3834959c7d62723f1687be81414ade25104d93263" +dependencies = [ + "anyhow", + "bytemuck", + "paste", + "risc0-binfmt", + "risc0-circuit-recursion", + "risc0-core", + "risc0-zkp", + "tracing", +] + +[[package]] +name = "risc0-circuit-recursion" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ebded45c902c2b6939924a1cddd1d06b5d1d4ad1531e8798ebfee78f9c038d" +dependencies = [ + "anyhow", + "bytemuck", + "hex", + "metal", + "risc0-core", + "risc0-zkp", + "tracing", +] + +[[package]] +name = "risc0-circuit-rv32im" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15030849f8356f01f23c74b37dbfa4283100b594eb634109993e9e005ef45f64" +dependencies = [ + "anyhow", + "bit-vec", + "bytemuck", + "derive_more 2.0.1", + "paste", + "risc0-binfmt", + "risc0-core", + "risc0-zkp", + "serde", + "tracing", +] + +[[package]] +name = "risc0-core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317bbf70a8750b64d4fd7a2bdc9d7d5f30d8bb305cae486962c797ef35c8d08e" 
+dependencies = [ + "bytemuck", + "bytemuck_derive", + "rand_core 0.6.4", +] + +[[package]] +name = "risc0-groth16" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cf5d0b673d5fc67a89147c2e9c53134707dcc8137a43d1ef06b4ff68e99b74f" +dependencies = [ + "anyhow", + "ark-bn254", + "ark-ec", + "ark-groth16", + "ark-serialize 0.5.0", + "bytemuck", + "hex", + "num-bigint", + "num-traits", + "risc0-binfmt", + "risc0-zkp", + "serde", + "stability", +] + +[[package]] +name = "risc0-zkos-v1compat" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76c479b69d1987cb54ac72dcc017197296fdcd6daf78fafc10cbbd3a167a7de" +dependencies = [ + "include_bytes_aligned", + "no_std_strings", +] + +[[package]] +name = "risc0-zkp" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287e9cd6d7b3b38eeb49c62090c46a1935922309fbd997a9143ed8c43c8f3cb" +dependencies = [ + "anyhow", + "blake2", + "borsh", + "bytemuck", + "cfg-if", + "digest 0.10.7", + "hex", + "hex-literal", + "metal", + "paste", + "rand_core 0.6.4", + "risc0-core", + "risc0-zkvm-platform", + "serde", + "sha2", + "stability", + "tracing", +] + +[[package]] +name = "risc0-zkvm" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9684b333c1c5d83f29ce2a92314ccfafd9d8cdfa6c4e19c07b97015d2f1eb9d0" +dependencies = [ + "anyhow", + "bincode", + "bonsai-sdk", + "borsh", + "bytemuck", + "bytes", + "derive_more 2.0.1", + "getrandom 0.2.16", + "hex", + "lazy-regex", + "prost", + "risc0-binfmt", + "risc0-build", + "risc0-circuit-keccak", + "risc0-circuit-recursion", + "risc0-circuit-rv32im", + "risc0-core", + "risc0-groth16", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rrs-lib", + "rzup", + "semver 1.0.26", + "serde", + "sha2", + "stability", + "tempfile", + "tracing", +] + +[[package]] +name = "risc0-zkvm-platform" +version = "2.0.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae9cb2c2f6cab2dfa395ea6e2576713929040c7fb0c5f4150d13e1119d18686" +dependencies = [ + "bytemuck", + "cfg-if", + "getrandom 0.2.16", + "getrandom 0.3.3", + "libm", + "stability", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "rrs-lib" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4382d3af3a4ebdae7f64ba6edd9114fff92c89808004c4943b393377a25d001" +dependencies = [ + "downcast-rs", + "paste", +] + +[[package]] +name = "ruint" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rand 0.9.2", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.26", +] + +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustls" +version = "0.23.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "rzup" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400558bf12d4292a7804093b60a437ba8b0219ea7d53716b2c010a0d31e5f4a8" +dependencies = [ + "semver 1.0.26", + "serde", + "strum", + "tempfile", + "thiserror 2.0.12", + "toml", + "yaml-rust2", +] + +[[package]] +name = "schemars" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.104", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" +dependencies = [ + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_json" +version = "1.0.141" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "git+https://github.com/risc0/RustCrypto-hashes?tag=sha2-v0.10.8-risczero.0#244dc3b08788f7a4ccce14c66896ae3b4f24c166" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", + "sha2-asm", +] + +[[package]] +name = "sha2-asm" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +dependencies = [ + "cc", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror 2.0.12", + "time", +] + +[[package]] +name = "slab" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "sov-keys" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "borsh", + "derive_more 1.0.0", + "digest 0.10.7", + "hex", + "k256", + "rand 0.8.5", + "schemars", + "serde", + "sha2", + "thiserror 2.0.12", +] + +[[package]] +name = "sov-rollup-interface" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "anyhow", + "borsh", + "bytes", + "digest 0.10.7", + "jmt 0.11.0 
(git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2)", + "serde", + "sha2", + "sov-keys", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.104", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn-solidity" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4560533fbd6914b94a8fb5cc803ed6801c3455668db3b810702c57612bac9412" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = 
"1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + 
+[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" +dependencies = [ + "backtrace", + "bytes", + "io-uring", + "libc", + "mio", + "pin-project-lite", + "slab", + "socket2 0.6.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + 
"static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + +[[package]] +name = "want" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 
0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = 
"windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] diff --git a/risc0-circuits/header-chain/guest/Cargo.toml b/risc0-circuits/header-chain/guest/Cargo.toml new file mode 100644 index 000000000..79c0611aa --- /dev/null +++ b/risc0-circuits/header-chain/guest/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "header-chain-guest" +version = "0.1.0" +edition = "2021" + +[workspace] + +[dependencies] +circuits-lib = { path = "../../../circuits-lib" } + +[patch.crates-io] +sha2 = { git = "https://github.com/risc0/RustCrypto-hashes", tag = "sha2-v0.10.8-risczero.0" } +crypto-bigint = { git = "https://github.com/risc0/RustCrypto-crypto-bigint", tag = "v0.5.5-risczero.0" } + +[profile.release] +debug = 0 +lto = true +opt-level = 3 +codegen-units = 1 \ No newline at end of file diff --git a/risc0-circuits/header-chain/guest/src/main.rs 
b/risc0-circuits/header-chain/guest/src/main.rs new file mode 100644 index 000000000..4371a0365 --- /dev/null +++ b/risc0-circuits/header-chain/guest/src/main.rs @@ -0,0 +1,4 @@ +fn main() { + let zkvm_guest = circuits_lib::common::zkvm::Risc0Guest::new(); + circuits_lib::header_chain::header_chain_circuit(&zkvm_guest); +} diff --git a/risc0-circuits/header-chain/src/lib.rs b/risc0-circuits/header-chain/src/lib.rs new file mode 100644 index 000000000..1bdb3085f --- /dev/null +++ b/risc0-circuits/header-chain/src/lib.rs @@ -0,0 +1 @@ +include!(concat!(env!("OUT_DIR"), "/methods.rs")); diff --git a/risc0-circuits/work-only/Cargo.lock b/risc0-circuits/work-only/Cargo.lock new file mode 100644 index 000000000..072b0b4b7 --- /dev/null +++ b/risc0-circuits/work-only/Cargo.lock @@ -0,0 +1,1385 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = 
"blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "bytemuck" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.12", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" 
+dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "docker-generate" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf673e0848ef09fa4aeeba78e681cf651c0c7d35f76ee38cec8e55bc32fa111" + +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = 
"foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "include_bytes_aligned" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" + +[[package]] +name = "indexmap" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +dependencies = [ + "equivalent", + "hashbrown 0.15.4", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libredox" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" +dependencies = [ + "bitflags 2.9.1", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "metal" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21" +dependencies = [ + "bitflags 2.9.1", + "block", + "core-graphics-types", + "foreign-types", + "log", + "objc", + "paste", +] + +[[package]] +name = "no_std_strings" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b0c77c1b780822bc749a33e39aeb2c07584ab93332303babeabb645298a76e" + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "postcard" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1de96e20f51df24ca73cafcc4690e044854d803259db27a00a461cb3b9d17a" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "risc0-binfmt" +version 
= "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62eb7025356a233c1bc267c458a2ce56fcfc89b136d813c8a77be14ef1eaf2b1" +dependencies = [ + "anyhow", + "borsh", + "derive_more", + "elf", + "lazy_static", + "postcard", + "risc0-zkp", + "risc0-zkvm-platform", + "semver", + "serde", + "tracing", +] + +[[package]] +name = "risc0-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ffc0f135e6c1e9851e7e19438d03ff41a9d49199ee4f6c17b8bb30b4f83910" +dependencies = [ + "anyhow", + "cargo_metadata", + "derive_builder", + "dirs", + "docker-generate", + "hex", + "risc0-binfmt", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rzup", + "semver", + "serde", + "serde_json", + "stability", + "tempfile", +] + +[[package]] +name = "risc0-core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317bbf70a8750b64d4fd7a2bdc9d7d5f30d8bb305cae486962c797ef35c8d08e" +dependencies = [ + "bytemuck", + "bytemuck_derive", + "rand_core", +] + +[[package]] +name = "risc0-zkos-v1compat" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76c479b69d1987cb54ac72dcc017197296fdcd6daf78fafc10cbbd3a167a7de" +dependencies = [ + "include_bytes_aligned", + "no_std_strings", +] + +[[package]] +name = "risc0-zkp" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287e9cd6d7b3b38eeb49c62090c46a1935922309fbd997a9143ed8c43c8f3cb" +dependencies = [ + "anyhow", + "blake2", + "borsh", + "bytemuck", + "cfg-if", + "digest", + "hex", + "hex-literal", + "metal", + "paste", + "rand_core", + "risc0-core", + "risc0-zkvm-platform", + "serde", + "sha2", + "stability", + "tracing", +] + +[[package]] +name = "risc0-zkvm-platform" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cae9cb2c2f6cab2dfa395ea6e2576713929040c7fb0c5f4150d13e1119d18686" +dependencies = [ + "cfg-if", + "stability", +] + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "rzup" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400558bf12d4292a7804093b60a437ba8b0219ea7d53716b2c010a0d31e5f4a8" +dependencies = [ + "semver", + "serde", + "strum", + "tempfile", + "thiserror 2.0.12", + "toml", + "yaml-rust2", +] + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = 
"syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", +] + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = 
"0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] 
+name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +dependencies = [ + "memchr", +] + 
+[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "work-only" +version = "0.1.0" +dependencies = [ + "risc0-binfmt", + "risc0-build", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/risc0-circuits/work-only/Cargo.toml b/risc0-circuits/work-only/Cargo.toml new file mode 100644 index 000000000..076c016c1 --- /dev/null +++ b/risc0-circuits/work-only/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "work-only" +version = "0.1.0" +edition = "2021" + +[workspace] + +[build-dependencies] +risc0-build = { version = "2.3.1", features = ["unstable"] } +risc0-binfmt = {version = "2.0.2"} + +[package.metadata.risc0] +methods = ["guest"] \ No newline at end of file diff --git a/risc0-circuits/work-only/build.rs b/risc0-circuits/work-only/build.rs new file mode 100644 index 000000000..87d18a989 --- /dev/null +++ b/risc0-circuits/work-only/build.rs @@ -0,0 +1,164 @@ +use risc0_binfmt::compute_image_id; +use risc0_build::{embed_methods_with_options, DockerOptionsBuilder, GuestOptionsBuilder}; +use std::{collections::HashMap, env, fs, 
path::Path}; + +fn main() { + // Build environment variables + println!("cargo:rerun-if-env-changed=SKIP_GUEST_BUILD"); + println!("cargo:rerun-if-env-changed=REPR_GUEST_BUILD"); + println!("cargo:rerun-if-env-changed=OUT_DIR"); + + // Compile time constant environment variables + println!("cargo:rerun-if-env-changed=BITCOIN_NETWORK"); + println!("cargo:rerun-if-env-changed=TEST_SKIP_GUEST_BUILD"); + + if std::env::var("CLIPPY_ARGS").is_ok() { + let out_dir = env::var("OUT_DIR").expect("OUT_DIR not set"); + let dummy_path = Path::new(&out_dir).join("methods.rs"); + fs::write(dummy_path, "// dummy methods.rs for Clippy\n") + .expect("Failed to write dummy methods.rs"); + println!("cargo:warning=Skipping guest build in Clippy"); + return; + } + + // Check if we should skip the guest build for tests + if let Ok("1" | "true") = env::var("TEST_SKIP_GUEST_BUILD").as_deref() { + println!("cargo:warning=Skipping guest build in test. Exiting"); + return; + } + + let network = env::var("BITCOIN_NETWORK").unwrap_or_else(|_| { + println!("cargo:warning=BITCOIN_NETWORK not set, defaulting to 'mainnet'"); + "mainnet".to_string() + }); + println!("cargo:warning=Building for Bitcoin network: {}", network); + + let is_repr_guest_build = match env::var("REPR_GUEST_BUILD") { + Ok(value) => match value.as_str() { + "1" | "true" => { + println!("cargo:warning=REPR_GUEST_BUILD is set to true"); + true + } + "0" | "false" => { + println!("cargo:warning=REPR_GUEST_BUILD is set to false"); + false + } + _ => { + println!("cargo:warning=Invalid value for REPR_GUEST_BUILD: '{}'. Expected '0', '1', 'true', or 'false'. Defaulting to false.", value); + false + } + }, + Err(env::VarError::NotPresent) => { + println!("cargo:warning=REPR_GUEST_BUILD not set. Defaulting to false."); + false + } + Err(env::VarError::NotUnicode(_)) => { + println!( + "cargo:warning=REPR_GUEST_BUILD contains invalid Unicode. Defaulting to false." 
+ ); + false + } + }; + + // Use embed_methods_with_options with our custom options + let guest_pkg_to_options = get_guest_options(network.clone()); + embed_methods_with_options(guest_pkg_to_options); + + // After the build is complete, copy the generated file to the elfs folder + if is_repr_guest_build { + println!("cargo:warning=Copying binary to elfs folder"); + copy_binary_to_elfs_folder(network); + } else { + println!("cargo:warning=Not copying binary to elfs folder"); + } +} + +fn get_guest_options(network: String) -> HashMap<&'static str, risc0_build::GuestOptions> { + let mut guest_pkg_to_options = HashMap::new(); + + let opts = if env::var("REPR_GUEST_BUILD").is_ok() { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + let root_dir = format!("{manifest_dir}/../../"); + + println!( + "cargo:warning=Using Docker for guest build with root dir: {}", + root_dir + ); + + let docker_opts = DockerOptionsBuilder::default() + .root_dir(root_dir) + .env(vec![("BITCOIN_NETWORK".to_string(), network.clone())]) + .build() + .unwrap(); + + GuestOptionsBuilder::default() + // .features(features) + .use_docker(docker_opts) + .build() + .unwrap() + } else { + println!("cargo:warning=Guest code is not built in docker"); + GuestOptionsBuilder::default() + // .features(features) + .build() + .unwrap() + }; + + guest_pkg_to_options.insert("work-only-guest", opts); + guest_pkg_to_options +} + +fn copy_binary_to_elfs_folder(network: String) { + let current_dir = env::current_dir().expect("Failed to get current dir"); + let base_dir = current_dir.join("../.."); + + // Create elfs directory if it doesn't exist + let elfs_dir = base_dir.join("risc0-circuits/elfs"); + if !elfs_dir.exists() { + fs::create_dir_all(&elfs_dir).expect("Failed to create elfs directory"); + println!("cargo:warning=Created elfs directory at {:?}", elfs_dir); + } + + // Build source path + let src_path = 
current_dir.join("target/riscv-guest/work-only/work-only-guest/riscv32im-risc0-zkvm-elf/docker/work-only-guest.bin"); + if !src_path.exists() { + println!( + "cargo:warning=Source binary not found at {:?}, skipping copy", + src_path + ); + return; + } + + // Build destination path with network prefix + let dest_filename = format!("{}-work-only-guest.bin", network.to_lowercase()); + let dest_path = elfs_dir.join(&dest_filename); + + // Copy the file + match fs::copy(&src_path, &dest_path) { + Ok(_) => println!( + "cargo:warning=Successfully copied binary to {:?}", + dest_path + ), + Err(e) => println!("cargo:warning=Failed to copy binary: {}", e), + } + + let elf_path = match network.as_str() { + "mainnet" => "../elfs/mainnet-work-only-guest.bin", + "testnet4" => "../elfs/testnet4-work-only-guest.bin", + "signet" => "../elfs/signet-work-only-guest.bin", + "regtest" => "../elfs/regtest-work-only-guest.bin", + _ => { + println!("cargo:warning=Invalid network specified, defaulting to mainnet"); + "../elfs/mainnet-work-only-guest.bin" + } + }; + + let elf_bytes: Vec = fs::read(Path::new(elf_path)).expect("Failed to read ELF file"); + + let method_id = compute_image_id(elf_bytes.as_slice()).unwrap(); + println!("cargo:warning=Computed method ID: {:x?}", method_id); + println!( + "cargo:warning=Computed method ID words: {:?}", + method_id.as_words() + ); +} diff --git a/risc0-circuits/work-only/guest/Cargo.lock b/risc0-circuits/work-only/guest/Cargo.lock new file mode 100644 index 000000000..75e47e3d3 --- /dev/null +++ b/risc0-circuits/work-only/guest/Cargo.lock @@ -0,0 +1,4889 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "alloy-consensus" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e32ef5c74bbeb1733c37f4ac7f866f8c8af208b7b4265e21af609dcac5bd5e" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-trie", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "serde", +] + +[[package]] +name = "alloy-consensus-any" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa13b7b1e1e3fedc42f0728103bfa3b4d566d3d42b606db449504d88dbdbdcf" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-eip2124" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"675264c957689f0fd75f5993a73123c2cc3b5c235a38f5b9037fe6c826bfb2c0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "crc", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b15b13d38b366d01e818fe8e710d4d702ef7499eacd44926a06171dd9585d0c" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-eips" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5591581ca2ab0b3e7226a4047f9a1bfcf431da1d0cce3752fda609fea3c27e37" +dependencies = [ + "alloy-eip2124", + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "auto_impl", + "c-kzg", + "derive_more 1.0.0", + "once_cell", + "serde", + "sha2", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a00ce618ae2f78369918be0c20f620336381502c83b6ed62c2f7b2db27698b0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 2.0.1", + "foldhash", + "hashbrown 0.15.4", + "indexmap", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" 
+version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f18e68a3882f372e045ddc89eb455469347767d17878ca492cfbac81e71a111" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-engine" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83dde9fcf1ccb9b815cc0c89bba26bbbbaae5150a53ae624ed0fc63cb3676c1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "derive_more 1.0.0", + "jsonwebtoken", + "rand 0.8.5", + "serde", + "strum", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b4dbee4d82f8a22dde18c28257bed759afeae7ba73da4a1479a039fd1445d04" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.14.0", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-serde" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8732058f5ca28c1d53d241e8504620b997ef670315d7c8afab856b3e3b80d945" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] 
+name = "alloy-sol-macro" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10ae8e9a91d328ae954c22542415303919aabe976fe7a92eb06db1b68fd59f2" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83ad5da86c127751bc607c174d6c9fe9b85ef0889a9ca0c641735d77d4f98f26" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3d30f0d3f9ba3b7686f3ff1de9ee312647aac705604417a2f40c604f409a9e" +dependencies = [ + "const-hex", + "dunce", + "heck 0.5.0", + "macro-string", + "proc-macro2", + "quote", + "syn 2.0.104", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d43d5e60466a440230c07761aa67671d4719d46f43be8ea6e7ed334d8db4a9ab" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-trie" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a94854e420f07e962f7807485856cde359ab99ab6413883e15235ad996e8b" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more 1.0.0", + "nybbles", + "serde", + "smallvec", + "tracing", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-r1cs-std", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0c292754729c8a190e50414fd1a37093c786c709899f29c9f7daccecfa855e" +dependencies = [ + "ahash", + "ark-crypto-primitives-macros", + "ark-ec", + "ark-ff 0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-snark", + "ark-std 0.5.0", + "blake2", + "derivative", + "digest 0.10.7", + "fnv", + "merlin", + "sha2", +] + +[[package]] +name = "ark-crypto-primitives-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e89fe77d1f0f4fe5b96dfc940923d88d17b6a773808124f21e764dfb063c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", + "itertools 0.13.0", + "num-bigint", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ 
+ "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-groth16" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88f1d0f3a534bb54188b8dcc104307db6c56cdae574ddc3212aec0625740fc7e" +dependencies = [ + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-poly", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.4", +] + +[[package]] +name = "ark-r1cs-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "941551ef1df4c7a401de7068758db6503598e6f01850bdb2cfdb614a1f9dbea1" +dependencies = [ + "ark-ec", + "ark-ff 0.5.0", + "ark-relations", + "ark-std 0.5.0", + "educe", + "num-bigint", + "num-integer", + "num-traits", + "tracing", +] + +[[package]] +name = "ark-relations" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec46ddc93e7af44bcab5230937635b06fb5744464dd6a7e7b083e80ebd274384" +dependencies = [ + "ark-ff 0.5.0", + "ark-std 0.5.0", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + 
+[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "ark-snark" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d368e2848c2d4c129ce7679a7d0d2d612b6a274d3ea6a13bad4445d61b381b88" +dependencies = [ + "ark-ff 0.5.0", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] + +[[package]] +name = "auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "backtrace" +version = "0.3.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base58ck" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" +dependencies = [ + "bitcoin-internals", + "bitcoin_hashes", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + +[[package]] +name = "bech32" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitcoin" +version = "0.32.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8929a18b8e33ea6b3c09297b687baaa71fb1b97353243a3f1029fad5c59c5b" +dependencies = [ + "base58ck", + "base64 0.21.7", + "bech32", + "bitcoin-internals", + "bitcoin-io", + "bitcoin-units", + "bitcoin_hashes", + "hex-conservative", + "hex_lit", + "secp256k1", + "serde", +] + +[[package]] +name = "bitcoin-internals" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" +dependencies = [ + "serde", +] + +[[package]] +name = "bitcoin-io" 
+version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" + +[[package]] +name = "bitcoin-units" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5285c8bcaa25876d07f37e3d30c303f2609179716e11d688f51e8f1fe70063e2" +dependencies = [ + "bitcoin-internals", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "blake3" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blst" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "bonsai-sdk" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bce8d6acc5286a16e94c29e9c885d1869358885e08a6feeb6bc54e36fe20055" +dependencies = [ + "duplicate", + "maybe-async", + "reqwest", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "bytes", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" + +[[package]] +name = "bytemuck" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +dependencies = [ + "serde", +] + +[[package]] +name = "c-kzg" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "once_cell", + "serde", +] + +[[package]] +name = "camino" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.26", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "cc" +version = "1.2.30" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "circuits-lib" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types", + "alloy-rpc-types-eth", + "ark-bn254", + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.5.0", + "ark-groth16", + "ark-relations", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "bincode", + "bitcoin", + "blake3", + "borsh", + "crypto-bigint", + "derive_more 1.0.0", + "eyre", + "hex", + "hex-literal", + "itertools 0.14.0", + "jmt 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "k256", + "lazy_static", + "num-bigint", + "num-traits", + "once_cell", + "risc0-groth16", + "risc0-zkvm", + "serde", + "serde_json", + "sha2", + "sov-rollup-interface", + "tracing", +] + +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.12", +] + +[[package]] +name = "const-hex" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + 
+[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version 
= "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "git+https://github.com/risc0/RustCrypto-crypto-bigint?tag=v0.5.5-risczero.0#3ab63a6f1048833f7047d5a50532e4a4cc789384" +dependencies = [ + "generic-array", + "getrandom 0.2.16", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.104", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] 
+ +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.104", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "docker-generate" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf673e0848ef09fa4aeeba78e681cf651c0c7d35f76ee38cec8e55bc32fa111" + +[[package]] +name = "downcast-rs" +version = "1.2.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "duplicate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de78e66ac9061e030587b2a2e75cc88f22304913c907b11307bca737141230cb" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "serdect", + "signature", + "spki", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" 
+dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pem-rfc7468", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + 
"foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + 
+[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +dependencies = [ + "allocator-api2", + "foldhash", + "serde", +] + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hex_lit" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hyper" +version 
= "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-util" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "ics23" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b17f1a5bd7d12ad30a21445cfa5f52fd7651cb3243ba866f9916b1ec112f12" +dependencies = [ + "anyhow", + "blake2", + "blake3", + "bytes", + "hex", + "informalsystems-pbjson", + "prost", + "ripemd", + "serde", + "sha2", + "sha3", +] + +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "include_bytes_aligned" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +dependencies = [ + "equivalent", + "hashbrown 0.15.4", + "serde", +] + +[[package]] +name = "informalsystems-pbjson" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa4a0980c8379295100d70854354e78df2ee1c6ca0f96ffe89afeb3140e3a3d" +dependencies = [ + "base64 0.21.7", + "serde", +] + +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jmt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf2a10370b45cd850e64993ccd81d25ea2d4b5b0d0312546e7489fed82064f2e" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "jmt" +version = "0.11.0" +source = 
"git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2#550a2f20984a5c31c51715381d3f67390e138ffa" +dependencies = [ + "anyhow", + "borsh", + "digest 0.10.7", + "hashbrown 0.13.2", + "hex", + "ics23", + "itertools 0.10.5", + "mirai-annotations", + "num-derive", + "num-traits", + "serde", + "sha2", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "9.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +dependencies = [ + "base64 0.22.1", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "serdect", + "sha2", + "signature", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "lazy-regex" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60c7310b93682b36b98fa7ea4de998d3463ccbebd94d935d6b48ba5b6ffa7126" +dependencies = [ + "lazy-regex-proc_macros", + "once_cell", + "regex", +] + +[[package]] 
+name = "lazy-regex-proc_macros" +version = "3.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ba01db5ef81e17eb10a5e0f2109d1b3a3e29bac3070fdbd7d156bf7dbd206a1" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.104", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360e552c93fa0e8152ab463bc4c4837fce76a225df11dfaeea66c313de5e61f7" +dependencies = [ + "bitflags 2.9.1", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "macro-string" +version = "0.1.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "maybe-async" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "metal" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21" +dependencies = [ + "bitflags 2.9.1", + "block", + "core-graphics-types", + "foreign-types", + "log", + "objc", + "paste", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + "wasi 
0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + +[[package]] +name = "no_std_strings" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b0c77c1b780822bc749a33e39aeb2c07584ab93332303babeabb645298a76e" + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "nybbles" +version = "0.3.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "const-hex", + "serde", + "smallvec", +] + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "parity-scale-codec" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pem" +version = "3.0.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" +dependencies = [ + "memchr", + "thiserror 2.0.12", + "ucd-trie", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "postcard" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.9.1", + "lazy_static", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quinn" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.5.10", + "thiserror 
2.0.12", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +dependencies = [ + "bytes", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.12", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "serde", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = 
"0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + 
"aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "risc0-binfmt" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62eb7025356a233c1bc267c458a2ce56fcfc89b136d813c8a77be14ef1eaf2b1" +dependencies = [ + "anyhow", + "borsh", + "derive_more 2.0.1", + "elf", + "lazy_static", + "postcard", + "risc0-zkp", + "risc0-zkvm-platform", + "semver 1.0.26", + "serde", + "tracing", +] + +[[package]] +name = "risc0-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ffc0f135e6c1e9851e7e19438d03ff41a9d49199ee4f6c17b8bb30b4f83910" +dependencies = [ + "anyhow", + "cargo_metadata", + "derive_builder", + "dirs", + "docker-generate", + "hex", + "risc0-binfmt", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rzup", + "semver 1.0.26", + "serde", + "serde_json", + "stability", + "tempfile", +] + +[[package]] +name = "risc0-circuit-keccak" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0094af5a57b020388a03bdd3834959c7d62723f1687be81414ade25104d93263" +dependencies = [ + "anyhow", + "bytemuck", + "paste", + "risc0-binfmt", + "risc0-circuit-recursion", + "risc0-core", + "risc0-zkp", + "tracing", +] + +[[package]] +name = "risc0-circuit-recursion" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ebded45c902c2b6939924a1cddd1d06b5d1d4ad1531e8798ebfee78f9c038d" +dependencies = [ + "anyhow", + "bytemuck", + "hex", + "metal", + "risc0-core", + "risc0-zkp", + "tracing", +] + +[[package]] +name = "risc0-circuit-rv32im" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15030849f8356f01f23c74b37dbfa4283100b594eb634109993e9e005ef45f64" +dependencies = [ + "anyhow", + "bit-vec", + "bytemuck", + "derive_more 2.0.1", + "paste", + "risc0-binfmt", + "risc0-core", + "risc0-zkp", + "serde", + "tracing", +] + +[[package]] +name = "risc0-core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317bbf70a8750b64d4fd7a2bdc9d7d5f30d8bb305cae486962c797ef35c8d08e" 
+dependencies = [ + "bytemuck", + "bytemuck_derive", + "rand_core 0.6.4", +] + +[[package]] +name = "risc0-groth16" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cf5d0b673d5fc67a89147c2e9c53134707dcc8137a43d1ef06b4ff68e99b74f" +dependencies = [ + "anyhow", + "ark-bn254", + "ark-ec", + "ark-groth16", + "ark-serialize 0.5.0", + "bytemuck", + "hex", + "num-bigint", + "num-traits", + "risc0-binfmt", + "risc0-zkp", + "serde", + "stability", +] + +[[package]] +name = "risc0-zkos-v1compat" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76c479b69d1987cb54ac72dcc017197296fdcd6daf78fafc10cbbd3a167a7de" +dependencies = [ + "include_bytes_aligned", + "no_std_strings", +] + +[[package]] +name = "risc0-zkp" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a287e9cd6d7b3b38eeb49c62090c46a1935922309fbd997a9143ed8c43c8f3cb" +dependencies = [ + "anyhow", + "blake2", + "borsh", + "bytemuck", + "cfg-if", + "digest 0.10.7", + "hex", + "hex-literal", + "metal", + "paste", + "rand_core 0.6.4", + "risc0-core", + "risc0-zkvm-platform", + "serde", + "sha2", + "stability", + "tracing", +] + +[[package]] +name = "risc0-zkvm" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9684b333c1c5d83f29ce2a92314ccfafd9d8cdfa6c4e19c07b97015d2f1eb9d0" +dependencies = [ + "anyhow", + "bincode", + "bonsai-sdk", + "borsh", + "bytemuck", + "bytes", + "derive_more 2.0.1", + "getrandom 0.2.16", + "hex", + "lazy-regex", + "prost", + "risc0-binfmt", + "risc0-build", + "risc0-circuit-keccak", + "risc0-circuit-recursion", + "risc0-circuit-rv32im", + "risc0-core", + "risc0-groth16", + "risc0-zkos-v1compat", + "risc0-zkp", + "risc0-zkvm-platform", + "rrs-lib", + "rzup", + "semver 1.0.26", + "serde", + "sha2", + "stability", + "tempfile", + "tracing", +] + +[[package]] +name = "risc0-zkvm-platform" +version = "2.0.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae9cb2c2f6cab2dfa395ea6e2576713929040c7fb0c5f4150d13e1119d18686" +dependencies = [ + "bytemuck", + "cfg-if", + "getrandom 0.2.16", + "getrandom 0.3.3", + "libm", + "stability", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "rrs-lib" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4382d3af3a4ebdae7f64ba6edd9114fff92c89808004c4943b393377a25d001" +dependencies = [ + "downcast-rs", + "paste", +] + +[[package]] +name = "ruint" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rand 0.9.2", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.26", +] + +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustls" +version = "0.23.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "rzup" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "400558bf12d4292a7804093b60a437ba8b0219ea7d53716b2c010a0d31e5f4a8" +dependencies = [ + "semver 1.0.26", + "serde", + "strum", + "tempfile", + "thiserror 2.0.12", + "toml", + "yaml-rust2", +] + +[[package]] +name = "schemars" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.104", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" +dependencies = [ + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_json" +version = "1.0.141" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "git+https://github.com/risc0/RustCrypto-hashes?tag=sha2-v0.10.8-risczero.0#244dc3b08788f7a4ccce14c66896ae3b4f24c166" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", + "sha2-asm", +] + +[[package]] +name = "sha2-asm" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +dependencies = [ + "cc", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror 2.0.12", + "time", +] + +[[package]] +name = "slab" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "sov-keys" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "borsh", + "derive_more 1.0.0", + "digest 0.10.7", + "hex", + "k256", + "rand 0.8.5", + "schemars", + "serde", + "sha2", + "thiserror 2.0.12", +] + +[[package]] +name = "sov-rollup-interface" +version = "0.7.3-rc.5" +source = "git+https://github.com/chainwayxyz/citrea?tag=v0.7.3-rc.5#bbf5f5622291c45f8eda26bf9d905e045724a4c9" +dependencies = [ + "anyhow", + "borsh", + "bytes", + "digest 0.10.7", + "jmt 0.11.0 
(git+https://github.com/penumbra-zone/jmt.git?rev=550a2f2)", + "serde", + "sha2", + "sov-keys", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn 2.0.104", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.104", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn-solidity" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4560533fbd6914b94a8fb5cc803ed6801c3455668db3b810702c57612bac9412" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = 
"1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + 
+[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" +dependencies = [ + "backtrace", + "bytes", + "io-uring", + "libc", + "mio", + "pin-project-lite", + "slab", + "socket2 0.6.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + 
"static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + +[[package]] +name = "want" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 
0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = 
"windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "work-only-guest" +version = "0.1.0" +dependencies = [ + "circuits-lib", +] + +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" 
+dependencies = [ + "tap", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[patch.unused]] +name = "k256" +version = "0.13.3" +source = "git+https://github.com/risc0/RustCrypto-elliptic-curves?tag=k256%2Fv0.13.3-risczero.1#ff5d67b095cfcc2569b7789f2079ed87ef2c7756" diff --git a/risc0-circuits/work-only/guest/Cargo.toml b/risc0-circuits/work-only/guest/Cargo.toml new file mode 100644 index 000000000..6da601e4b --- /dev/null +++ b/risc0-circuits/work-only/guest/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "work-only-guest" +version = "0.1.0" +edition = "2021" + +[workspace] + +[dependencies] +circuits-lib = { path = "../../../circuits-lib" } + +[patch.crates-io] +sha2 = { git = "https://github.com/risc0/RustCrypto-hashes", tag = "sha2-v0.10.8-risczero.0" } +crypto-bigint = { git = 
"https://github.com/risc0/RustCrypto-crypto-bigint", tag = "v0.5.5-risczero.0" } +k256 = { git = "https://github.com/risc0/RustCrypto-elliptic-curves", tag = "k256/v0.13.3-risczero.1" } + +[profile.release] +debug = 0 +lto = true +opt-level = 3 +codegen-units = 1 \ No newline at end of file diff --git a/risc0-circuits/work-only/guest/src/main.rs b/risc0-circuits/work-only/guest/src/main.rs new file mode 100644 index 000000000..cb8ea40dc --- /dev/null +++ b/risc0-circuits/work-only/guest/src/main.rs @@ -0,0 +1,6 @@ +use circuits_lib::common; +use circuits_lib::work_only::work_only_circuit; +fn main() { + let zkvm_guest = common::zkvm::Risc0Guest::new(); + work_only_circuit(&zkvm_guest); +} diff --git a/risc0-circuits/work-only/src/lib.rs b/risc0-circuits/work-only/src/lib.rs new file mode 100644 index 000000000..1bdb3085f --- /dev/null +++ b/risc0-circuits/work-only/src/lib.rs @@ -0,0 +1 @@ +include!(concat!(env!("OUT_DIR"), "/methods.rs")); diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 000000000..96204fe77 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "1.85" +components = ["rustfmt", "rust-src"] +profile = "minimal" diff --git a/scripts/Bridge.json b/scripts/Bridge.json new file mode 100644 index 000000000..54d9a08ef --- /dev/null +++ b/scripts/Bridge.json @@ -0,0 +1 @@ 
+{"abi":[{"type":"function","name":"CODESEP_POS","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"EPOCH","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"INPUT_INDEX","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"KEY_VERSION","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"LIGHT_CLIENT","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract BitcoinLightClient"}],"stateMutability":"view"},{"type":"function","name":"SCHNORR_VERIFIER_PRECOMPILE","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"SIGHASH_ALL_HASH_TYPE","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"SIGHASH_SINGLE_ANYONECANPAY_HASH_TYPE","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"SPEND_TYPE_EXT","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"SPEND_TYPE_NO_EXT","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"SYSTEM_CALLER","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"acceptOwnership","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"batchWithdraw","inputs":[{"name":"txIds","type":"bytes32[]","internalType":"bytes32[]"},{"name":"outputIds","type":"bytes4[]","internalType":"bytes4[]"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"deposit","
inputs":[{"name":"moveTx","type":"tuple","internalType":"struct Bridge.Transaction","components":[{"name":"version","type":"bytes4","internalType":"bytes4"},{"name":"flag","type":"bytes2","internalType":"bytes2"},{"name":"vin","type":"bytes","internalType":"bytes"},{"name":"vout","type":"bytes","internalType":"bytes"},{"name":"witness","type":"bytes","internalType":"bytes"},{"name":"locktime","type":"bytes4","internalType":"bytes4"}]},{"name":"proof","type":"tuple","internalType":"struct Bridge.MerkleProof","components":[{"name":"intermediateNodes","type":"bytes","internalType":"bytes"},{"name":"blockHeight","type":"uint256","internalType":"uint256"},{"name":"index","type":"uint256","internalType":"uint256"}]},{"name":"shaScriptPubkeys","type":"bytes32","internalType":"bytes32"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"depositAmount","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"depositPrefix","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"depositSuffix","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"depositTxIds","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"failedDepositVault","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"getAggregatedKey","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"getWithdrawalCount","inputs":[],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"view"},{"type":"function","name":"initialize","inputs":[{"name":"_depositPrefix","type":"bytes",
"internalType":"bytes"},{"name":"_depositSuffix","type":"bytes","internalType":"bytes"},{"name":"_depositAmount","type":"uint256","internalType":"uint256"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"initialized","inputs":[],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"operator","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"owner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"pendingOwner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"processedTxIds","inputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"renounceOwnership","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"replaceDeposit","inputs":[{"name":"replaceTx","type":"tuple","internalType":"struct Bridge.Transaction","components":[{"name":"version","type":"bytes4","internalType":"bytes4"},{"name":"flag","type":"bytes2","internalType":"bytes2"},{"name":"vin","type":"bytes","internalType":"bytes"},{"name":"vout","type":"bytes","internalType":"bytes"},{"name":"witness","type":"bytes","internalType":"bytes"},{"name":"locktime","type":"bytes4","internalType":"bytes4"}]},{"name":"proof","type":"tuple","internalType":"struct 
Bridge.MerkleProof","components":[{"name":"intermediateNodes","type":"bytes","internalType":"bytes"},{"name":"blockHeight","type":"uint256","internalType":"uint256"},{"name":"index","type":"uint256","internalType":"uint256"}]},{"name":"idToReplace","type":"uint256","internalType":"uint256"},{"name":"shaScriptPubkeys","type":"bytes32","internalType":"bytes32"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"replacePrefix","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"replaceSuffix","inputs":[],"outputs":[{"name":"","type":"bytes","internalType":"bytes"}],"stateMutability":"view"},{"type":"function","name":"safeWithdraw","inputs":[{"name":"prepareTx","type":"tuple","internalType":"struct Bridge.Transaction","components":[{"name":"version","type":"bytes4","internalType":"bytes4"},{"name":"flag","type":"bytes2","internalType":"bytes2"},{"name":"vin","type":"bytes","internalType":"bytes"},{"name":"vout","type":"bytes","internalType":"bytes"},{"name":"witness","type":"bytes","internalType":"bytes"},{"name":"locktime","type":"bytes4","internalType":"bytes4"}]},{"name":"prepareProof","type":"tuple","internalType":"struct Bridge.MerkleProof","components":[{"name":"intermediateNodes","type":"bytes","internalType":"bytes"},{"name":"blockHeight","type":"uint256","internalType":"uint256"},{"name":"index","type":"uint256","internalType":"uint256"}]},{"name":"payoutTx","type":"tuple","internalType":"struct 
Bridge.Transaction","components":[{"name":"version","type":"bytes4","internalType":"bytes4"},{"name":"flag","type":"bytes2","internalType":"bytes2"},{"name":"vin","type":"bytes","internalType":"bytes"},{"name":"vout","type":"bytes","internalType":"bytes"},{"name":"witness","type":"bytes","internalType":"bytes"},{"name":"locktime","type":"bytes4","internalType":"bytes4"}]},{"name":"blockHeader","type":"bytes","internalType":"bytes"},{"name":"scriptPubKey","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"setDepositScript","inputs":[{"name":"_depositPrefix","type":"bytes","internalType":"bytes"},{"name":"_depositSuffix","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"setFailedDepositVault","inputs":[{"name":"_failedDepositVault","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"setOperator","inputs":[{"name":"_operator","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"setReplaceScript","inputs":[{"name":"_replacePrefix","type":"bytes","internalType":"bytes"},{"name":"_replaceSuffix","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"transferOwnership","inputs":[{"name":"newOwner","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"withdraw","inputs":[{"name":"txId","type":"bytes32","internalType":"bytes32"},{"name":"outputId","type":"bytes4","internalType":"bytes4"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"withdrawalUTXOs","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"txId","type":"bytes32","internalType":"bytes32"},{"name":"outputId","type":"bytes4","internalType":"bytes4"}],"stateMutability":"view"},{"type":"event","name":"De
posit","inputs":[{"name":"wtxId","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"txId","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"recipient","type":"address","indexed":false,"internalType":"address"},{"name":"timestamp","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"depositId","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"DepositReplaced","inputs":[{"name":"index","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"oldTxId","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"newTxId","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"DepositScriptUpdate","inputs":[{"name":"depositPrefix","type":"bytes","indexed":false,"internalType":"bytes"},{"name":"depositSuffix","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"event","name":"DepositTransferFailed","inputs":[{"name":"wtxId","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"txId","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"recipient","type":"address","indexed":false,"internalType":"address"},{"name":"timestamp","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"depositId","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"FailedDepositVaultUpdated","inputs":[{"name":"oldVault","type":"address","indexed":false,"internalType":"address"},{"name":"newVault","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Initialized","inputs":[{"name":"version","type":"uint64","indexed":false,"internalType":"uint64"}],"anonymous":false},{"type":"event","name":"OperatorUpdated","inputs":[{"name":"oldOperator","type":"address","indexed":false,"internalType":"address"},{"name":"newOperator","type":"address","indexed":false,"internalType":
"address"}],"anonymous":false},{"type":"event","name":"OwnershipTransferStarted","inputs":[{"name":"previousOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"OwnershipTransferred","inputs":[{"name":"previousOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"ReplaceScriptUpdate","inputs":[{"name":"replacePrefix","type":"bytes","indexed":false,"internalType":"bytes"},{"name":"replaceSuffix","type":"bytes","indexed":false,"internalType":"bytes"}],"anonymous":false},{"type":"event","name":"SafeWithdrawal","inputs":[{"name":"payoutTx","type":"tuple","indexed":false,"internalType":"struct Bridge.Transaction","components":[{"name":"version","type":"bytes4","internalType":"bytes4"},{"name":"flag","type":"bytes2","internalType":"bytes2"},{"name":"vin","type":"bytes","internalType":"bytes"},{"name":"vout","type":"bytes","internalType":"bytes"},{"name":"witness","type":"bytes","internalType":"bytes"},{"name":"locktime","type":"bytes4","internalType":"bytes4"}]},{"name":"spentUtxo","type":"tuple","indexed":false,"internalType":"struct Bridge.UTXO","components":[{"name":"txId","type":"bytes32","internalType":"bytes32"},{"name":"outputId","type":"bytes4","internalType":"bytes4"}]},{"name":"index","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"event","name":"Withdrawal","inputs":[{"name":"utxo","type":"tuple","indexed":false,"internalType":"struct 
Bridge.UTXO","components":[{"name":"txId","type":"bytes32","internalType":"bytes32"},{"name":"outputId","type":"bytes4","internalType":"bytes4"}]},{"name":"index","type":"uint256","indexed":false,"internalType":"uint256"},{"name":"timestamp","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"error","name":"InvalidInitialization","inputs":[]},{"type":"error","name":"NotInitializing","inputs":[]},{"type":"error","name":"OwnableInvalidOwner","inputs":[{"name":"owner","type":"address","internalType":"address"}]},{"type":"error","name":"OwnableUnauthorizedAccount","inputs":[{"name":"account","type":"address","internalType":"address"}]}],"bytecode":{"object":"0x60808060405234601557614958908161001a8239f35b5f80fdfe60806040526004361015610011575f80fd5b5f3560e01c80630659216714610275578063092ac5d4146101f85780630bd89ab7146101f85780631369ac3e146101f8578063158ef93e14610270578063198546231461026b57806323dacd29146102665780632594f107146102615780633c918b6c1461025c5780634126013714610257578063419759f514610252578063428bcd351461024d5780634379caa514610248578063471ba1e314610243578063570ca7351461023e5780635b4f894d146102395780636b0b5a94146102345780636cf7d6411461022f578063715018a61461022a578063781952a81461022557806379ba5097146102205780637ec9732a1461021b57806385fb7151146102165780638752b6b2146102115780638786dba71461020c5780638da5cb5b146102075780639072f747146102025780639a4f308d146101fd578063a0dc2758146101f8578063a670e7ed146101f3578063b2497e70146101ee578063b3ab15fb146101e9578063d761753e146101e4578063e30c3978146101df578063e613ae00146101da578063f2fde38b146101d5578063f42cb4fc146101d0578063f8e655d2146101cb5763fb11d7b9146101c6575f80fd5b6119e0565b611894565b6117ef565b611776565b611754565b611720565b6116f2565b61166f565b611654565b611609565b6103aa565b6115ed565b61153a565b6114b2565b61148d565b611455565b61139d565b6111d7565b6110dd565b6110c0565b611043565b611028565b610f3f565b610f24565b610c4d565b610bf6565b610bc7565b610bac565b610b74565b61092a565b6108da565b6105ba565b610583565b6104265
65b6103d5565b6102ab565b634e487b7160e01b5f52603260045260245ffd5b6008548110156102a65760085f5260205f2001905f90565b61027a565b346102fb5760203660031901126102fb576004356008548110156102fb5760209060085f527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30154604051908152f35b5f80fd5b634e487b7160e01b5f52604160045260245ffd5b90601f801991011681019081106001600160401b0382111761033457604052565b6102ff565b60405190610348604083610313565b565b6001600160401b03811161033457601f01601f191660200190565b60405190610374604083610313565b600182525f6020830152565b602060409281835280519182918282860152018484015e5f828201840152601f01601f1916010190565b346102fb575f3660031901126102fb576103d16103c5610365565b60405191829182610380565b0390f35b346102fb575f3660031901126102fb57602060ff5f54166040519015158152f35b9181601f840112156102fb578235916001600160401b0383116102fb576020808501948460051b0101116102fb57565b60403660031901126102fb576004356001600160401b0381116102fb576104519036906004016103f6565b6024356001600160401b0381116102fb576104709036906004016103f6565b929083830361052d5761048f61048884600154611ae5565b3414611afd565b600754935f5b84811061049e57005b807f3311a04a346a103ac115cca33028a2bc82f1964805860d0d3fc84a2772496ada6104cd6001938888611b49565b356105006104e46104df85888a611b49565b611b59565b6104ec610339565b9283526001600160e01b0319166020830152565b61050981611b63565b610513838a611c02565b610524604051928392429184611c0f565b0390a101610495565b60405162461bcd60e51b815260206004820152600f60248201526e098cadccee8d040dad2e6dac2e8c6d608b1b6044820152606490fd5b60405190610573604083610313565b60018252600160f91b6020830152565b346102fb575f3660031901126102fb576103d16103c5610564565b908160c09103126102fb5790565b908160609103126102fb5790565b346102fb5760803660031901126102fb576004356001600160401b0381116102fb576105ea90369060040161059e565b6024356001600160401b0381116102fb576106099036906004016105ac565b906044356064355f5490929060081c6001600160a01b031633036108955761074e6108909161067b7f4d7c644a48da4c7857af62a00bad9806f0388564f22955ed846d938c244047f0966106606
008548710611c40565b61067561066e600554610c78565b1515611c7c565b826132cd565b5050604081016107386107486106a361069e6106978587611cc8565b36916114e6565b6134c0565b9761074060608601956107126106d96106bf6106978a85611cc8565b6106d36106cc8b86611cc8565b9050611cfa565b90613591565b936106f26106ed6106976080860186611cc8565b6138dc565b9c8d6106fd85611b59565b9060a086019761070c89611b59565b936139fd565b61073061072861072183611b59565b9783611cc8565b989092611cc8565b959093611b59565b9636916114e6565b9236916114e6565b91613c90565b610779610774610770610769845f52600960205260405f2090565b5460ff1690565b1590565b611d24565b61079b61078e825f52600960205260405f2090565b805460ff19166001179055565b61087361086b6107e96107ba6107b08761028e565b90549060031b1c90565b966107ce856107c88961028e565b90611d70565b6107e460036107dc836145a1565b905014611d8d565b613d21565b6108666108616108596107fd600554610c78565b610848610843610835610811600654610c78565b9361082f89516108298761082485611bca565b611c02565b14611dd1565b88613619565b61083d610cb0565b90613eaf565b611e15565b610853818651611d17565b8561385e565b61083d610d65565b611e61565b613f5a565b948514611ead565b604051938493846040919493926060820195825260208201520152565b0390a1005b60405162461bcd60e51b815260206004820152601a60248201527f63616c6c6572206973206e6f7420746865206f70657261746f720000000000006044820152606490fd5b346102fb575f3660031901126102fb576103d16103c56108f8610dfa565b613667565b9181601f840112156102fb578235916001600160401b0383116102fb57602083818601950101116102fb57565b346102fb5760603660031901126102fb576004356001600160401b0381116102fb5761095a9036906004016108fd565b6024356001600160401b0381116102fb576109799036906004016108fd565b9092906044359073deaddeaddeaddeaddeaddeaddeaddeaddeaddead3303610b2f5760ff5f5416610aea577f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc94610a0c836109d8610aa8951515611f03565b6109e3871515611f4f565b6109f3600160ff195f5416175f55565b6109fd878761208a565b610a07848461216b565b600155565b5f8054610100600160a81b03191674deaddeaddeaddeaddeaddeaddeaddeaddeaddead00179055600280546001600160a01b0
319166007603160981b011790557ffbe5b6cbafb274f445d7fed869dc77a838d8243a22c460de156560e8857cad0360405180610a99819073deaddeaddeaddeaddeaddeaddeaddeaddeaddead602060408401935f81520152565b0390a160405194859485612335565b0390a1604080515f81526007603160981b0160208201527f79250b96878fd457364d1c1b77a660973c4f4ab67bda5e2fdb42caaa4d515f9d9181908101610890565b60405162461bcd60e51b815260206004820152601f60248201527f436f6e747261637420697320616c726561647920696e697469616c697a6564006044820152606490fd5b60405162461bcd60e51b815260206004820152601f60248201527f63616c6c6572206973206e6f74207468652073797374656d2063616c6c6572006044820152606490fd5b346102fb575f3660031901126102fb576020600154604051908152f35b60405190610ba0604083610313565b600482525f6020830152565b346102fb575f3660031901126102fb576103d16103c5610b91565b346102fb5760203660031901126102fb576004355f526009602052602060ff60405f2054166040519015158152f35b346102fb5760203660031901126102fb576004356007548110156102fb576007548110156102a65760409060075f5260205f209060011b016001815491015460e01b825191825263ffffffff60e01b166020820152f35b346102fb575f3660031901126102fb575f5460405160089190911c6001600160a01b03168152602090f35b90600182811c92168015610ca6575b6020831014610c9257565b634e487b7160e01b5f52602260045260245ffd5b91607f1691610c87565b604051905f8260055491610cc383610c78565b8083529260018116908115610d465750600114610ce7575b61034892500383610313565b5060055f90815290917f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db05b818310610d2a57505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610d12565b6020925061034894915060ff191682840152151560051b820101610cdb565b604051905f8260065491610d7883610c78565b8083529260018116908115610d465750600114610d9b5761034892500383610313565b5060065f90815290917ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f5b818310610dde57505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610dc6565b604051905f8260035491610e0d83610c78565b8083529260018116908115610d465750600114610e305761034
892500383610313565b5060035f90815290917fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b5b818310610e7357505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610e5b565b604051905f8260045491610ea283610c78565b8083529260018116908115610d465750600114610ec55761034892500383610313565b5060045f90815290917f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b5b818310610f0857505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610ef0565b346102fb575f3660031901126102fb576103d16103c5610dfa565b346102fb575f3660031901126102fb576040515f600554610f5f81610c78565b8084529060018116908115610fe55750600114610f87575b6103d1836103c581850382610313565b60055f9081527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0939250905b808210610fcb575090915081016020016103c5610f77565b919260018160209254838588010152019101909291610fb3565b60ff191660208086019190915291151560051b840190910191506103c59050610f77565b60405190611018604083610313565b60018252608360f81b6020830152565b346102fb575f3660031901126102fb576103d16103c5611009565b346102fb575f3660031901126102fb5761105b613fcd565b5f5160206149385f395f51905f5280546001600160a01b03199081169091555f5160206149185f395f51905f52805491821690555f906001600160a01b03167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e08280a3005b346102fb575f3660031901126102fb576020600754604051908152f35b346102fb575f3660031901126102fb575f5160206149385f395f51905f5254336001600160a01b0390911603611174575f5160206149385f395f51905f5280546001600160a01b03199081169091555f5160206149185f395f51905f5280543392811683179091556001600160a01b03167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e05f80a3005b63118cdaa760e01b5f523360045260245ffd5b60406003198201126102fb576004356001600160401b0381116102fb57816111b1916004016108fd565b92909291602435906001600160401b0382116102fb576111d3916004016108fd565b9091565b346102fb576111e536611187565b6111f0939193613fcd565b8215611336576001600160401b03831161033457611218836112136005546
10c78565b611f9b565b5f93601f841160011461129057906108909161126b85807f6c9ac69a5e351d3e7ac9be95040d29a264d1ce6a409ca9f042c64c66c3f2a23a985f91611285575b508160011b915f199060031b1c19161790565b6005555b6112798282612240565b60405194859485612335565b90508601355f611258565b60055f52601f1984167f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0905f5b81811061131e5750907f6c9ac69a5e351d3e7ac9be95040d29a264d1ce6a409ca9f042c64c66c3f2a23a968661089095949310611305575b5050600185811b0160055561126f565b8501355f19600388901b60f8161c191690555f806112f5565b858801358355602097880197600190930192016112bd565b60405162461bcd60e51b815260206004820152601e60248201527f5265706c616365207363726970742063616e6e6f7420626520656d70747900006044820152606490fd5b60209060031901126102fb576004356001600160a01b03811681036102fb5790565b346102fb576113ab3661137b565b6113b3613fcd565b6001600160a01b03811690811561141e57600280546001600160a01b031981169093179055604080516001600160a01b0393841681529290911660208301527f79250b96878fd457364d1c1b77a660973c4f4ab67bda5e2fdb42caaa4d515f9d919081908101610890565b60405162461bcd60e51b815260206004820152600f60248201526e496e76616c6964206164647265737360881b6044820152606490fd5b346102fb575f3660031901126102fb576103d16103c5610e8f565b6001600160e01b03198116036102fb57565b359061034882611470565b60403660031901126102fb576114b06024356004356114ab82611470565b61235c565b005b346102fb575f3660031901126102fb575f5160206149185f395f51905f52546040516001600160a01b039091168152602090f35b9291926114f28261034a565b916115006040519384610313565b8294818452818301116102fb578281602093845f960137010152565b9080601f830112156102fb57816020611537933591016114e6565b90565b60a03660031901126102fb576004356001600160401b0381116102fb5761156590369060040161059e565b6024356001600160401b0381116102fb576115849036906004016105ac565b906044356001600160401b0381116102fb576115a490369060040161059e565b6064356001600160401b0381116102fb576115c39036906004016108fd565b91608435946001600160401b0386116102fb576115e76114b096369060040161151c565b94612a7e565b346102fb575f3660031
901126102fb5760206040516102008152f35b346102fb575f3660031901126102fb576002546040516001600160a01b039091168152602090f35b60405190611640604083610313565b600482526001600160e01b03196020830152565b346102fb575f3660031901126102fb576103d16103c5611631565b346102fb577ffbe5b6cbafb274f445d7fed869dc77a838d8243a22c460de156560e8857cad0361169e3661137b565b6116a6613fcd565b5f8054610100600160a81b031916600883811b610100600160a81b03169190911791829055604080519290911c6001600160a01b03908116835290921660208201529081908101610890565b346102fb575f3660031901126102fb57602060405173deaddeaddeaddeaddeaddeaddeaddeaddeaddead8152f35b346102fb575f3660031901126102fb575f5160206149385f395f51905f52546040516001600160a01b039091168152602090f35b346102fb575f3660031901126102fb576040516001603160981b018152602090f35b346102fb576117843661137b565b61178c613fcd565b5f5160206149385f395f51905f5280546001600160a01b0319166001600160a01b039283169081179091555f5160206149185f395f51905f52549091167f38d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e227005f80a3005b346102fb575f3660031901126102fb576040515f60065461180f81610c78565b8084529060018116908115610fe55750600114611836576103d1836103c581850382610313565b60065f9081527ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f939250905b80821061187a575090915081016020016103c5610f77565b919260018160209254838588010152019101909291611862565b346102fb576118a236611187565b6118ad939193613fcd565b6118b8831515611f4f565b6001600160401b038311610334576118da836118d5600354610c78565b611feb565b5f93601f841160011461193a57906108909161192c85807f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc985f9161128557508160011b915f199060031b1c19161790565b6003555b611279828261216b565b60035f52601f1984167fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b905f5b8181106119c85750907f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc9686610890959493106119af575b5050600185811b01600355611930565b8501355f19600388901b60f8161c191690555f8061199f565b858801358355602097880197600190930192016119675
65b346102fb5760603660031901126102fb576004356001600160401b0381116102fb57611a1090369060040161059e565b6024356001600160401b0381116102fb57611a2f9036906004016105ac565b6044359073deaddeaddeaddeaddeaddeaddeaddeaddeaddead33148015611ab8575b15611a5f576114b092613095565b60405162461bcd60e51b815260206004820152602b60248201527f63616c6c6572206973206e6f74207468652073797374656d2063616c6c65722060448201526a37b91037b832b930ba37b960a91b6064820152608490fd5b505f543360089190911c6001600160a01b031614611a51565b634e487b7160e01b5f52601160045260245ffd5b81810292918115918404141715611af857565b611ad1565b15611b0457565b60405162461bcd60e51b815260206004820152601760248201527f496e76616c696420776974686472617720616d6f756e740000000000000000006044820152606490fd5b91908110156102a65760051b0190565b3561153781611470565b600754600160401b81101561033457600181016007556007548110156102a65760075f5260011b7fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c688016001602091835181550191015160e01c63ffffffff19825416179055565b9060208201809211611af857565b9060148201809211611af857565b9060018201809211611af857565b6001019081600111611af857565b91908201809211611af857565b606091949392611c37826080810197602090805183528163ffffffff60e01b91015116910152565b60408201520152565b15611c4757565b60405162461bcd60e51b815260206004820152600d60248201526c092dcecc2d8d2c840d2dcc8caf609b1b6044820152606490fd5b15611c8357565b60405162461bcd60e51b815260206004820152601960248201527f5265706c61636520736372697074206973206e6f7420736574000000000000006044820152606490fd5b903590601e19813603018212156102fb57018035906001600160401b0382116102fb576020019181360383136102fb57565b5f19810191908211611af857565b601f19810191908211611af857565b91908203918211611af857565b15611d2b57565b60405162461bcd60e51b815260206004820152601c60248201527f7478496420616c7265616479207573656420746f207265706c616365000000006044820152606490fd5b91611d899183549060031b91821b915f19901b19161790565b9055565b15611d9457565b60405162461bcd60e51b8152602060048201526015602482015274496e76616c6964207769746e657373206974656d7360581
b6044820152606490fd5b15611dd857565b60405162461bcd60e51b8152602060048201526015602482015274092dcecc2d8d2c840e6c6e4d2e0e840d8cadccee8d605b1b6044820152606490fd5b15611e1c57565b60405162461bcd60e51b815260206004820152601d60248201527f496e76616c6964207265706c61636520736372697074207072656669780000006044820152606490fd5b15611e6857565b60405162461bcd60e51b815260206004820152601d60248201527f496e76616c6964207265706c61636520736372697074207375666669780000006044820152606490fd5b15611eb457565b606460405162461bcd60e51b815260206004820152602060248201527f496e76616c6964207478496420746f207265706c6163652070726f76696465646044820152fd5b6115376108f8610dfa565b15611f0a57565b60405162461bcd60e51b815260206004820152601a60248201527f4465706f73697420616d6f756e742063616e6e6f7420626520300000000000006044820152606490fd5b15611f5657565b60405162461bcd60e51b815260206004820152601e60248201527f4465706f736974207363726970742063616e6e6f7420626520656d70747900006044820152606490fd5b601f8111611fa7575050565b60055f5260205f20906020601f840160051c83019310611fe1575b601f0160051c01905b818110611fd6575050565b5f8155600101611fcb565b9091508190611fc2565b601f8111611ff7575050565b60035f5260205f20906020601f840160051c83019310612031575b601f0160051c01905b818110612026575050565b5f815560010161201b565b9091508190612012565b601f821161204857505050565b5f5260205f20906020601f840160051c83019310612080575b601f0160051c01905b818110612075575050565b5f815560010161206a565b9091508190612061565b91906001600160401b038111610334576120b0816120a9600354610c78565b600361203b565b5f601f82116001146120ee5781906120de93945f926120e3575b50508160011b915f199060031b1c19161790565b600355565b013590505f806120ca565b60035f52601f198216937fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b915f5b868110612153575083600195961061213a575b505050811b01600355565b01355f19600384901b60f8161c191690555f808061212f565b9092602060018192868601358155019401910161211c565b91906001600160401b038111610334576121918161218a600454610c78565b600461203b565b5f601f82116001146121c35781906121be93945f926120e357505081600
11b915f199060031b1c19161790565b600455565b60045f52601f198216937f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b915f5b868110612228575083600195961061220f575b505050811b01600455565b01355f19600384901b60f8161c191690555f8080612204565b909260206001819286860135815501940191016121f1565b91906001600160401b038111610334576122668161225f600654610c78565b600661203b565b5f601f821160011461229857819061229393945f926120e35750508160011b915f199060031b1c19161790565b600655565b60065f52601f198216937ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f915f5b8681106122fd57508360019596106122e4575b505050811b01600655565b01355f19600384901b60f8161c191690555f80806122d9565b909260206001819286860135815501940191016122c6565b908060209392818452848401375f828201840152601f01601f1916010190565b929061234e906115379593604086526040860191612315565b926020818503910152612315565b9061236a6001543414611afd565b6040519060408201928284106001600160401b03851117610334577f3311a04a346a103ac115cca33028a2bc82f1964805860d0d3fc84a2772496ada93604052825263ffffffff60e01b1660208201526007546123c682611b63565b6123d7604051928392429184611c0f565b0390a1565b156123e357565b60405162461bcd60e51b815260206004820152601d60248201527f56696e206973206e6f742070726f7065726c7920666f726d61747465640000006044820152606490fd5b1561242f57565b60405162461bcd60e51b815260206004820152601e60248201527f566f7574206973206e6f742070726f7065726c7920666f726d617474656400006044820152606490fd5b908160209103126102fb575180151581036102fb5790565b97969591926124c394608096946124b5938b5260208b015260a060408b015260a08a0191612315565b918783036060890152612315565b930152565b6040513d5f823e3d90fd5b156124da57565b60405162461bcd60e51b815260206004820152601b60248201527f5472616e73616374696f6e206973206e6f7420696e20626c6f636b00000000006044820152606490fd5b1561252657565b60405162461bcd60e51b8152602060048201526024808201527f5061796f75742076696e206973206e6f742070726f7065726c7920666f726d616044820152631d1d195960e21b6064820152608490fd5b1561257e57565b60405162461bcd60e51b81526020600482015260286024820
1527f5061796f75742076696e2073686f756c6420686176652065786163746c79206f6044820152671b99481a5b9c1d5d60c21b6064820152608490fd5b156125db57565b60405162461bcd60e51b815260206004820152602560248201527f5061796f757420766f7574206973206e6f742070726f7065726c7920666f726d604482015264185d1d195960da1b6064820152608490fd5b1561263557565b60405162461bcd60e51b815260206004820152602860248201527f5061796f7574207769746e657373206973206e6f742070726f7065726c7920666044820152671bdc9b585d1d195960c21b6064820152608490fd5b1561269257565b60405162461bcd60e51b8152602060048201526012602482015271125b9d985b1a59081cdc195b9d081d1e125960721b6044820152606490fd5b156126d357565b60405162461bcd60e51b815260206004820152601b60248201527f496e76616c6964207370656e74206f7574707574206c656e67746800000000006044820152606490fd5b60405190612727604083610313565b60018252601160f91b6020830152565b1561273e57565b60405162461bcd60e51b815260206004820152602960248201527f496e76616c6964207370656e74206f757470757420736372697074207075626b6044820152680caf240d8cadccee8d60bb1b6064820152608490fd5b604051906127a4604083610313565b6002825261028960f51b6020830152565b156127bc57565b60405162461bcd60e51b815260206004820152602160248201527f5370656e74206f7574707574206973206e6f7420612050325452206f757470756044820152601d60fa1b6064820152608490fd5b1561281257565b60405162461bcd60e51b815260206004820152602260248201527f496e76616c6964207370656e74206f757470757420736372697074207075626b604482015261657960f01b6064820152608490fd5b805191908290602001825e015f815290565b6128d0979360249b99979561289b6128b7956128958f9a9695600896612862565b90612862565b6001600160e01b03199283168152911660048201520190612862565b9182526001600160e01b03191660208201520190612862565b6001600160e01b0319909216825260048201520190565b604051906128f6604083610313565b600a8252690a8c2e0a6d2ced0c2e6d60b31b6020830152565b1561291657565b60405162461bcd60e51b8152602060048201526011602482015270496e76616c6964207369676e617475726560781b6044820152606490fd5b6001600160f01b03198116036102fb57565b9035601e19823603018112156102fb57016020813591019160016001604
01b0382116102fb5781360383136102fb57565b612a7a606092959493956080835280356129ab81611470565b6001600160e01b031916608084015260208101356129c88161294f565b61ffff60f01b1660a0840152612a5a612a4860a0612a41612a21612a036129f26040880188612961565b60c0808c01526101408b0191612315565b612a0f8a880188612961565b8a8303607f190160e08c015290612315565b612a2e6080870187612961565b898303607f19016101008b015290612315565b9301611482565b6001600160e01b031916610120850152565b8651602084810191909152909601516001600160e01b0319166040830152565b0152565b919092602061074095612b3895612b03612afa60408801612ab2612aad612aa8610697848d611cc8565b613fed565b6123dc565b60a06107488a606081019d8e612adb612ad6612ad16106978487611cc8565b614065565b612428565b610738612af1612aea85611b59565b9785611cc8565b98909285611cc8565b96909401611b59565b96879260408584013593612b178180611cc8565b929091013592604051988997889763cd4cc08f60e01b89526004890161248c565b03816001603160981b015afa8015612e7f57612b5b915f91612e84575b506124d3565b60408101612b698183611cc8565b3690612b74926114e6565b612b7d90613fed565b612b869061251f565b612b908183611cc8565b3690612b9b926114e6565b612ba4906145a1565b612bb19150600114612577565b6060820195612bc08784611cc8565b3690612bcb926114e6565b612bd490614065565b612bdd906125d4565b6080830191612bec8385611cc8565b3690612bf7926114e6565b612c00906140d4565b612c099061262e565b612c139084611cc8565b3690612c1e926114e6565b612c27906134c0565b96612c328185611cc8565b3690612c3d926114e6565b90612c489085611cc8565b612c529150611cfa565b612c5b91613591565b91612c669084611cc8565b3690612c71926114e6565b612c7a906138dc565b93612c86886020015190565b958614612c929061268b565b60408801516001600160e01b03191696612cab91611cc8565b3690612cb6926114e6565b8660e01c612cc3916141af565b908151602b14612cd2906126cc565b612cdb826136b0565b612ce3612718565b612cec91613eaf565b612cf590612737565b612cfe826136f9565b612d06612795565b612d0f91613eaf565b612d18906127b5565b612d2182613742565b90612d2b91613eaf565b612d349061280b565b612d3d8161378b565b6045909701516001600160e01b03191691604051809160208201612d6091612862565b03601f1981018
252612d729082610313565b604051612d80818093612862565b03905a915f916002602094fa15612e7f57610348967fd77102e5369b5b1a9db1972cb3de26ee79abc69de5cde41eeaa67fe3939c1c5594612e3e612e34612e2e87612e218b612e138e612e499b612e449b5f5192612ddc610365565b95612de5611009565b98612dfb60a0612df483611b59565b9201611b59565b612e03610365565b916040519b8c9a60208c01612874565b03601f198101835282610313565b612e296128e7565b614267565b92613dd1565b6106d38151611cfa565b916142cf565b61290f565b612e51610339565b8381526001600160e01b03198516602082015290612e7760075460405193849384612992565b0390a161235c565b6124c8565b612ea6915060203d602011612eac575b612e9e8183610313565b810190612474565b5f612b55565b503d612e94565b15612eba57565b60405162461bcd60e51b815260206004820152601660248201527513db9b1e481bdb99481a5b9c1d5d08185b1b1bddd95960521b6044820152606490fd5b15612eff57565b60405162461bcd60e51b81526020600482015260126024820152711d1e125908185b1c9958591e481cdc195b9d60721b6044820152606490fd5b600854600160401b81101561033457600181016008556008548110156102a65760085f527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30155565b15612f8957565b60405162461bcd60e51b8152602060048201526016602482015275125b9d985b1a590819195c1bdcda5d081cd8dc9a5c1d60521b6044820152606490fd5b15612fce57565b60405162461bcd60e51b8152602060048201526015602482015274092dcecc2d8d2c840e6c6e4d2e0e840e6eaccccd2f605b1b6044820152606490fd5b3d15613035573d9061301c8261034a565b9161302a6040519384610313565b82523d5f602084013e565b606090565b1561304157565b60405162461bcd60e51b815260206004820152602660248201527f4661696c656420746f2073656e6420746f206661696c6564206465706f736974604482015265081d985d5b1d60d21b6064820152608490fd5b6131106130d161316a926130b76130af60019796836132cd565b909714612eb3565b604081016107386107486106a361069e6106978587611cc8565b926130f26130ed610770610769875f52600960205260405f2090565b612ef8565b61310761078e855f52600960205260405f2090565b6107ce84612f39565b613165613160613158613124600354610c78565b61084861315361314b613138600454610c78565b9361082f89516108298761082485611bd8565b61083d6
10dfa565b612f82565b61083d610e8f565b612fc7565b61437c565b905f808080600154865af161317d61300b565b5061321a576131eb7fabd361bc68da04a386a8de9d0fb3044cca0856cbd86e9e4a63237e015b3e4bb9936131b2600854611cfa565b6040805192835260208301949094526001600160a01b0390941692810192909252426060830152608082019290925290819060a0820190565b0390a16103485f80808061320660025460018060a01b031690565b600154905af161321461300b565b5061303a565b6123d77fa82453ca34121b3ecb910d957824e27c5dc6465315949facd15fb72886490058936131b2600854611cfa565b356115378161294f565b1561325b57565b60405162461bcd60e51b815260206004820152602160248201527f5769746e657373206973206e6f742070726f7065726c7920666f726d617474656044820152601960fa1b6064820152608490fd5b9594936124c3926060949288526020880152608060408801526080870191612315565b91906132d883611b59565b906132e56020850161324a565b93604081016132f48183611cc8565b94909660608401956133068786611cc8565b608087019a916133168c89611cc8565b94909361332560a08b01611b59565b9661332f98614429565b9361333a8284611cc8565b3690613345926114e6565b61334e90613fed565b613357906123dc565b6133619083611cc8565b369061336c926114e6565b61337590614065565b61337e90612428565b6133889082611cc8565b3690613393926114e6565b61339c906145a1565b956133a991508692611cc8565b36906133b4926114e6565b906133be91614114565b6133c790613254565b6020810135906133d78180611cc8565b604080516327fe9a2560e11b81529485946133fd949201359291908790600487016132aa565b6001603160981b0191839103815a93602094fa8015612e7f57613426915f91612e8457506124d3565b9190565b1561343157565b60405162461bcd60e51b815260206004820152602260248201527f52656164206f76657272756e20647572696e6720566172496e742070617273696044820152616e6760f01b6064820152608490fd5b1561348857565b60405162461bcd60e51b815260206004820152601060248201526f2b34b7103932b0b21037bb32b9393ab760811b6044820152606490fd5b6134e36134cc826145a1565b9091906134dc5f1984141561342a565b1515613481565b60010180600111611af8576134f8818361449a565b905f19821461350a576115379261385e565b60405162461bcd60e51b815260206004820152601760248201527f42616420566172496e7420696e207363726
970745369670000000000000000006044820152606490fd5b1561355657565b60405162461bcd60e51b8152602060048201526013602482015272536c696365206f7574206f6620626f756e647360681b6044820152606490fd5b9190918215613602578260010180600111611af8578060016135bb921190816135f6575b5061354f565b60405192604081850101604052808452602182850391818401930101915b8281106135e557505050565b80518282015f1901526020016135d9565b9050825110155f6135b5565b509050604051613613602082610313565b5f815290565b91909182156136025761362f838251101561354f565b60405192604081850101604052808452602082850391818401930101915b82811061365957505050565b80518282015260200161364d565b9061367660228351101561354f565b6040519160608301604052602083528083036042602283019201915b82811061369e57505050565b80518282016001190152602001613692565b906136bf60098351101561354f565b6040519160418301604052600183528083036029602883019201915b8281106136e757505050565b805182820160071901526020016136db565b90613708600b8351101561354f565b604051916042830160405260028352808303602b602983019201915b82811061373057505050565b80518282016008190152602001613724565b90613751602b8351101561354f565b604051916062830160405260228352808303604b602983019201915b82811061377957505050565b8051828201600819015260200161376d565b9061379a602b8351101561354f565b604051916060830160405260208352808303604b602b83019201915b8281106137c257505050565b8051828201600a1901526020016137b6565b906137e360408351101561354f565b6040519160808301604052604083528083036060602083019201915b82811061380b57505050565b8051828201526020016137ff565b9061382860248351101561354f565b6040519160648301604052602483528083036044602083019201915b82811061385057505050565b805182820152602001613844565b92919081156138ca57818101808211611af8578082613884921190816138be575061354f565b604051936040838601016040528285520190602082850391818401930101915b8281106138b057505050565b8051828201526020016138a4565b9050855110155f6135b5565b50509050604051613613602082610313565b6138e65f826144d9565b5f1981146138f8575f6115379261385e565b60405162461bcd60e51b815260206004820152601560248201527442616420566172496e74206
96e207769746e65737360581b6044820152606490fd5b8051600110156102a65760210190565b9081518110156102a6570160200190565b600190611537939260ff60f81b1681520190612862565b6040519061397c604083610313565b60078252662a30b82632b0b360c91b6020830152565b96612895966115379f9e9c989660a8966128959f9c956139be9060209f9a612895906139f39f98612862565b6001600160e01b0319978816815296166004870152600886015260288501526048840152606883015260888201520190612862565b9081520190612862565b91949390929360205f613a1e613a1286613819565b60405191828092612862565b039060025afa15612e7f575f519060205f613b05612e13613a12613ae7613ad7613a5e613a526001546402540be400900490565b6001600160401b031690565b65ffff0000ffff67ffffffffffff000067ff00ff00ff00ff008360081c9360081b169264ff000000ff65ffff0000ff0065ffffffffffff67ffffffffffffff00871666ff00ff00ff00ff85161760101c16951691161760101b1691161767ffffffff0000000063ffffffff8260201c169160201b161790565b60c01b6001600160c01b03191690565b6040519283918783016008916001600160401b0360c01b1681520190565b039060025afa15612e7f5760205f613b4e612e13613a12613b33845199604563ffffffff60e01b9101511690565b60405192839187830160049163ffffffff60e01b1681520190565b039060025afa15612e7f5760205f613b77612e13613a1283519960405192839187830190612862565b039060025afa15612e7f575f5193613b8e88613d21565b607f60f91b613b9c8a613e17565b613ba590613935565b516001600160f81b031916604051928392613bc4921660208401613956565b03601f1981018252613bd69082610313565b613bde61396d565b90613be891614267565b91613bf1610365565b95613bfa610365565b97613c03610564565b92613c0c610b91565b94613c15610365565b97613c1e611631565b996040519d8e9d60208f019d613c339e613992565b03601f1981018252613c459082610313565b613c4d6128e7565b90613c5791614267565b90613c6190613dd1565b8051613c6c90611cfa565b613c7591613591565b613c7d611ef8565b91613c87926142cf565b6103489061290f565b60049061153794613cc0613cdb949561289560405197889563ffffffff60e01b1660208701526024860190612862565b9063ffffffff60e01b16815203601b19810184520182610313565b614573565b15613ce757565b60405162461bcd60e51b8152602060048201526012602482015271426164205
66172496e7420696e206974656d60701b6044820152606490fd5b90613d46613d2e836145a1565b909190613d3e5f1984141561342a565b600110613481565b60010180600111611af857915f925b60018410613d8b57611537929350613d85613d80613d7383856146fd565b6108245f19821415613ce0565b611be6565b9161385e565b613d9581836146fd565b9190613da45f19841415613ce0565b806001019283600111611af8576001910101809211611af857600191613dc991611c02565b930192613d55565b613ddd6134cc826145a1565b6001019081600111611af857613df382826146fd565b9290613e025f19851415613ce0565b8301809311611af857613d8561153793611be6565b90613e3c613e24836145a1565b909190613e345f1984141561342a565b600210613481565b60010180600111611af857915f925b60028410613e6957611537929350613d85613d80613d7383856146fd565b613e7381836146fd565b9190613e825f19841415613ce0565b806001019283600111611af8576001910101809211611af857600191613ea791611c02565b930192613e4b565b9081519181518303613f535760205b83811115613f3857613ecf90611d08565b838110613edf5750505050600190565b613efa613eec8284613945565b516001600160f81b03191690565b613f17613f0a613eec8487613945565b6001600160f81b03191690565b6001600160f81b031990911603613f3057600101613ecf565b505050505f90565b818101518382015160209092019114613ebe57505050505f90565b5050505f90565b613f65600554610c78565b60208101808211611af8578082613f8392119081613fc1575061354f565b604051916060830160405260208352018082036040602083019201915b828110613fb357505050611537906147be565b805182820152602001613fa0565b9050835110155f6135b5565b5f5160206149185f395f51905f52546001600160a01b0316330361117457565b613ff6816145a1565b91908215801561405b575b613f535760010180600111611af857915f905b808210614022575050511490565b90928251811015613f3057614037818461449a565b5f198114614052578101809111611af8579260010190614014565b50505050505f90565b505f198114614001565b61406e816145a1565b9190821580156140ca575b613f535760010180600111611af857915f905b80821061409a575050511490565b90928251811015613f30576140af818461482e565b5f198114614052578101809111611af857926001019061408c565b505f198114614079565b5f905f5b600181106140e65750511490565b918151811015613f5
3576140fa81836144d9565b5f198114613f30578101809111611af857916001016140d8565b811561415d575f915f905b80821061412d575050511490565b90928251811015613f305761414281846144d9565b5f198114614052578101809111611af857926001019061411f565b50505f90565b1561416a57565b60405162461bcd60e51b815260206004820152601a60248201527f42616420566172496e7420696e207363726970745075626b65790000000000006044820152606490fd5b9190916141bb816145a1565b6141c85f1983141561342a565b84101561422e576141d890611bf4565b5f935b80851061420657506115379293506141f3818361482e565b916142015f19841415614163565b61385e565b90614225816142176001938661482e565b906108245f19831415614163565b940193906141db565b60405162461bcd60e51b81526020600482015260116024820152702b37baba103932b0b21037bb32b9393ab760791b6044820152606490fd5b5f61427a60209260405191828092612862565b039060025afa15612e7f575f6142ae602092613a128351612e13604051938285938985015260408401526060830190612862565b039060025afa15612e7f575f5190565b916139f36115379493602093612862565b919091815160408114908115614371575b501561432c575f9261430a6142f585946137d4565b91612e136040519384926020840196876142be565b51906102005afa5061153761431d61300b565b60208082518301019101612474565b60405162461bcd60e51b815260206004820152601860248201527f496e76616c6964207369676e6174757265206c656e67746800000000000000006044820152606490fd5b60419150145f6142e0565b614387600354610c78565b60148101808211611af85780826143a592119081613fc1575061354f565b604051916054830160405260148352018082036034602083019201915b82811061441b5750505060208151910151906bffffffffffffffffffffffff19821691601482106143f6575b505060601c90565b6bffffffffffffffffffffffff1960149290920360031b82901b161690505f806143ee565b8051828201526020016143c2565b969483869482949a9896939a6040519b8c9b63ffffffff60e01b1660208d015261ffff60f01b1660248c015260268b0137880191602683015f81523701602601915f83528237019063ffffffff60e01b16815203601b19810182526004016144919082610313565b61153790614573565b906144a49161487c565b5f1982146144d257816025019182602511611af8570160258101809211611af857602901809111611af85790565b50505f199
0565b906144e481836146fd565b92905f19811461456a5760010180600111611af8579291905f915b83831061450d575050505090565b90919293808201808311611af85761452590846146fd565b91905f19831461455e57806001019283600111611af8576001910101809211611af85760019161455491611c02565b94930191906144ff565b505050505050505f1990565b505050505f1990565b5f602091828151910160025afa5060205f818160025afa505f5190565b60ff166001019060ff8211611af857565b906145ac5f836148ae565b9160ff83169283156146da576145d05f6108246145ca855194614590565b60ff1690565b116146d1575f60028403614627575061462061460d614607611537936145f55f611bf4565b01602001516001600160f01b03191690565b60f01c90565b61ff0060ff8260081c169160081b161790565b61ffff1690565b60048403614691575061468861465e614658611537936146465f611bf4565b01602001516001600160e01b03191690565b60e01c90565b600881811c62ff00ff1691901b63ff00ff001617601081811b63ffff00001691901c61ffff161790565b63ffffffff1690565b9290600882146146a057509190565b611537919350613a5e6146cb613a52926146b95f611bf4565b01602001516001600160c01b03191690565b60c01c90565b505f1991505f90565b506146f4919250613eec5f6146ee92613945565b60f81c90565b9060ff5f921690565b91909161470a83826148ae565b9260ff84169384156147aa57614728826108246145ca865194614590565b1161479f575f6002850361474d575061460d614607611537936145f561462094611bf4565b6004850361476c575061465e6146586115379361464661468894611bf4565b9391906008831461477e575b50509190565b6147979294506146cb613a52926146b9613a5e93611bf4565b915f80614778565b505f1992505f919050565b506146f4929350613eec906146ee92613945565b805190811561415d57602082116147de57602001519060200360031b1c90565b60405162461bcd60e51b815260206004820152602260248201527f42797465732063616e6e6f74206265206d6f7265207468616e20333220627974604482015261657360f01b6064820152608490fd5b908151816009019081600911611af857106144d25760080180600811611af857614857916146fd565b905f1981146144d257806009019182600911611af8576009910101809111611af85790565b908151816025019081602511611af857106148a55760248101809111611af8576111d3916146fd565b505f19915f9150565b9060ff6148bb8284613945565b5160f81
c146149105760fe60ff6148d28385613945565b8160f81b90511660f81c1614614909576148f060ff9160fd93613945565b8160f81b90511660f81c1614614904575f90565b600290565b5050600490565b505060089056fe9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c00","sourceMap":"430:25565:10:-:0;;;;;;;;;;;;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x60806040526004361015610011575f80fd5b5f3560e01c80630659216714610275578063092ac5d4146101f85780630bd89ab7146101f85780631369ac3e146101f8578063158ef93e14610270578063198546231461026b57806323dacd29146102665780632594f107146102615780633c918b6c1461025c5780634126013714610257578063419759f514610252578063428bcd351461024d5780634379caa514610248578063471ba1e314610243578063570ca7351461023e5780635b4f894d146102395780636b0b5a94146102345780636cf7d6411461022f578063715018a61461022a578063781952a81461022557806379ba5097146102205780637ec9732a1461021b57806385fb7151146102165780638752b6b2146102115780638786dba71461020c5780638da5cb5b146102075780639072f747146102025780639a4f308d146101fd578063a0dc2758146101f8578063a670e7ed146101f3578063b2497e70146101ee578063b3ab15fb146101e9578063d761753e146101e4578063e30c3978146101df578063e613ae00146101da578063f2fde38b146101d5578063f42cb4fc146101d0578063f8e655d2146101cb5763fb11d7b9146101c6575f80fd5b6119e0565b611894565b6117ef565b611776565b611754565b611720565b6116f2565b61166f565b611654565b611609565b6103aa565b6115ed565b61153a565b6114b2565b61148d565b611455565b61139d565b6111d7565b6110dd565b6110c0565b611043565b611028565b610f3f565b610f24565b610c4d565b610bf6565b610bc7565b610bac565b610b74565b61092a565b6108da565b6105ba565b610583565b610426565b6103d5565b6102ab565b634e487b7160e01b5f52603260045260245ffd5b6008548110156102a65760085f5260205f2001905f90565b61027a565b346102fb5760203660031901126102fb576004356008548110156102fb5760209060085f527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30154604051908152f35b5f80fd5b634e487b7160e01b5f52604160045260245ffd5b90601f8019910116
81019081106001600160401b0382111761033457604052565b6102ff565b60405190610348604083610313565b565b6001600160401b03811161033457601f01601f191660200190565b60405190610374604083610313565b600182525f6020830152565b602060409281835280519182918282860152018484015e5f828201840152601f01601f1916010190565b346102fb575f3660031901126102fb576103d16103c5610365565b60405191829182610380565b0390f35b346102fb575f3660031901126102fb57602060ff5f54166040519015158152f35b9181601f840112156102fb578235916001600160401b0383116102fb576020808501948460051b0101116102fb57565b60403660031901126102fb576004356001600160401b0381116102fb576104519036906004016103f6565b6024356001600160401b0381116102fb576104709036906004016103f6565b929083830361052d5761048f61048884600154611ae5565b3414611afd565b600754935f5b84811061049e57005b807f3311a04a346a103ac115cca33028a2bc82f1964805860d0d3fc84a2772496ada6104cd6001938888611b49565b356105006104e46104df85888a611b49565b611b59565b6104ec610339565b9283526001600160e01b0319166020830152565b61050981611b63565b610513838a611c02565b610524604051928392429184611c0f565b0390a101610495565b60405162461bcd60e51b815260206004820152600f60248201526e098cadccee8d040dad2e6dac2e8c6d608b1b6044820152606490fd5b60405190610573604083610313565b60018252600160f91b6020830152565b346102fb575f3660031901126102fb576103d16103c5610564565b908160c09103126102fb5790565b908160609103126102fb5790565b346102fb5760803660031901126102fb576004356001600160401b0381116102fb576105ea90369060040161059e565b6024356001600160401b0381116102fb576106099036906004016105ac565b906044356064355f5490929060081c6001600160a01b031633036108955761074e6108909161067b7f4d7c644a48da4c7857af62a00bad9806f0388564f22955ed846d938c244047f0966106606008548710611c40565b61067561066e600554610c78565b1515611c7c565b826132cd565b5050604081016107386107486106a361069e6106978587611cc8565b36916114e6565b6134c0565b9761074060608601956107126106d96106bf6106978a85611cc8565b6106d36106cc8b86611cc8565b9050611cfa565b90613591565b936106f26106ed6106976080860186611cc8565b6138dc565b9c8d6106fd85611b59565b9060a0860197
61070c89611b59565b936139fd565b61073061072861072183611b59565b9783611cc8565b989092611cc8565b959093611b59565b9636916114e6565b9236916114e6565b91613c90565b610779610774610770610769845f52600960205260405f2090565b5460ff1690565b1590565b611d24565b61079b61078e825f52600960205260405f2090565b805460ff19166001179055565b61087361086b6107e96107ba6107b08761028e565b90549060031b1c90565b966107ce856107c88961028e565b90611d70565b6107e460036107dc836145a1565b905014611d8d565b613d21565b6108666108616108596107fd600554610c78565b610848610843610835610811600654610c78565b9361082f89516108298761082485611bca565b611c02565b14611dd1565b88613619565b61083d610cb0565b90613eaf565b611e15565b610853818651611d17565b8561385e565b61083d610d65565b611e61565b613f5a565b948514611ead565b604051938493846040919493926060820195825260208201520152565b0390a1005b60405162461bcd60e51b815260206004820152601a60248201527f63616c6c6572206973206e6f7420746865206f70657261746f720000000000006044820152606490fd5b346102fb575f3660031901126102fb576103d16103c56108f8610dfa565b613667565b9181601f840112156102fb578235916001600160401b0383116102fb57602083818601950101116102fb57565b346102fb5760603660031901126102fb576004356001600160401b0381116102fb5761095a9036906004016108fd565b6024356001600160401b0381116102fb576109799036906004016108fd565b9092906044359073deaddeaddeaddeaddeaddeaddeaddeaddeaddead3303610b2f5760ff5f5416610aea577f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc94610a0c836109d8610aa8951515611f03565b6109e3871515611f4f565b6109f3600160ff195f5416175f55565b6109fd878761208a565b610a07848461216b565b600155565b5f8054610100600160a81b03191674deaddeaddeaddeaddeaddeaddeaddeaddeaddead00179055600280546001600160a01b0319166007603160981b011790557ffbe5b6cbafb274f445d7fed869dc77a838d8243a22c460de156560e8857cad0360405180610a99819073deaddeaddeaddeaddeaddeaddeaddeaddeaddead602060408401935f81520152565b0390a160405194859485612335565b0390a1604080515f81526007603160981b0160208201527f79250b96878fd457364d1c1b77a660973c4f4ab67bda5e2fdb42caaa4d515f9d9181908101610890565b60
405162461bcd60e51b815260206004820152601f60248201527f436f6e747261637420697320616c726561647920696e697469616c697a6564006044820152606490fd5b60405162461bcd60e51b815260206004820152601f60248201527f63616c6c6572206973206e6f74207468652073797374656d2063616c6c6572006044820152606490fd5b346102fb575f3660031901126102fb576020600154604051908152f35b60405190610ba0604083610313565b600482525f6020830152565b346102fb575f3660031901126102fb576103d16103c5610b91565b346102fb5760203660031901126102fb576004355f526009602052602060ff60405f2054166040519015158152f35b346102fb5760203660031901126102fb576004356007548110156102fb576007548110156102a65760409060075f5260205f209060011b016001815491015460e01b825191825263ffffffff60e01b166020820152f35b346102fb575f3660031901126102fb575f5460405160089190911c6001600160a01b03168152602090f35b90600182811c92168015610ca6575b6020831014610c9257565b634e487b7160e01b5f52602260045260245ffd5b91607f1691610c87565b604051905f8260055491610cc383610c78565b8083529260018116908115610d465750600114610ce7575b61034892500383610313565b5060055f90815290917f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db05b818310610d2a57505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610d12565b6020925061034894915060ff191682840152151560051b820101610cdb565b604051905f8260065491610d7883610c78565b8083529260018116908115610d465750600114610d9b5761034892500383610313565b5060065f90815290917ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f5b818310610dde57505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610dc6565b604051905f8260035491610e0d83610c78565b8083529260018116908115610d465750600114610e305761034892500383610313565b5060035f90815290917fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b5b818310610e7357505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610e5b565b604051905f8260045491610ea283610c78565b8083529260018116908115610d465750600114610ec55761034892500383610313565b5060045f90815290917f8a35acfb
c15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b5b818310610f0857505090602061034892820101610cdb565b6020919350806001915483858901015201910190918492610ef0565b346102fb575f3660031901126102fb576103d16103c5610dfa565b346102fb575f3660031901126102fb576040515f600554610f5f81610c78565b8084529060018116908115610fe55750600114610f87575b6103d1836103c581850382610313565b60055f9081527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0939250905b808210610fcb575090915081016020016103c5610f77565b919260018160209254838588010152019101909291610fb3565b60ff191660208086019190915291151560051b840190910191506103c59050610f77565b60405190611018604083610313565b60018252608360f81b6020830152565b346102fb575f3660031901126102fb576103d16103c5611009565b346102fb575f3660031901126102fb5761105b613fcd565b5f5160206149385f395f51905f5280546001600160a01b03199081169091555f5160206149185f395f51905f52805491821690555f906001600160a01b03167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e08280a3005b346102fb575f3660031901126102fb576020600754604051908152f35b346102fb575f3660031901126102fb575f5160206149385f395f51905f5254336001600160a01b0390911603611174575f5160206149385f395f51905f5280546001600160a01b03199081169091555f5160206149185f395f51905f5280543392811683179091556001600160a01b03167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e05f80a3005b63118cdaa760e01b5f523360045260245ffd5b60406003198201126102fb576004356001600160401b0381116102fb57816111b1916004016108fd565b92909291602435906001600160401b0382116102fb576111d3916004016108fd565b9091565b346102fb576111e536611187565b6111f0939193613fcd565b8215611336576001600160401b0383116103345761121883611213600554610c78565b611f9b565b5f93601f841160011461129057906108909161126b85807f6c9ac69a5e351d3e7ac9be95040d29a264d1ce6a409ca9f042c64c66c3f2a23a985f91611285575b508160011b915f199060031b1c19161790565b6005555b6112798282612240565b60405194859485612335565b90508601355f611258565b60055f52601f1984167f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db090
5f5b81811061131e5750907f6c9ac69a5e351d3e7ac9be95040d29a264d1ce6a409ca9f042c64c66c3f2a23a968661089095949310611305575b5050600185811b0160055561126f565b8501355f19600388901b60f8161c191690555f806112f5565b858801358355602097880197600190930192016112bd565b60405162461bcd60e51b815260206004820152601e60248201527f5265706c616365207363726970742063616e6e6f7420626520656d70747900006044820152606490fd5b60209060031901126102fb576004356001600160a01b03811681036102fb5790565b346102fb576113ab3661137b565b6113b3613fcd565b6001600160a01b03811690811561141e57600280546001600160a01b031981169093179055604080516001600160a01b0393841681529290911660208301527f79250b96878fd457364d1c1b77a660973c4f4ab67bda5e2fdb42caaa4d515f9d919081908101610890565b60405162461bcd60e51b815260206004820152600f60248201526e496e76616c6964206164647265737360881b6044820152606490fd5b346102fb575f3660031901126102fb576103d16103c5610e8f565b6001600160e01b03198116036102fb57565b359061034882611470565b60403660031901126102fb576114b06024356004356114ab82611470565b61235c565b005b346102fb575f3660031901126102fb575f5160206149185f395f51905f52546040516001600160a01b039091168152602090f35b9291926114f28261034a565b916115006040519384610313565b8294818452818301116102fb578281602093845f960137010152565b9080601f830112156102fb57816020611537933591016114e6565b90565b60a03660031901126102fb576004356001600160401b0381116102fb5761156590369060040161059e565b6024356001600160401b0381116102fb576115849036906004016105ac565b906044356001600160401b0381116102fb576115a490369060040161059e565b6064356001600160401b0381116102fb576115c39036906004016108fd565b91608435946001600160401b0386116102fb576115e76114b096369060040161151c565b94612a7e565b346102fb575f3660031901126102fb5760206040516102008152f35b346102fb575f3660031901126102fb576002546040516001600160a01b039091168152602090f35b60405190611640604083610313565b600482526001600160e01b03196020830152565b346102fb575f3660031901126102fb576103d16103c5611631565b346102fb577ffbe5b6cbafb274f445d7fed869dc77a838d8243a22c460de156560e8857cad0361169e3661137b565b6116a6613f
cd565b5f8054610100600160a81b031916600883811b610100600160a81b03169190911791829055604080519290911c6001600160a01b03908116835290921660208201529081908101610890565b346102fb575f3660031901126102fb57602060405173deaddeaddeaddeaddeaddeaddeaddeaddeaddead8152f35b346102fb575f3660031901126102fb575f5160206149385f395f51905f52546040516001600160a01b039091168152602090f35b346102fb575f3660031901126102fb576040516001603160981b018152602090f35b346102fb576117843661137b565b61178c613fcd565b5f5160206149385f395f51905f5280546001600160a01b0319166001600160a01b039283169081179091555f5160206149185f395f51905f52549091167f38d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e227005f80a3005b346102fb575f3660031901126102fb576040515f60065461180f81610c78565b8084529060018116908115610fe55750600114611836576103d1836103c581850382610313565b60065f9081527ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f939250905b80821061187a575090915081016020016103c5610f77565b919260018160209254838588010152019101909291611862565b346102fb576118a236611187565b6118ad939193613fcd565b6118b8831515611f4f565b6001600160401b038311610334576118da836118d5600354610c78565b611feb565b5f93601f841160011461193a57906108909161192c85807f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc985f9161128557508160011b915f199060031b1c19161790565b6003555b611279828261216b565b60035f52601f1984167fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b905f5b8181106119c85750907f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc9686610890959493106119af575b5050600185811b01600355611930565b8501355f19600388901b60f8161c191690555f8061199f565b85880135835560209788019760019093019201611967565b346102fb5760603660031901126102fb576004356001600160401b0381116102fb57611a1090369060040161059e565b6024356001600160401b0381116102fb57611a2f9036906004016105ac565b6044359073deaddeaddeaddeaddeaddeaddeaddeaddeaddead33148015611ab8575b15611a5f576114b092613095565b60405162461bcd60e51b815260206004820152602b60248201527f63616c6c6572206973206e6f7420746865
2073797374656d2063616c6c65722060448201526a37b91037b832b930ba37b960a91b6064820152608490fd5b505f543360089190911c6001600160a01b031614611a51565b634e487b7160e01b5f52601160045260245ffd5b81810292918115918404141715611af857565b611ad1565b15611b0457565b60405162461bcd60e51b815260206004820152601760248201527f496e76616c696420776974686472617720616d6f756e740000000000000000006044820152606490fd5b91908110156102a65760051b0190565b3561153781611470565b600754600160401b81101561033457600181016007556007548110156102a65760075f5260011b7fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c688016001602091835181550191015160e01c63ffffffff19825416179055565b9060208201809211611af857565b9060148201809211611af857565b9060018201809211611af857565b6001019081600111611af857565b91908201809211611af857565b606091949392611c37826080810197602090805183528163ffffffff60e01b91015116910152565b60408201520152565b15611c4757565b60405162461bcd60e51b815260206004820152600d60248201526c092dcecc2d8d2c840d2dcc8caf609b1b6044820152606490fd5b15611c8357565b60405162461bcd60e51b815260206004820152601960248201527f5265706c61636520736372697074206973206e6f7420736574000000000000006044820152606490fd5b903590601e19813603018212156102fb57018035906001600160401b0382116102fb576020019181360383136102fb57565b5f19810191908211611af857565b601f19810191908211611af857565b91908203918211611af857565b15611d2b57565b60405162461bcd60e51b815260206004820152601c60248201527f7478496420616c7265616479207573656420746f207265706c616365000000006044820152606490fd5b91611d899183549060031b91821b915f19901b19161790565b9055565b15611d9457565b60405162461bcd60e51b8152602060048201526015602482015274496e76616c6964207769746e657373206974656d7360581b6044820152606490fd5b15611dd857565b60405162461bcd60e51b8152602060048201526015602482015274092dcecc2d8d2c840e6c6e4d2e0e840d8cadccee8d605b1b6044820152606490fd5b15611e1c57565b60405162461bcd60e51b815260206004820152601d60248201527f496e76616c6964207265706c61636520736372697074207072656669780000006044820152606490fd5b15611e6857565b60405162461bcd60e51b81
5260206004820152601d60248201527f496e76616c6964207265706c61636520736372697074207375666669780000006044820152606490fd5b15611eb457565b606460405162461bcd60e51b815260206004820152602060248201527f496e76616c6964207478496420746f207265706c6163652070726f76696465646044820152fd5b6115376108f8610dfa565b15611f0a57565b60405162461bcd60e51b815260206004820152601a60248201527f4465706f73697420616d6f756e742063616e6e6f7420626520300000000000006044820152606490fd5b15611f5657565b60405162461bcd60e51b815260206004820152601e60248201527f4465706f736974207363726970742063616e6e6f7420626520656d70747900006044820152606490fd5b601f8111611fa7575050565b60055f5260205f20906020601f840160051c83019310611fe1575b601f0160051c01905b818110611fd6575050565b5f8155600101611fcb565b9091508190611fc2565b601f8111611ff7575050565b60035f5260205f20906020601f840160051c83019310612031575b601f0160051c01905b818110612026575050565b5f815560010161201b565b9091508190612012565b601f821161204857505050565b5f5260205f20906020601f840160051c83019310612080575b601f0160051c01905b818110612075575050565b5f815560010161206a565b9091508190612061565b91906001600160401b038111610334576120b0816120a9600354610c78565b600361203b565b5f601f82116001146120ee5781906120de93945f926120e3575b50508160011b915f199060031b1c19161790565b600355565b013590505f806120ca565b60035f52601f198216937fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b915f5b868110612153575083600195961061213a575b505050811b01600355565b01355f19600384901b60f8161c191690555f808061212f565b9092602060018192868601358155019401910161211c565b91906001600160401b038111610334576121918161218a600454610c78565b600461203b565b5f601f82116001146121c35781906121be93945f926120e35750508160011b915f199060031b1c19161790565b600455565b60045f52601f198216937f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b915f5b868110612228575083600195961061220f575b505050811b01600455565b01355f19600384901b60f8161c191690555f8080612204565b909260206001819286860135815501940191016121f1565b91906001600160401b038111610334576122668161225f60065461
0c78565b600661203b565b5f601f821160011461229857819061229393945f926120e35750508160011b915f199060031b1c19161790565b600655565b60065f52601f198216937ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f915f5b8681106122fd57508360019596106122e4575b505050811b01600655565b01355f19600384901b60f8161c191690555f80806122d9565b909260206001819286860135815501940191016122c6565b908060209392818452848401375f828201840152601f01601f1916010190565b929061234e906115379593604086526040860191612315565b926020818503910152612315565b9061236a6001543414611afd565b6040519060408201928284106001600160401b03851117610334577f3311a04a346a103ac115cca33028a2bc82f1964805860d0d3fc84a2772496ada93604052825263ffffffff60e01b1660208201526007546123c682611b63565b6123d7604051928392429184611c0f565b0390a1565b156123e357565b60405162461bcd60e51b815260206004820152601d60248201527f56696e206973206e6f742070726f7065726c7920666f726d61747465640000006044820152606490fd5b1561242f57565b60405162461bcd60e51b815260206004820152601e60248201527f566f7574206973206e6f742070726f7065726c7920666f726d617474656400006044820152606490fd5b908160209103126102fb575180151581036102fb5790565b97969591926124c394608096946124b5938b5260208b015260a060408b015260a08a0191612315565b918783036060890152612315565b930152565b6040513d5f823e3d90fd5b156124da57565b60405162461bcd60e51b815260206004820152601b60248201527f5472616e73616374696f6e206973206e6f7420696e20626c6f636b00000000006044820152606490fd5b1561252657565b60405162461bcd60e51b8152602060048201526024808201527f5061796f75742076696e206973206e6f742070726f7065726c7920666f726d616044820152631d1d195960e21b6064820152608490fd5b1561257e57565b60405162461bcd60e51b815260206004820152602860248201527f5061796f75742076696e2073686f756c6420686176652065786163746c79206f6044820152671b99481a5b9c1d5d60c21b6064820152608490fd5b156125db57565b60405162461bcd60e51b815260206004820152602560248201527f5061796f757420766f7574206973206e6f742070726f7065726c7920666f726d604482015264185d1d195960da1b6064820152608490fd5b1561263557565b60405162461bcd60e51b81526020
6004820152602860248201527f5061796f7574207769746e657373206973206e6f742070726f7065726c7920666044820152671bdc9b585d1d195960c21b6064820152608490fd5b1561269257565b60405162461bcd60e51b8152602060048201526012602482015271125b9d985b1a59081cdc195b9d081d1e125960721b6044820152606490fd5b156126d357565b60405162461bcd60e51b815260206004820152601b60248201527f496e76616c6964207370656e74206f7574707574206c656e67746800000000006044820152606490fd5b60405190612727604083610313565b60018252601160f91b6020830152565b1561273e57565b60405162461bcd60e51b815260206004820152602960248201527f496e76616c6964207370656e74206f757470757420736372697074207075626b6044820152680caf240d8cadccee8d60bb1b6064820152608490fd5b604051906127a4604083610313565b6002825261028960f51b6020830152565b156127bc57565b60405162461bcd60e51b815260206004820152602160248201527f5370656e74206f7574707574206973206e6f7420612050325452206f757470756044820152601d60fa1b6064820152608490fd5b1561281257565b60405162461bcd60e51b815260206004820152602260248201527f496e76616c6964207370656e74206f757470757420736372697074207075626b604482015261657960f01b6064820152608490fd5b805191908290602001825e015f815290565b6128d0979360249b99979561289b6128b7956128958f9a9695600896612862565b90612862565b6001600160e01b03199283168152911660048201520190612862565b9182526001600160e01b03191660208201520190612862565b6001600160e01b0319909216825260048201520190565b604051906128f6604083610313565b600a8252690a8c2e0a6d2ced0c2e6d60b31b6020830152565b1561291657565b60405162461bcd60e51b8152602060048201526011602482015270496e76616c6964207369676e617475726560781b6044820152606490fd5b6001600160f01b03198116036102fb57565b9035601e19823603018112156102fb5701602081359101916001600160401b0382116102fb5781360383136102fb57565b612a7a606092959493956080835280356129ab81611470565b6001600160e01b031916608084015260208101356129c88161294f565b61ffff60f01b1660a0840152612a5a612a4860a0612a41612a21612a036129f26040880188612961565b60c0808c01526101408b0191612315565b612a0f8a880188612961565b8a8303607f190160e08c015290612315565b612a2e60808701876129
61565b898303607f19016101008b015290612315565b9301611482565b6001600160e01b031916610120850152565b8651602084810191909152909601516001600160e01b0319166040830152565b0152565b919092602061074095612b3895612b03612afa60408801612ab2612aad612aa8610697848d611cc8565b613fed565b6123dc565b60a06107488a606081019d8e612adb612ad6612ad16106978487611cc8565b614065565b612428565b610738612af1612aea85611b59565b9785611cc8565b98909285611cc8565b96909401611b59565b96879260408584013593612b178180611cc8565b929091013592604051988997889763cd4cc08f60e01b89526004890161248c565b03816001603160981b015afa8015612e7f57612b5b915f91612e84575b506124d3565b60408101612b698183611cc8565b3690612b74926114e6565b612b7d90613fed565b612b869061251f565b612b908183611cc8565b3690612b9b926114e6565b612ba4906145a1565b612bb19150600114612577565b6060820195612bc08784611cc8565b3690612bcb926114e6565b612bd490614065565b612bdd906125d4565b6080830191612bec8385611cc8565b3690612bf7926114e6565b612c00906140d4565b612c099061262e565b612c139084611cc8565b3690612c1e926114e6565b612c27906134c0565b96612c328185611cc8565b3690612c3d926114e6565b90612c489085611cc8565b612c529150611cfa565b612c5b91613591565b91612c669084611cc8565b3690612c71926114e6565b612c7a906138dc565b93612c86886020015190565b958614612c929061268b565b60408801516001600160e01b03191696612cab91611cc8565b3690612cb6926114e6565b8660e01c612cc3916141af565b908151602b14612cd2906126cc565b612cdb826136b0565b612ce3612718565b612cec91613eaf565b612cf590612737565b612cfe826136f9565b612d06612795565b612d0f91613eaf565b612d18906127b5565b612d2182613742565b90612d2b91613eaf565b612d349061280b565b612d3d8161378b565b6045909701516001600160e01b03191691604051809160208201612d6091612862565b03601f1981018252612d729082610313565b604051612d80818093612862565b03905a915f916002602094fa15612e7f57610348967fd77102e5369b5b1a9db1972cb3de26ee79abc69de5cde41eeaa67fe3939c1c5594612e3e612e34612e2e87612e218b612e138e612e499b612e449b5f5192612ddc610365565b95612de5611009565b98612dfb60a0612df483611b59565b9201611b59565b612e03610365565b916040519b8c9a60208c01612874565b
03601f198101835282610313565b612e296128e7565b614267565b92613dd1565b6106d38151611cfa565b916142cf565b61290f565b612e51610339565b8381526001600160e01b03198516602082015290612e7760075460405193849384612992565b0390a161235c565b6124c8565b612ea6915060203d602011612eac575b612e9e8183610313565b810190612474565b5f612b55565b503d612e94565b15612eba57565b60405162461bcd60e51b815260206004820152601660248201527513db9b1e481bdb99481a5b9c1d5d08185b1b1bddd95960521b6044820152606490fd5b15612eff57565b60405162461bcd60e51b81526020600482015260126024820152711d1e125908185b1c9958591e481cdc195b9d60721b6044820152606490fd5b600854600160401b81101561033457600181016008556008548110156102a65760085f527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30155565b15612f8957565b60405162461bcd60e51b8152602060048201526016602482015275125b9d985b1a590819195c1bdcda5d081cd8dc9a5c1d60521b6044820152606490fd5b15612fce57565b60405162461bcd60e51b8152602060048201526015602482015274092dcecc2d8d2c840e6c6e4d2e0e840e6eaccccd2f605b1b6044820152606490fd5b3d15613035573d9061301c8261034a565b9161302a6040519384610313565b82523d5f602084013e565b606090565b1561304157565b60405162461bcd60e51b815260206004820152602660248201527f4661696c656420746f2073656e6420746f206661696c6564206465706f736974604482015265081d985d5b1d60d21b6064820152608490fd5b6131106130d161316a926130b76130af60019796836132cd565b909714612eb3565b604081016107386107486106a361069e6106978587611cc8565b926130f26130ed610770610769875f52600960205260405f2090565b612ef8565b61310761078e855f52600960205260405f2090565b6107ce84612f39565b613165613160613158613124600354610c78565b61084861315361314b613138600454610c78565b9361082f89516108298761082485611bd8565b61083d610dfa565b612f82565b61083d610e8f565b612fc7565b61437c565b905f808080600154865af161317d61300b565b5061321a576131eb7fabd361bc68da04a386a8de9d0fb3044cca0856cbd86e9e4a63237e015b3e4bb9936131b2600854611cfa565b6040805192835260208301949094526001600160a01b0390941692810192909252426060830152608082019290925290819060a0820190565b0390a16103485f808080613206600254
60018060a01b031690565b600154905af161321461300b565b5061303a565b6123d77fa82453ca34121b3ecb910d957824e27c5dc6465315949facd15fb72886490058936131b2600854611cfa565b356115378161294f565b1561325b57565b60405162461bcd60e51b815260206004820152602160248201527f5769746e657373206973206e6f742070726f7065726c7920666f726d617474656044820152601960fa1b6064820152608490fd5b9594936124c3926060949288526020880152608060408801526080870191612315565b91906132d883611b59565b906132e56020850161324a565b93604081016132f48183611cc8565b94909660608401956133068786611cc8565b608087019a916133168c89611cc8565b94909361332560a08b01611b59565b9661332f98614429565b9361333a8284611cc8565b3690613345926114e6565b61334e90613fed565b613357906123dc565b6133619083611cc8565b369061336c926114e6565b61337590614065565b61337e90612428565b6133889082611cc8565b3690613393926114e6565b61339c906145a1565b956133a991508692611cc8565b36906133b4926114e6565b906133be91614114565b6133c790613254565b6020810135906133d78180611cc8565b604080516327fe9a2560e11b81529485946133fd949201359291908790600487016132aa565b6001603160981b0191839103815a93602094fa8015612e7f57613426915f91612e8457506124d3565b9190565b1561343157565b60405162461bcd60e51b815260206004820152602260248201527f52656164206f76657272756e20647572696e6720566172496e742070617273696044820152616e6760f01b6064820152608490fd5b1561348857565b60405162461bcd60e51b815260206004820152601060248201526f2b34b7103932b0b21037bb32b9393ab760811b6044820152606490fd5b6134e36134cc826145a1565b9091906134dc5f1984141561342a565b1515613481565b60010180600111611af8576134f8818361449a565b905f19821461350a576115379261385e565b60405162461bcd60e51b815260206004820152601760248201527f42616420566172496e7420696e207363726970745369670000000000000000006044820152606490fd5b1561355657565b60405162461bcd60e51b8152602060048201526013602482015272536c696365206f7574206f6620626f756e647360681b6044820152606490fd5b9190918215613602578260010180600111611af8578060016135bb921190816135f6575b5061354f565b60405192604081850101604052808452602182850391818401930101915b8281106135e557505050
565b80518282015f1901526020016135d9565b9050825110155f6135b5565b509050604051613613602082610313565b5f815290565b91909182156136025761362f838251101561354f565b60405192604081850101604052808452602082850391818401930101915b82811061365957505050565b80518282015260200161364d565b9061367660228351101561354f565b6040519160608301604052602083528083036042602283019201915b82811061369e57505050565b80518282016001190152602001613692565b906136bf60098351101561354f565b6040519160418301604052600183528083036029602883019201915b8281106136e757505050565b805182820160071901526020016136db565b90613708600b8351101561354f565b604051916042830160405260028352808303602b602983019201915b82811061373057505050565b80518282016008190152602001613724565b90613751602b8351101561354f565b604051916062830160405260228352808303604b602983019201915b82811061377957505050565b8051828201600819015260200161376d565b9061379a602b8351101561354f565b604051916060830160405260208352808303604b602b83019201915b8281106137c257505050565b8051828201600a1901526020016137b6565b906137e360408351101561354f565b6040519160808301604052604083528083036060602083019201915b82811061380b57505050565b8051828201526020016137ff565b9061382860248351101561354f565b6040519160648301604052602483528083036044602083019201915b82811061385057505050565b805182820152602001613844565b92919081156138ca57818101808211611af8578082613884921190816138be575061354f565b604051936040838601016040528285520190602082850391818401930101915b8281106138b057505050565b8051828201526020016138a4565b9050855110155f6135b5565b50509050604051613613602082610313565b6138e65f826144d9565b5f1981146138f8575f6115379261385e565b60405162461bcd60e51b815260206004820152601560248201527442616420566172496e7420696e207769746e65737360581b6044820152606490fd5b8051600110156102a65760210190565b9081518110156102a6570160200190565b600190611537939260ff60f81b1681520190612862565b6040519061397c604083610313565b60078252662a30b82632b0b360c91b6020830152565b96612895966115379f9e9c989660a8966128959f9c956139be9060209f9a612895906139f39f98612862565b6001600160e01b031997881681
5296166004870152600886015260288501526048840152606883015260888201520190612862565b9081520190612862565b91949390929360205f613a1e613a1286613819565b60405191828092612862565b039060025afa15612e7f575f519060205f613b05612e13613a12613ae7613ad7613a5e613a526001546402540be400900490565b6001600160401b031690565b65ffff0000ffff67ffffffffffff000067ff00ff00ff00ff008360081c9360081b169264ff000000ff65ffff0000ff0065ffffffffffff67ffffffffffffff00871666ff00ff00ff00ff85161760101c16951691161760101b1691161767ffffffff0000000063ffffffff8260201c169160201b161790565b60c01b6001600160c01b03191690565b6040519283918783016008916001600160401b0360c01b1681520190565b039060025afa15612e7f5760205f613b4e612e13613a12613b33845199604563ffffffff60e01b9101511690565b60405192839187830160049163ffffffff60e01b1681520190565b039060025afa15612e7f5760205f613b77612e13613a1283519960405192839187830190612862565b039060025afa15612e7f575f5193613b8e88613d21565b607f60f91b613b9c8a613e17565b613ba590613935565b516001600160f81b031916604051928392613bc4921660208401613956565b03601f1981018252613bd69082610313565b613bde61396d565b90613be891614267565b91613bf1610365565b95613bfa610365565b97613c03610564565b92613c0c610b91565b94613c15610365565b97613c1e611631565b996040519d8e9d60208f019d613c339e613992565b03601f1981018252613c459082610313565b613c4d6128e7565b90613c5791614267565b90613c6190613dd1565b8051613c6c90611cfa565b613c7591613591565b613c7d611ef8565b91613c87926142cf565b6103489061290f565b60049061153794613cc0613cdb949561289560405197889563ffffffff60e01b1660208701526024860190612862565b9063ffffffff60e01b16815203601b19810184520182610313565b614573565b15613ce757565b60405162461bcd60e51b815260206004820152601260248201527142616420566172496e7420696e206974656d60701b6044820152606490fd5b90613d46613d2e836145a1565b909190613d3e5f1984141561342a565b600110613481565b60010180600111611af857915f925b60018410613d8b57611537929350613d85613d80613d7383856146fd565b6108245f19821415613ce0565b611be6565b9161385e565b613d9581836146fd565b9190613da45f19841415613ce0565b806001019283600111611af8576001
910101809211611af857600191613dc991611c02565b930192613d55565b613ddd6134cc826145a1565b6001019081600111611af857613df382826146fd565b9290613e025f19851415613ce0565b8301809311611af857613d8561153793611be6565b90613e3c613e24836145a1565b909190613e345f1984141561342a565b600210613481565b60010180600111611af857915f925b60028410613e6957611537929350613d85613d80613d7383856146fd565b613e7381836146fd565b9190613e825f19841415613ce0565b806001019283600111611af8576001910101809211611af857600191613ea791611c02565b930192613e4b565b9081519181518303613f535760205b83811115613f3857613ecf90611d08565b838110613edf5750505050600190565b613efa613eec8284613945565b516001600160f81b03191690565b613f17613f0a613eec8487613945565b6001600160f81b03191690565b6001600160f81b031990911603613f3057600101613ecf565b505050505f90565b818101518382015160209092019114613ebe57505050505f90565b5050505f90565b613f65600554610c78565b60208101808211611af8578082613f8392119081613fc1575061354f565b604051916060830160405260208352018082036040602083019201915b828110613fb357505050611537906147be565b805182820152602001613fa0565b9050835110155f6135b5565b5f5160206149185f395f51905f52546001600160a01b0316330361117457565b613ff6816145a1565b91908215801561405b575b613f535760010180600111611af857915f905b808210614022575050511490565b90928251811015613f3057614037818461449a565b5f198114614052578101809111611af8579260010190614014565b50505050505f90565b505f198114614001565b61406e816145a1565b9190821580156140ca575b613f535760010180600111611af857915f905b80821061409a575050511490565b90928251811015613f30576140af818461482e565b5f198114614052578101809111611af857926001019061408c565b505f198114614079565b5f905f5b600181106140e65750511490565b918151811015613f53576140fa81836144d9565b5f198114613f30578101809111611af857916001016140d8565b811561415d575f915f905b80821061412d575050511490565b90928251811015613f305761414281846144d9565b5f198114614052578101809111611af857926001019061411f565b50505f90565b1561416a57565b60405162461bcd60e51b815260206004820152601a60248201527f42616420566172496e7420696e207363726970745075
626b65790000000000006044820152606490fd5b9190916141bb816145a1565b6141c85f1983141561342a565b84101561422e576141d890611bf4565b5f935b80851061420657506115379293506141f3818361482e565b916142015f19841415614163565b61385e565b90614225816142176001938661482e565b906108245f19831415614163565b940193906141db565b60405162461bcd60e51b81526020600482015260116024820152702b37baba103932b0b21037bb32b9393ab760791b6044820152606490fd5b5f61427a60209260405191828092612862565b039060025afa15612e7f575f6142ae602092613a128351612e13604051938285938985015260408401526060830190612862565b039060025afa15612e7f575f5190565b916139f36115379493602093612862565b919091815160408114908115614371575b501561432c575f9261430a6142f585946137d4565b91612e136040519384926020840196876142be565b51906102005afa5061153761431d61300b565b60208082518301019101612474565b60405162461bcd60e51b815260206004820152601860248201527f496e76616c6964207369676e6174757265206c656e67746800000000000000006044820152606490fd5b60419150145f6142e0565b614387600354610c78565b60148101808211611af85780826143a592119081613fc1575061354f565b604051916054830160405260148352018082036034602083019201915b82811061441b5750505060208151910151906bffffffffffffffffffffffff19821691601482106143f6575b505060601c90565b6bffffffffffffffffffffffff1960149290920360031b82901b161690505f806143ee565b8051828201526020016143c2565b969483869482949a9896939a6040519b8c9b63ffffffff60e01b1660208d015261ffff60f01b1660248c015260268b0137880191602683015f81523701602601915f83528237019063ffffffff60e01b16815203601b19810182526004016144919082610313565b61153790614573565b906144a49161487c565b5f1982146144d257816025019182602511611af8570160258101809211611af857602901809111611af85790565b50505f1990565b906144e481836146fd565b92905f19811461456a5760010180600111611af8579291905f915b83831061450d575050505090565b90919293808201808311611af85761452590846146fd565b91905f19831461455e57806001019283600111611af8576001910101809211611af85760019161455491611c02565b94930191906144ff565b505050505050505f1990565b505050505f1990565b5f602091828151910160025afa506020
5f818160025afa505f5190565b60ff166001019060ff8211611af857565b906145ac5f836148ae565b9160ff83169283156146da576145d05f6108246145ca855194614590565b60ff1690565b116146d1575f60028403614627575061462061460d614607611537936145f55f611bf4565b01602001516001600160f01b03191690565b60f01c90565b61ff0060ff8260081c169160081b161790565b61ffff1690565b60048403614691575061468861465e614658611537936146465f611bf4565b01602001516001600160e01b03191690565b60e01c90565b600881811c62ff00ff1691901b63ff00ff001617601081811b63ffff00001691901c61ffff161790565b63ffffffff1690565b9290600882146146a057509190565b611537919350613a5e6146cb613a52926146b95f611bf4565b01602001516001600160c01b03191690565b60c01c90565b505f1991505f90565b506146f4919250613eec5f6146ee92613945565b60f81c90565b9060ff5f921690565b91909161470a83826148ae565b9260ff84169384156147aa57614728826108246145ca865194614590565b1161479f575f6002850361474d575061460d614607611537936145f561462094611bf4565b6004850361476c575061465e6146586115379361464661468894611bf4565b9391906008831461477e575b50509190565b6147979294506146cb613a52926146b9613a5e93611bf4565b915f80614778565b505f1992505f919050565b506146f4929350613eec906146ee92613945565b805190811561415d57602082116147de57602001519060200360031b1c90565b60405162461bcd60e51b815260206004820152602260248201527f42797465732063616e6e6f74206265206d6f7265207468616e20333220627974604482015261657360f01b6064820152608490fd5b908151816009019081600911611af857106144d25760080180600811611af857614857916146fd565b905f1981146144d257806009019182600911611af8576009910101809111611af85790565b908151816025019081602511611af857106148a55760248101809111611af8576111d3916146fd565b505f19915f9150565b9060ff6148bb8284613945565b5160f81c146149105760fe60ff6148d28385613945565b8160f81b90511660f81c1614614909576148f060ff9160fd93613945565b8160f81b90511660f81c1614614904575f90565b600290565b5050600490565b505060089056fe9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c00","sourceMap":"430:25565:10:-:0;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;-1:-1:-1;430:25565:10;:::o;:::-;;:::i;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;1965:29;430:25565;1965:29;;;;;430:25565;;1965:29;-1:-1:-1;430:25565:10;;;;;;;;;;1965:29;-1:-1:-1;1965:29:10;;430:25565;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;:::o;:::-;;:::i;:::-;;;;;;;;:::i;:::-;:::o;:::-;-1:-1:-1;;;;;430:25565:10;;;;;;-1:-1:-1;;430:25565:10;;;;:::o;:::-;;;;;;;;:::i;:::-;;;;-1:-1:-1;430:25565:10;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;:::o;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;;;;;:::i;:::-;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;430:25565:10;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;16004:32;;;;;430:25565;;16066:77;16087:28;430:25565;16087:13;430:25565;16087:28;:::i;:::-;16074:9;:41;16066:77;:::i;:::-;16169:15;430:25565;16206:10;-1:-1:-1;16218:16:10;;;;;;430:25565;16236:3;16303:8;16425:44;16303:8;16087:13;16303:8;;;;:::i;:::-;430:25565;16274:92;16339:12;;;;;;:::i;:::-;;:::i;:::-;430:25565;;:::i;:::-;;;;-1:-1:-1;;;;;;430:25565:10;;16274:92;;430:25565;;16274:92;16380:26;;;:::i;:::-;16442:9;;;;:::i;:::-;16425:44;430:25565;;16453:15;;;;16425:44;;;:::i;:::-;;;;430:25565;16206:10;;430:25565;;;-1:-1:-1;;;430:25565:10;;;;;;;;;
;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;430:25565:10;;;;:::o;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;;;;;;;:::o;:::-;;;;;;;;;;:::o;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;;;;-1:-1:-1;430:25565:10;;;;;;-1:-1:-1;;;;;430:25565:10;2962:10;:22;430:25565;;18856:95;20546:43;430:25565;18118:43;20546;430:25565;17861:59;430:25565;;17869:33;;17861:59;:::i;:::-;17930:63;430:25565;17938:13;430:25565;;:::i;:::-;17938:25;;17930:63;:::i;:::-;18118:43;;:::i;:::-;18358:13;;430:25565;18358:13;;18932:18;430:25565;18358:36;430:25565;18358:13;;;;:::i;:::-;430:25565;;;:::i;:::-;18358:36;:::i;:::-;18426:14;430:25565;;18426:14;;;18753:16;18426:50;430:25565;18426:14;;;;:::i;430:25565::-;18450:25;:14;;;;:::i;:::-;:25;;;:::i;:::-;18426:50;;:::i;:::-;18545:17;18510:56;430:25565;18545:17;430:25565;18545:17;;;;:::i;430:25565::-;18510:56;:::i;:::-;18714:17;;;;;:::i;:::-;18733:18;;;;;;;;:::i;:::-;18753:16;;:::i;:::-;18916:14;18901:13;18882:17;;;:::i;:::-;18901:13;;;:::i;:::-;18916:14;;;;:::i;:::-;18932:18;;;;:::i;:::-;430:25565;;;;:::i;:::-;;;;;:::i;:::-;18856:95;;:::i;:::-;18961:65;18969:24;18970:23;;;430:25565;;18970:14;430:25565;;;;;;;18970:23;430:25565;;;;;18970:23;18969:24;;430:25565;18969:24;18961:65;:::i;:::-;19036:30;:23;;430:25565;;18970:14;430:25565;;;;;;;19036:23;430:25565;;-1:-1:-1;;430:25565:10;18447:1;430:25565;;;;19036:30;20464:66;20435:19;19529:48;430:25565;19173:25;;;:::i;:::-;430:25565;;;;;;;;;19208:25;:35;:25;;;;:::i;:::-;:35;;:::i;:::-;19407:45;19425:1;2510:20:1;;;:::i;:::-;19415:11:10;;;19407:45;:::i;:::-;19529:48;:::i;:::-;20324:85;20332:43;20264:50;430:25565;17938:13;430:25565;;:::i;:::-;20139:85;20147:43;20103:26;430:25565;19831:13;430:25565;;:::i;:::-;;19986:77;430:25565;;20011:26;:14;;;;:::i;:::-;:26;:::i;:::-;19994:43;19986:77;:::i;:::-;20103:26;;:::i;:::-;430:25565;;:::i;:::-;20147:43;;:::i;:::-;20139:85;:::i;:::-;20277:25;430:25565;;;20277:2
5;:::i;:::-;20264:50;;:::i;:::-;430:25565;;:::i;20332:43::-;20324:85;:::i;:::-;20435:19;:::i;:::-;20472:21;;;20464:66;:::i;:::-;430:25565;;20546:43;;;;430:25565;;;;;;;;;;;;;;;;;;20546:43;;;;430:25565;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;21976:26;430:25565;;:::i;:::-;21976:26;:::i;430:25565::-;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;;:::o;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;;;;2833:10;1102:42;2833:10;:27;430:25565;;;-1:-1:-1;430:25565:10;;;;4384:51;3793:19;4041:30;3793:19;3785:58;4384:51;3793:19;;;3785:58;:::i;:::-;3853:69;3861:26;;;3853:69;:::i;:::-;3933:18;3947:4;430:25565;;3728:11;430:25565;;;3728:11;430:25565;;3933:18;430:25565;;;;:::i;:::-;;;;;:::i;:::-;3947:4;430:25565;;4041:30;3728:11;430:25565;;-1:-1:-1;;;;;;430:25565:10;;;;;4231:72;430:25565;;-1:-1:-1;;;;;;430:25565:10;-1:-1:-1;;;;;430:25565:10;;;4327:42;430:25565;;4327:42;;;430:25565;1102:42;430:25565;;;;;;;;;;;4327:42;;;;430:25565;;4384:51;;;;;:::i;:::-;;;;430:25565;;;;;;-1:-1:-1;;;;;430:25565:10;;;;4450:90;;430:25565;;;;4450:90;430:25565;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;1728:28;430:25565;;;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;430:25565:10;;;;:::o;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;-1:-1:-1;430:25565:10;2001:46;430:25565;;;;;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;1930:29;430:25565;1930:29;;;;;;430:25565;;;;;;;;1930:29;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;;;1930:29;430:25565;;1930:29;;430:25565;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;430:25565:10;17938:13;430:25565;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;-1:-1:-1;17938:13:10;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;17938:13;430:25565;;;;;;;;;;-1:-1:-1;430:25565:10;19831:13;430:25565;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;-1:-1:-1;19831:13:10;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;430:25565:10;21976:13;430:25565;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;-1:-1:-1;21976:13:10;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;430:25565:10;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;-1:-1:-1;430:25565:10;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;;1865:26;430:25565;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;1865:26;430:25565;;;;;;;-1:-1:-1;430:25565:10;;;;;;;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;1865:26;430:25565;;;;;;;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;430:25565:10;;;;:::o;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;2303:62:6;;:::i;:::-;-1:-1:-1;;;;;;;;;;;430:25565:10;;-1:-1:-1;;;;;;430:25565:10;;;;;;-1:-1:-1;;;;;;;;;;;430:25565:10;;;;;;;-1:-1:-1;;;;;;;430:25565:10;3975:40:6;-1:-1:-1;;3975:40:6;430:25565:10;;;;;;;-1:-1:-1;;430:25565:10;;;;;16628:15;430:25565;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;-1:-1:-1;;;;;;;;;;;430:25565:10;966:10:8;-1:-1:-1;;;;;430:25565:10;;;2869:24:5;2865:96;;-1:-1:-1;;;;;;;;;;;430:25565:10;;-1:-1:-1;;;;;;430:25565:10;;;;;;-1:-1:-1;;;;;;;;;;;430:25565:10;;966:10:8;430:25565:10;;;;;;;;-1:-1:-1;;;;;430:25565:10;3975:40:6;-1:-1:-1;;3975:40:6;430:25565:10;2865:96:5;2916:34;;;430:25565:10;2916:34:5;966:10:8;430:25565:10;;;;2916:34:5;430:25565:10;;-1:-1:-1;;430:25565:10;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;:::i;:::-;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;:::i;:::-;;;:::o;:::-;;;;;;;:::i;:::-;2303:62:6;;;;;:::i;:::-;5812:26:10;;430:25565;;-1:-1:-1;;;;;430:25565:10;;;;;;;5884:30;430:2
5565;;:::i;:::-;;:::i;:::-;5837:1;430:25565;;;;;;;;;5970:51;430:25565;;;;5970:51;430:25565;5837:1;430:25565;;;;;;;;;;;;;;;;;;;;;5884:30;430:25565;;;;;;:::i;:::-;;;5970:51;;;;;:::i;430:25565::-;;;;;;;;;;5884:30;430:25565;;-1:-1:-1;;430:25565:10;;;;5837:1;430:25565;;;;;;;;5970:51;430:25565;;5970:51;430:25565;;;;;;;;;;;;;;5884:30;430:25565;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;:::o;:::-;;;;;;;:::i;:::-;2303:62:6;;:::i;:::-;-1:-1:-1;;;;;430:25565:10;;;6267:33;;430:25565;;6349:18;430:25565;;-1:-1:-1;;;;;;430:25565:10;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;6432:56;;430:25565;;;;;6432:56;430:25565;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;-1:-1:-1;;;;;;430:25565:10;;;;;:::o;:::-;;;;;;:::i;:::-;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;:::i;:::-;;:::i;:::-;;;;;;;;-1:-1:-1;;430:25565:10;;;;-1:-1:-1;;;;;;;;;;;430:25565:10;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;-1:-1:-1;430:25565:10;;;;;;:::o;:::-;;;;;;;;;;;;;;;;;;:::i;:::-;;:::o;:::-;;;-1:-1:-1;;430:25565:10;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;:::i;:::-;;;:::i;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;;1213:5;430:25565;;;;;;;;;-1:-1:-1;;430:25565:10;;;;1762:33;430:25565;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;;;;430:25565:10;;;;:::o;:::-;;;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;16904:36;430:25565;;;:::i;:::-;2303:62:6;;:::i;:::-;16869:20:10;430:25565;;-1:-1:-1;;;;;;430:25565:10;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;;;;16904:36;430:25565;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;1102:42;430:25565;;;;;;;;;-1:-1:-1;;430:25565:10;;;;-1:-
1:-1;;;;;;;;;;;430:25565:10;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;:::i;:::-;2303:62:6;;:::i;:::-;-1:-1:-1;;;;;;;;;;;430:25565:10;;-1:-1:-1;;;;;;430:25565:10;-1:-1:-1;;;;;430:25565:10;;;;;;;;;-1:-1:-1;;;;;;;;;;;430:25565:10;;;;2238:43:5;-1:-1:-1;;2238:43:5;430:25565:10;;;;;;;-1:-1:-1;;430:25565:10;;;;;;;1897:26;430:25565;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;1897:26;430:25565;;;;;;;-1:-1:-1;430:25565:10;;;;;;;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;2303:62:6;;;;;:::i;:::-;5066:69:10;5074:26;;;5066:69;:::i;:::-;-1:-1:-1;;;;;430:25565:10;;;;;;;5146:30;430:25565;;:::i;:::-;;:::i;:::-;5099:1;430:25565;;;;;;;;;5232:51;430:25565;;;;5232:51;430:25565;5099:1;430:25565;;;;;;;;;;;;;;;;;;;;5146:30;430:25565;;;;;;:::i;:::-;5146:30;430:25565;;-1:-1:-1;;430:25565:10;;;;5099:1;430:25565;;;;;;;;5232:51;430:25565;;5232:51;430:25565;;;;;;;;;;;;;;5146:30;430:25565;;;;;;;-1:-1:-1;;5146:30:10;430:25565;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;430:25565:10;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::i;:::-;;;3089:10;1102:42;3089:10;:27;:53;;;;430:25565;;;;3200:1;;;:::i;430:25565::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;3089:53;-1:-1:-1;;430:25565:10;3089:10;430:25565;;;;;-1:-1:-1;;;;;430:25565:10;3120:22;3089:53;;430:25565;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;:::i;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;;:::i;:::-;16169:15;430:25565;-1:-1:-1;;;430:25565:10;;;;;;;;16169:15;430:25565;16169:15;430:25565;;;;;;16169:15;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;;;;;;;:::o;:::-;;9456:2;430:25565;;;;;;;:::o;:::-;;4811:1:0;430:25565:10;;;;;;;:::o;:::-;22223:1:1;430:25565:10;;;22223:1:1;430:25565:10;;;:::o;:::-;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;
;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;;;;:::o;:::-;-1:-1:-1;;430:25565:10;;;;;;;;:::o;:::-;-1:-1:-1;;430:25565:10;;;;;;;;:::o;:::-;;;;;;;;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;;;;;21896:113;21976:26;430:25565;;:::i;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;5884:30;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;;;;5884:30;430:25565;;;;;;;;;;5884:30;430:25565;;;;;;;;;;;:::o;:::-;-1:-1:-1;430:25565:10;;;;;;;;;-1:-1:-1;430:25565:10;;;;;;;;;;;;:::o;:::-;5146:30;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;-1:-1:-1;430:25565:10;;;;;;;;;-1:-1:-1;430:25565:10;;;;;;;;;;;;;:::o;:::-;-1:-1:-1;430:25565:10;;-1:-1:-1;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;-1:-1:-1;430:25565:10;;;;;;;;;-1:-1:-1;430:25565:10;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;3961:30;430:25565;;:::i;:::-;3961:30;430:25565;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;3961:30;430:25565;:::o;:::-;;;;-1:-1:-1;430:25565:10;;;;;3961:30;430:25565;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;3961:30;430:25565;:::o;:::-;;;-1:-1:-1;;3961:30:10;430:25565;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;:::i;:::-;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;5924:30;430:25565;;:::i;:::-;5924:30;430
:25565;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;5924:30;430:25565;:::o;:::-;5924:30;430:25565;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;5924:30;430:25565;:::o;:::-;;;-1:-1:-1;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;430:25565:10;;;;;;;;-1:-1:-1;;430:25565:10;;;;:::o;:::-;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;:::i;10785:383::-;;10859:62;10880:13;430:25565;10867:9;:26;10859:62;:::i;:::-;430:25565;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;11121:40;430:25565;;;;;;;;;10950:72;;;430:25565;11048:15;430:25565;11080:26;;;:::i;:::-;11121:40;430:25565;;11145:15;;;;11121:40;;;:::i;:::-;;;;10785:383::o;430:25565::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;:::i;:::-;;;;:::o;:::-;;;;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;430:25565:10;;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;430:25565:10;;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;:::i;:::-;-1:-1:-1;;;;;;430:25565:10;;;;;;;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;;;;430:25565:10;;;;;;;;:::i;:::-;-1:-1:-1;;;;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;;;;
;;:::i;:::-;;;;-1:-1:-1;;;430:25565:10;;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;-1:-1:-1;;;;;;430:25565:10;;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;;430:25565:10;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;;;;:::i;:::-;;;;-1:-1:-1;;430:25565:10;;;;;;;:::i;:::-;;;;:::i;:::-;-1:-1:-1;;;;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;-1:-1:-1;;;;;;430:25565:10;;;;;;;;;:::o;11779:3781::-;;;;12389:24;430:25565;11779:3781;12354:131;11779:3781;12241:95;12301:14;12078:13;;;12049:77;12057:35;430:25565;12078:13;;;;:::i;430:25565::-;12057:35;:::i;:::-;12049:77;:::i;:::-;12317:18;430:25565;12166:14;;;;;;12136:80;12144:37;430:25565;12166:14;;;;:::i;430:25565::-;12144:37;:::i;:::-;12136:80;:::i;:::-;12317:18;12286:13;12267:17;;;:::i;:::-;12286:13;;;:::i;:::-;12301:14;;;;;:::i;:::-;12317:18;;;;;:::i;12241:95::-;12389:24;;;12078:13;12389:24;;;430:25565;12434:30;;;;;:::i;:::-;12466:18;;;;430:25565;;12078:13;430:25565;;;;;;;;;12354:131;;;;;;:::i;:::-;;1004:42;-1:-1:-1;;;;;12354:131:10;;;;;;12346:171;12354:131;12267:17;12354:131;;;11779:3781;12346:171;;:::i;:::-;12078:13;12752:12;;;;;;:::i;:::-;430:25565;;;;;:::i;:::-;12731:34;;;:::i;:::-;12723:83;;;:::i;:::-;12856:12;;;;:::i;:::-;430:25565;;;;;:::i;:::-;2510:20:1;;;:::i;:::-;12879:62:10;;-1:-1:-1;12895:1:10;12887:9;12879:62;:::i;:::-;12166:14;12981:13;;;;;;;:::i;:::-;430:25565;;;;;:::i;:::-;12959:36;;;:::i;:::-;12951:86;;;:::i;:::-;13084:16;;;;;;;;:::i;:::-;430:25565;;;;;:::i;:::-;13055:49;;;:::i;:::-;13047:102;;;:::i;:::-;13195:12;;;;:::i;:::-;430:25565;;;;;:::i;:::-;13195:35;;;:::i;:::-;13268:13;;;;;:::i;:::-;430:25565;;;;;:::i;:::-;13291:13;;;;;:::i;:::-;:24;;;;:::i;:::-;13268:48;;;:::i;:::-;13390:16;;;;;:::i;:::-;430:25565;;;;;:::i;:::-;13355:55;;;:::i;:::-;18729:17:1;;;12514:75:2;;;12417:178;;18729:17:1;13630::10;;;13622:48;;;
:::i;:::-;12514:75:2;;;;-1:-1:-1;;;;;;430:25565:10;13898:14;;;;:::i;:::-;430:25565;;;;;:::i;:::-;;;;13898:55;;;:::i;:::-;430:25565;;;13993:2;13971:24;13963:64;;;:::i;:::-;14173:23;;;:::i;:::-;430:25565;;:::i;:::-;14160:46;;;:::i;:::-;14152:100;;;:::i;:::-;14283:23;;;:::i;:::-;430:25565;;:::i;:::-;14270:48;;;:::i;:::-;14262:94;;;:::i;:::-;14410:24;;;:::i;:::-;14397:52;;;;:::i;:::-;14389:99;;;:::i;:::-;14520:25;;;:::i;:::-;12514:75:2;;;;;-1:-1:-1;;;;;;430:25565:10;;12078:13;430:25565;14654:30;;12389:24;14654:30;;430:25565;;;:::i;:::-;14654:30;430:25565;;14654:30;;;;;;;;:::i;:::-;12078:13;430:25565;;;;;;:::i;:::-;14647:38;;;;12267:17;14647:38;14304:1;12389:24;14647:38;;;;;15542:10;14647:38;15452:59;14647:38;15163:54;15089:39;15014:33;14647:38;14801:181;14647:38;14801:181;14647:38;15236:79;14647:38;15244:49;14647:38;12267:17;14647:38;430:25565;;;:::i;:::-;;;;:::i;:::-;14864:16;14882:17;12317:18;14864:16;;;:::i;:::-;14882:17;;;:::i;:::-;430:25565;;:::i;:::-;;12078:13;430:25565;14801:181;;;12389:24;14801:181;;;:::i;:::-;;430:25565;;14801:181;;;;;;:::i;:::-;430:25565;;:::i;:::-;15014:33;:::i;:::-;15089:39;;:::i;:::-;15189:27;430:25565;;15189:27;:::i;15163:54::-;15244:49;;:::i;:::-;15236:79;:::i;:::-;430:25565;;:::i;:::-;;;;-1:-1:-1;;;;;;430:25565:10;;12389:24;15358:79;;430:25565;;15452:59;15488:15;430:25565;12078:13;430:25565;15452:59;;;;;:::i;:::-;;;;15542:10;:::i;14647:38::-;;:::i;12354:131::-;;;;12389:24;12354:131;12389:24;12354:131;;;;;;;;:::i;:::-;;;;;:::i;:::-;;;;;;;;;430:25565;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;8667:12;430:25565;-1:-1:-1;;;430:25565:10;;;;;;;;8667:12;430:25565;8667:12;430:25565;;;;;;8667:12;-1:-1:-1;430:25565:10;;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;:::i;:::-;;;;-1
:-1:-1;430:25565:10;;;;:::o;:::-;;;:::o;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;7079:3368;8985:48;8475:83;9858:31;7079:3368;7763:44;7713:40;7779:1;7079:3368;;7713:40;;:::i;:::-;7771:9;;;7763:44;:::i;:::-;8004:10;;;8542:15;430:25565;8004:33;430:25565;8004:10;;;;:::i;8475:83::-;8577:20;8568:52;8576:21;8577:20;;;430:25565;;18970:14;430:25565;;;;;;;8576:21;8568:52;:::i;:::-;8630:27;:20;;430:25565;;18970:14;430:25565;;;;;;;8630:27;8667:23;;;:::i;8985:48::-;9750:77;9758:43;9690:50;430:25565;8880:1;430:25565;;:::i;:::-;9572:78;9580:43;9536:26;430:25565;9264:13;430:25565;;:::i;:::-;;9419:77;430:25565;;9444:26;:14;;;;:::i;9536:26::-;430:25565;;:::i;9580:43::-;9572:78;:::i;9690:50::-;430:25565;;:::i;9758:43::-;9750:77;:::i;:::-;9858:31;:::i;:::-;430:25565;8035:1;430:25565;;;7779:1;430:25565;9919:40;;;;;:::i;:::-;-1:-1:-1;9972:8:10;;10085:87;;430:25565;10148:23;8667:12;430:25565;10148:23;:::i;:::-;8004:10;430:25565;;;;;;;;;;;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;;10131:15;430:25565;;;;;;;;;;;;;;;;;;;10085:87;;;;10263:58;8035:1;430:25565;;;;10200:18;430:25565;;;;;;;;;;7779:1;430:25565;10200:49;;;;;:::i;:::-;;10263:58;:::i;9969:472::-;10357:73;;430:25565;10406:23;8667:12;430:25565;10406:23;:::i;430:25565::-;;;;;:::i;:::-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;20602:819::-;;;20786:11;;;:::i;:::-;20799:8;;;;;;:::i;:::-;20809:7;;;;;;;;:::i;:::-;20818:8;;;;;;;;;;;:::i;:::-;20828:11;;;;;;;;;:::i;:::-;20841:12;;;;;;;;:::i;:::-;20758:96;;;;:::i;:::-;20893:7;;;;;:::i;:::-;430:25565;;;;;:::i;:::-;20872:29;;;:::i;:::-;20864:71;;;:::i;:::-;20975:8;;;;:::i;:::-;430:25565;;;;;:::i;:::-;20953:31;;;:::i;:::-;20945:74;;;:::i;:::-;21078:7;;;;:::i;:::-;430:25565;;;;;:::i;:::-;2510:20:1;;;:::i;:::-;21184:11:10;;;;;;;:::i;:::-;430:25565;;;;;:::i;:::-;21155:47;;;;:::i;:::-;21147:93;;;:::i;:::-;20799:8;21288:17;;430:25565;21314:23;;;;;:::i;:::-;20809:7;430:25565;;-1:-1:-1;;;21259
:92:10;;430:25565;;;21259:92;;21339:11;;430:25565;;21339:11;;21259:92;;;;;;:::i;:::-;-1:-1:-1;;;;;1004:42:10;;;21259:92;1004:42;21259:92;;20799:8;21259:92;;;;;;21251:132;21259:92;-1:-1:-1;21259:92:10;;;21251:132;;:::i;:::-;21394:20;20602:819;:::o;654:66:1:-;;;;:::o;:::-;430:25565:10;;-1:-1:-1;;;654:66:1;;;;;;;;;;;430:25565:10;654:66:1;430:25565:10;;;654:66:1;-1:-1:-1;;;654:66:1;;;;;;;;;;;:::o;:::-;430:25565:10;;-1:-1:-1;;;654:66:1;;;;;;;;;;;430:25565:10;-1:-1:-1;;;430:25565:10;;;654:66:1;;;;11841:818;12147:43;2510:20;;;:::i;:::-;430:25565:10;;;12061:76:1;-1:-1:-1;;12069:29:1;;;12061:76;:::i;:::-;12155:14;;12147:43;:::i;:::-;12245:1;430:25565:10;;12245:1:1;430:25565:10;;;12508:37:1;;;;:::i;:::-;430:25565:10;-1:-1:-1;;12563:19:1;;654:66;;12627:25;;;:::i;654:66::-;430:25565:10;;-1:-1:-1;;;654:66:1;;;;;;;;;;;430:25565:10;654:66:1;430:25565:10;;;654:66:1;;;;430:25565:10;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;10344:924:2;;;;10463:12;;10459:55;;430:25565:10;18447:1;430:25565;;18447:1;430:25565;;;10569:13:2;18447:1:10;10561:70:2;10569:13;;:38;;;;10344:924;10561:70;;:::i;:::-;10642:620;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;-1:-1:-1;;10642:620:2;;;;;;10569:38;430:25565:10;;;;10586:21:2;;10569:38;;;10459:55;430:25565:10;;;;;;;;;:::i;:::-;10474:1:2;430:25565:10;;10491:12:2;:::o;10344:924::-;;;;10463:12;;10459:55;;10561:70;430:25565:10;;;10586:21:2;;10561:70;:::i;:::-;10642:620;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;;;;;;10344:924;;10561:70;430:25565:10;;;10586:21:2;;10561:70;:::i;:::-;10642:620;;;430:25565:10;10642:620:2;;;;21999:2:10;10642:620:2;;;;;;430:25565:10;10642:620:2;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;-1:-1:-1;;10642:620:2;;21999:2:10;10642:620:2;;;10344:924;;10561:70;430:25565:10;;;10586:21:2;;10561:70;:::i;:::-;10642:620;;;;;;;;12895:1:10;10642:620:2;;;;;;;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;-1:-1:-1;;10642:620:2;;;;;;10344:924;;10561:70;430:25565:10;;;10
586:21:2;;10561:70;:::i;:::-;10642:620;;;;;;;;14304:1:10;10642:620:2;;;;;;;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;-1:-1:-1;;10642:620:2;;;;;;10344:924;;10561:70;430:25565:10;;;10586:21:2;;10561:70;:::i;:::-;10642:620;;;;;;;;14431:2:10;10642:620:2;;;;;;;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;-1:-1:-1;;10642:620:2;;;;;;10344:924;;10561:70;430:25565:10;;;10586:21:2;;10561:70;:::i;:::-;10642:620;;;430:25565:10;10642:620:2;;;;12389:24:10;10642:620:2;;;;;;430:25565:10;10642:620:2;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;-1:-1:-1;;10642:620:2;;12389:24:10;10642:620:2;;;10344:924;;10561:70;25486:2:10;430:25565;;10586:21:2;;10561:70;:::i;:::-;25486:2:10;10642:620:2;;;;;25486:2:10;10642:620:2;25486:2:10;10642:620:2;;;;;430:25565:10;10642:620:2;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;;;;;;10344:924;;10561:70;18417:2:1;430:25565:10;;10586:21:2;;10561:70;:::i;:::-;10642:620;;;;;;;;18417:2:1;10642:620:2;;;;;;;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;;;;;;10344:924;;;;10463:12;;10459:55;;430:25565:10;;;;;;;;10569:13:2;;10561:70;10569:13;;:38;;;;10561:70;;:::i;:::-;10642:620;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;10344:924;;;:::o;10642:620::-;;;;;;;;;;;10569:38;430:25565:10;;;;10586:21:2;;10569:38;;;10459:55;430:25565:10;;;;;;;;;;:::i;3420:579:0:-;3831:43;2976:8:10;3831:43:0;;:::i;:::-;-1:-1:-1;;3892:28:0;;430:25565:10;;2976:8;3963:29:0;;;:::i;430:25565:10:-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;23959:13;430:25565;;;;;;;:::o;:::-;;;;;;;;;;;;;:::o;:::-;;;;;;;;;;;;;;;:::i;:::-;;;;;;;;:::i;:::-;;;;-1:-1:-1;;;430:25565:10;;;;:::o;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;-1:-1:-1;;;;;;430:25565:10;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;:::i;:::-;;;;;;;:::i;23644:1542::-;;;;;;;23836:31;;430:25565;18401:19:1;;;:::i;:::-;430:25565:10;;;;;;;:::i;:::-;23836:31;;;;;;;;;;430:25565;23836:31;;430:25565;23905:80;;23922:62;23929:54;23952:30;23959:22;:13;430:25565;23974:6;430:25565;;;;23959:22;-1:-1:-1;;;;;430:25565:10;;;23952:30;5929:18:1;430:25565:1
0;;;;;;;;;;5965:22:1;;430:25565:10;;;;5803:18:1;5792:29;;5791:77;430:25565:10;;;5964:30:1;5965:22;;;;430:25565:10;;;5917:30:1;;5916:79;430:25565:10;;;;;;6056:7:1;430:25565:10;;;6043:21:1;5671:400;;23929:54:10;430:25565;;-1:-1:-1;;;;;;430:25565:10;;;23922:62;430:25565;;23905:80;;;;;;430:25565;;-1:-1:-1;;;;;430:25565:10;;;;;;;;;23898:88;;23836:31;23898:88;;;;;23836:31;;430:25565;24046:50;;17514:17:1;23898:88:10;;17514:17:1;12514:75:2;430:25565:10;;;12514:75:2;;;430:25565:10;13081:136:2;;17514:17:1;430:25565:10;;24046:50;;;;;;430:25565;;;;;;;;;;;;24039:58;;23836:31;24039:58;;;;;23836:31;;430:25565;;24135:24;24039:58;;430:25565;;;24135:24;;;;;;430:25565;;:::i;:::-;24128:32;;23836:31;24128:32;;;;;23836:31;24128:32;24192:34;;;;:::i;:::-;-1:-1:-1;;;24264:34:10;;;:::i;:::-;24445:15;;;:::i;:::-;430:25565;-1:-1:-1;;;;;;430:25565:10;;;;;;24522:37;;24445:22;23836:31;24522:37;;;:::i;:::-;;430:25565;;24522:37;;;;;;;;:::i;:::-;430:25565;;:::i;:::-;24499:62;;;;:::i;:::-;430:25565;;;:::i;:::-;;;;:::i;:::-;;;;:::i;:::-;;;;:::i;:::-;;;;:::i;:::-;;;;:::i;:::-;;;;24594:202;;;23836:31;24594:202;;;;;;:::i;:::-;;430:25565;;24594:202;;;;;;;;:::i;:::-;430:25565;;:::i;:::-;24828:33;;;;:::i;:::-;24903:34;;;;:::i;:::-;430:25565;;24998:27;;;:::i;:::-;24972:54;;;:::i;:::-;25065:18;;:::i;:::-;25101:56;;;;:::i;:::-;25093:86;;;:::i;2637:355:4:-;430:25565:10;2637:355:4;2921:64;2637:355;430:25565:10;2921:50:4;2637:355;;430:25565:10;;;;;;;;;;2921:50:4;;;430:25565:10;;;;;;:::i;:::-;;;;;;;;2921:50:4;;;;;;;;;;:::i;:::-;:64;:::i;430:25565:10:-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;4360:974:0;;4707:45;2510:20:1;;;:::i;:::-;430:25565:10;;;4612:85:0;-1:-1:-1;;4620:38:0;;;4612:85;:::i;:::-;18447:1:10;4715:16:0;4707:45;:::i;:::-;18447:1:10;430:25565;;18447:1;430:25565;;;4845:13:0;-1:-1:-1;4840:263:0;4860:10;18447:1:10;4860:10:0;;;;5273:54;5142:41;;;5297:29;:25;5142:41;;;;:::i;:::-;5193:63;-1:-1:-1;;5201:32:0;;;5193:63;:::i;5297:25::-;:29;:::i;:::-;5273:54;;:::i;4872:3::
-;4920:41;;;;:::i;:::-;430:25565:10;;4975:63:0;-1:-1:-1;;4983:32:0;;;4975:63;:::i;:::-;430:25565:10;18447:1;430:25565;;;18447:1;430:25565;;;18447:1;430:25565;;;;;;;;18447:1;5052:40:0;;;;:::i;:::-;4872:3;430:25565:10;4845:13:0;;;4360:974;4707:45;2510:20:1;;;:::i;4707:45:0:-;4811:1;430:25565:10;;;4811:1:0;430:25565:10;;;5142:41:0;;;;:::i;:::-;430:25565:10;;5193:63:0;-1:-1:-1;;5201:32:0;;;5193:63;:::i;:::-;430:25565:10;;;;;;;5297:29:0;5273:54;5297:29;;:::i;4360:974::-;;4707:45;2510:20:1;;;:::i;:::-;430:25565:10;;;4612:85:0;-1:-1:-1;;4620:38:0;;;4612:85;:::i;:::-;23836:31:10;4715:16:0;4707:45;:::i;:::-;4811:1;430:25565:10;;4811:1:0;430:25565:10;;;4845:13:0;-1:-1:-1;4840:263:0;4860:10;23836:31:10;4860:10:0;;;;5273:54;5142:41;;;5297:29;:25;5142:41;;;;:::i;4872:3::-;4920:41;;;;:::i;:::-;430:25565:10;;4975:63:0;-1:-1:-1;;4983:32:0;;;4975:63;:::i;:::-;430:25565:10;4811:1:0;430:25565:10;;;4811:1:0;430:25565:10;;;4811:1:0;430:25565:10;;;;;;;;4811:1:0;5052:40;;;;:::i;:::-;4872:3;430:25565:10;4845:13:0;;;22269:774:10;;430:25565;;;;;22405:15;;22401:58;;22486:2;22553:13;;;;;;;22901:11;;;:::i;:::-;22914:7;;;;;23025:11;;;;430:25565;22269:774;:::o;22923:3::-;22946:4;;;;;:::i;:::-;430:25565;-1:-1:-1;;;;;;430:25565:10;;;22946:4;:12;22954:4;;;;;:::i;:::-;-1:-1:-1;;;;;;430:25565:10;;;22946:12;-1:-1:-1;;;;;;430:25565:10;;;22946:12;22942:63;;430:25565;;22892:20;;22942:63;22978:12;;;;-1:-1:-1;22978:12:10;:::o;22546:289::-;22582:163;;;;;;;;22486:2;22582:163;;;;22758:67;22546:289;22758:67;22798:12;;;;-1:-1:-1;22798:12:10;:::o;22401:58::-;22436:12;;;-1:-1:-1;22436:12:10;:::o;21674:216::-;430:25565;21776:13;430:25565;;:::i;:::-;21858:2;430:25565;;;;;;;10569:13:2;;10561:70;10569:13;;:38;;;;10561:70;;:::i;:::-;10642:620;;;430:25565:10;10642:620:2;;;;21858:2:10;10642:620:2;;;;;;;21858:2:10;10642:620:2;;;;;;;;;;;21821:41:10;;;;;;:::i;10642:620:2:-;;;;;;;21858:2:10;10642:620:2;;;10569:38;430:25565:10;;;;10586:21:2;;10569:38;;;2658:162:6;-1:-1:-1;;;;;;;;;;;430:25565:10;-1:-1:-1;;;;;430:25565:10;966:1
0:8;2717:23:6;2713:101;;2658:162::o;27793:991:1:-;2510:20;;;:::i;:::-;28050:10;;;;:43;;;;27793:991;28046:86;;28160:1;430:25565:10;;28160:1:1;430:25565:10;;;28194:13:1;430:25565:10;28189:492:1;28209:9;;;;;;430:25565:10;;;28755:22:1;27793:991;:::o;28220:3::-;430:25565:10;;;;28301:22:1;;;28297:73;;28464:37;;;;:::i;:::-;-1:-1:-1;;28519:23:1;;28515:74;;430:25565:10;;;;;;;28651:19:1;28160:1;430:25565:10;;28194:13:1;;28515:74;28562:12;;;;;430:25565:10;28562:12:1;:::o;28050:43::-;-1:-1:-1;;;28064:29:1;;28050:43;;29056:1004;2510:20;;;:::i;:::-;29319:11;;;;:44;;;;29056:1004;29315:87;;29430:1;430:25565:10;;29430:1:1;430:25565:10;;;29464:13:1;430:25565:10;29459:497:1;29479:10;;;;;;430:25565:10;;;30030:23:1;29056:1004;:::o;29491:3::-;430:25565:10;;;;29572:23:1;;;29568:74;;29785:39;;;;:::i;:::-;-1:-1:-1;;29842:23:1;;29838:74;;430:25565:10;;;;;;;29926:19:1;29430:1;430:25565:10;;29464:13:1;;29319:44;-1:-1:-1;;;29334:29:1;;29319:44;;1060:871:0;-1:-1:-1;1317:13:0;-1:-1:-1;1332:10:0;12895:1:10;1332:10:0;;;;430:25565:10;;1898:26:0;1060:871;:::o;1344:3::-;430:25565:10;;;1425:26:0;;;1421:77;;1592:43;;;;:::i;:::-;-1:-1:-1;;1653:32:0;;1649:83;;430:25565:10;;;;;;;1794:19:0;12895:1:10;430:25565;1317:13:0;;1060:871;1222:11;;1218:54;;430:25565:10;1317:13:0;430:25565:10;1312:512:0;1332:10;;;;;;430:25565:10;;;1898:26:0;1060:871;:::o;1344:3::-;430:25565:10;;;;1425:26:0;;;1421:77;;1592:43;;;;:::i;:::-;-1:-1:-1;;1653:32:0;;1649:83;;430:25565:10;;;;;;;1794:19:0;430:25565:10;;;1317:13:0;;1218:54;1249:12;;430:25565:10;1249:12:0;:::o;430:25565:10:-;;;;:::o;:::-;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;21812:827:1;;;;2510:20;;;:::i;:::-;22037:76;-1:-1:-1;;22045:29:1;;;22037:76;:::i;:::-;22131:15;;430:25565:10;;;22223:18:1;;;:::i;:::-;-1:-1:-1;22252:213:1;22273:11;;;;;;22482:39;22606:26;22482:39;;;;;;;:::i;:::-;430:25565:10;22531:58:1;-1:-1:-1;;22539:19:1;;;22531:58;:::i;:::-;22606:26;:::i;22286:5::-;22314:39;22439:15;22314:39;;22223:1;22314:39;;;:::i;:::-;430:25565:10;22367:58:1;-1:-1:-1;;22375:19:1
;;;22367:58;:::i;22439:15::-;22286:5;430:25565:10;22257:14:1;;;;430:25565:10;;;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;25779:214;25900:18;430:25565;25900:18;25779:214;430:25565;;;;;;;:::i;:::-;25900:18;;;;;;;;;430:25565;25900:18;;25942:43;25900:18;;430:25565;;;25942:43;;;;;;;430:25565;;;;;;;;;;:::i;:::-;25935:51;;25900:18;25935:51;;;;;25900:18;25935:51;25779:214;:::o;430:25565::-;;;;;;;;;:::i;25318:455::-;;;;430:25565;;25486:2;25466:22;;:48;;;;;25318:455;430:25565;;;;25581:1;25565:22;25671:48;25565:22;;;;:::i;:::-;430:25565;25671:48;25486:2;430:25565;25671:48;;;;;;;;;:::i;:::-;25623:97;;1213:5;25623:97;;;25740:26;25623:97;;:::i;:::-;25671:48;430:25565;;;25740:26;;;;;;:::i;430:25565::-;25486:2;430:25565;-1:-1:-1;;;430:25565:10;;;;;;;;;;;;;;;;;;;;25466:48;25512:2;25492:22;;;25466:48;;;21427:241;430:25565;21541:13;430:25565;;:::i;:::-;21617:2;430:25565;;;;;;;10569:13:2;;10561:70;10569:13;;:38;;;;10561:70;;:::i;:::-;10642:620;;;;;;;;21617:2:10;10642:620:2;;;;;;;;;;;;;;;;;;;21587:34:10;;;10642:620:2;430:25565:10;;;;;;;;;;;21617:2;430:25565;;;;10642:620:2;430:25565:10;;;;21427:241;:::o;430:25565::-;-1:-1:-1;;21617:2:10;430:25565;;;;21541:13;430:25565;;;;;;;-1:-1:-1;430:25565:10;;;;10642:620:2;;;;;;;;;;;431:320:0;;;;;;;;;;;;;430:25565:10;;;;;;;;;669:61:0;;;430:25565:10;;;;;;;;;;;;;;;;;;;-1:-1:-1;430:25565:10;;;;;;;-1:-1:-1;430:25565:10;;;;;;;;;;;;669:61:0;2921:50:4;;669:61:0;;;;430:25565:10;669:61:0;;;;;:::i;:::-;:75;;;:::i;14980:394:1:-;;15185:34;14980:394;15185:34;:::i;:::-;-1:-1:-1;;15233:29:1;;15229:78;;430:25565:10;15324:6:1;430:25565:10;;;15324:6:1;430:25565:10;;;;15324:6:1;430:25565:10;;;;;;;;;;;;;;14980:394:1;:::o;15229:78::-;430:25565:10;;;;15278:18:1;:::o;2254:783:0:-;;2470:37;;;;:::i;:::-;430:25565:10;;-1:-1:-1;;2521:38:0;;2517:96;;2667:1;430:25565:10;;2667:1:0;430:25565:10;;;2649:36:0;;;-1:-1:-1;2696:310:0;2716:16;;;;;;3016:14;;;;2254:783;:::o;2734:3::-;430:25565:10;;;;;;;;;;;;2782:47:0;;;;:::i;:::-;430:25565:10;;-1:-1:-1;;2847:32:0;;28
43:98;;430:25565:10;2667:1:0;430:25565:10;;;2667:1:0;430:25565:10;;;2667:1:0;430:25565:10;;;;;;;;2667:1:0;2955:40;;;;:::i;:::-;2734:3;2701:13;430:25565:10;;2701:13:0;;;2843:98;430:25565:10;;;;;;;;;2899:27:0;:::o;2517:96::-;430:25565:10;;;;;;2575:27:0;:::o;9609:335:1:-;9757:181;;9609:335;9757:181;;;;;;;;;;;;;;;;;;;9609:335;:::o;430:25565:10:-;;;;;;;;;;;:::o;2999:704:1:-;;3120:36;2976:8:10;3120:36:1;;:::i;:::-;430:25565:10;;;;3171:13:1;;;3167:70;;3262:18;2976:8:10;3262:18:1;:12;430:25565:10;;3262:12:1;;:::i;:::-;430:25565:10;;;;3262:18:1;-1:-1:-1;3246:84:1;;2976:8:10;3380:1:1;3368:13;;3380:1;;3438:7;3407:41;3421:26;3428:18;3397:51;3438:7;;2976:8:10;3438:7:1;:::i;:::-;12514:75:2;;;;-1:-1:-1;;;;;;430:25565:10;;13493:136:2;3428:18:1;430:25565:10;;;;3421:26:1;430:25565:10;;;7048:1:1;430:25565:10;;;7048:1:1;430:25565:10;;7041:21:1;6959:110;;3407:41;430:25565:10;;;;3364:297:1;3481:1;3469:13;;3481:1;;3539:7;3508:41;3522:26;3529:18;3498:51;3539:7;;2976:8:10;3539:7:1;:::i;:::-;12514:75:2;;;;-1:-1:-1;;;;;;430:25565:10;;13081:136:2;3529:18:1;430:25565:10;;;;3522:26:1;430:25565:10;;;;6370:10:1;6359:21;430:25565:10;;;;;6358:61:1;430:25565:10;;;;;;;;;;;6467:21:1;;6238:257;3508:41;430:25565:10;;;;3465:196:1;3570:13;;3582:1;3570:13;;3566:95;;3465:196;21394:20:10;20602:819;:::o;3566:95:1:-;3599:51;3640:7;;;3623:26;3630:18;3609:41;3640:7;;2976:8:10;3640:7:1;:::i;:::-;12514:75:2;;;;-1:-1:-1;;;;;;430:25565:10;;12875:136:2;3630:18:1;430:25565:10;;;;3246:84:1;-1:-1:-1;;;430:25565:10;-1:-1:-1;430:25565:10;;3296:23:1:o;3167:70::-;3217:7;3211:14;3217:7;;;;2976:8:10;3217:7:1;;;:::i;:::-;430:25565:10;;;;3211:14:1;3200:26;430:25565:10;2976:8;430:25565;;3200:26:1;:::o;2999:704::-;;;;3120:36;;;;:::i;:::-;430:25565:10;;;;3171:13:1;;;3167:70;;3262:18;430:25565:10;3262:18:1;:12;430:25565:10;;3262:12:1;;:::i;:18::-;-1:-1:-1;3246:84:1;;430:25565:10;3380:1:1;3368:13;;3380:1;;3438:7;3421:26;3428:18;3397:51;3438:7;;3407:41;3438:7;;:::i;3364:297::-;3481:1;3469:13;;3481:1;;3539:7;3522:26;3529:18;3498:51;353
9:7;;3508:41;3539:7;;:::i;3465:196::-;3570:13;;;3582:1;3570:13;;3566:95;;3465:196;;;21394:20:10;20602:819;:::o;3566:95:1:-;3599:51;3640:7;;;3630:18;3609:41;3640:7;;3623:26;3640:7;;:::i;3599:51::-;3566:95;;;;;3246:84;-1:-1:-1;;;430:25565:10;-1:-1:-1;430:25565:10;;;-1:-1:-1;3296:23:1:o;3167:70::-;3217:7;3211:14;3217:7;;;;;;;;:::i;23049:452:10:-;430:25565;;23148:19;;;23144:60;;23272:2;23262:12;;430:25565;;23272:2;23345:150;;;23272:2;23345:150;;;;23049:452;:::o;430:25565::-;;;-1:-1:-1;;;430:25565:10;;23272:2;430:25565;;;;;;;;;;;;;;-1:-1:-1;;;430:25565:10;;;;;;;20911:536:1;;430:25565:10;;;21042:1:1;430:25565:10;;;21042:1:1;430:25565:10;;;21025:24:1;21021:73;;21235:1;430:25565:10;;21235:1:1;430:25565:10;;;21212:31:1;;;:::i;:::-;430:25565:10;-1:-1:-1;;21258:29:1;;21254:78;;430:25565:10;21042:1:1;430:25565:10;;;21042:1:1;430:25565:10;;;21042:1:1;430:25565:10;;;;;;;;20911:536:1;:::o;13785:388::-;;430:25565:10;;;13921:2:1;430:25565:10;;;13921:2:1;430:25565:10;;;13905:24:1;13901:78;;14114:2;430:25565:10;;;;;;;14086:31:1;;;:::i;13901:78::-;-1:-1:-1;;;430:25565:10;;;-1:-1:-1;13945:23:1:o;1550:446::-;;430:25565:10;1667:7:1;;;;:::i;:::-;430:25565:10;;;1661:22:1;1657:93;;1781:4;430:25565:10;1769:7:1;;;;:::i;:::-;430:25565:10;;;;;;;;;1763:22:1;1759:93;;1871:7;430:25565:10;1871:7:1;1883:4;1871:7;;:::i;:::-;430:25565:10;;;;;;;;;1865:22:1;1861:93;;430:25565:10;1550:446:1;:::o;1861:93::-;1910:1;1903:8;:::o;1759:93::-;1801:8;;1808:1;1801:8;:::o;1657:93::-;1699:8;;1706:1;1699:8;:::o","linkReferences":{}},"methodIdentifiers":{"CODESEP_POS()":"b2497e70","EPOCH()":"a0dc2758","INPUT_INDEX()":"428bcd35","KEY_VERSION()":"1369ac3e","LIGHT_CLIENT()":"e613ae00","SCHNORR_VERIFIER_PRECOMPILE()":"9a4f308d","SIGHASH_ALL_HASH_TYPE()":"0bd89ab7","SIGHASH_SINGLE_ANYONECANPAY_HASH_TYPE()":"6cf7d641","SPEND_TYPE_EXT()":"23dacd29","SPEND_TYPE_NO_EXT()":"092ac5d4","SYSTEM_CALLER()":"d761753e","acceptOwnership()":"79ba5097","batchWithdraw(bytes32[],bytes4[])":"19854623","deposit((bytes4,bytes2,bytes,bytes,byt
es,bytes4),(bytes,uint256,uint256),bytes32)":"fb11d7b9","depositAmount()":"419759f5","depositPrefix()":"5b4f894d","depositSuffix()":"8752b6b2","depositTxIds(uint256)":"06592167","failedDepositVault()":"a670e7ed","getAggregatedKey()":"3c918b6c","getWithdrawalCount()":"781952a8","initialize(bytes,bytes,uint256)":"41260137","initialized()":"158ef93e","operator()":"570ca735","owner()":"8da5cb5b","pendingOwner()":"e30c3978","processedTxIds(bytes32)":"4379caa5","renounceOwnership()":"715018a6","replaceDeposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),uint256,bytes32)":"2594f107","replacePrefix()":"6b0b5a94","replaceSuffix()":"f42cb4fc","safeWithdraw((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),(bytes4,bytes2,bytes,bytes,bytes,bytes4),bytes,bytes)":"9072f747","setDepositScript(bytes,bytes)":"f8e655d2","setFailedDepositVault(address)":"85fb7151","setOperator(address)":"b3ab15fb","setReplaceScript(bytes,bytes)":"7ec9732a","transferOwnership(address)":"f2fde38b","withdraw(bytes32,bytes4)":"8786dba7","withdrawalUTXOs(uint256)":"471ba1e3"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.28+commit.7893614a\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"name\":\"InvalidInitialization\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotInitializing\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"OwnableInvalidOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"OwnableUnauthorizedAccount\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"wtxId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"txId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"timestamp\",\"t
ype\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"depositId\",\"type\":\"uint256\"}],\"name\":\"Deposit\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"oldTxId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"newTxId\",\"type\":\"bytes32\"}],\"name\":\"DepositReplaced\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"depositPrefix\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"depositSuffix\",\"type\":\"bytes\"}],\"name\":\"DepositScriptUpdate\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"wtxId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"txId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"depositId\",\"type\":\"uint256\"}],\"name\":\"DepositTransferFailed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"oldVault\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newVault\",\"type\":\"address\"}],\"name\":\"FailedDepositVaultUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"version\",\"type\":\"uint64\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"oldOperator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newOperator\",\"type\":\"address\"}],\"nam
e\":\"OperatorUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferStarted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"replacePrefix\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"replaceSuffix\",\"type\":\"bytes\"}],\"name\":\"ReplaceScriptUpdate\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"bytes4\",\"name\":\"version\",\"type\":\"bytes4\"},{\"internalType\":\"bytes2\",\"name\":\"flag\",\"type\":\"bytes2\"},{\"internalType\":\"bytes\",\"name\":\"vin\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"vout\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"witness\",\"type\":\"bytes\"},{\"internalType\":\"bytes4\",\"name\":\"locktime\",\"type\":\"bytes4\"}],\"indexed\":false,\"internalType\":\"struct Bridge.Transaction\",\"name\":\"payoutTx\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"txId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes4\",\"name\":\"outputId\",\"type\":\"bytes4\"}],\"indexed\":false,\"internalType\":\"struct 
Bridge.UTXO\",\"name\":\"spentUtxo\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"}],\"name\":\"SafeWithdrawal\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"txId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes4\",\"name\":\"outputId\",\"type\":\"bytes4\"}],\"indexed\":false,\"internalType\":\"struct Bridge.UTXO\",\"name\":\"utxo\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"Withdrawal\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"CODESEP_POS\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"EPOCH\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"INPUT_INDEX\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"KEY_VERSION\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"LIGHT_CLIENT\",\"outputs\":[{\"internalType\":\"contract 
BitcoinLightClient\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SCHNORR_VERIFIER_PRECOMPILE\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SIGHASH_ALL_HASH_TYPE\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SIGHASH_SINGLE_ANYONECANPAY_HASH_TYPE\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SPEND_TYPE_EXT\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SPEND_TYPE_NO_EXT\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SYSTEM_CALLER\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"txIds\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes4[]\",\"name\":\"outputIds\",\"type\":\"bytes4[]\"}],\"name\":\"batchWithdraw\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes4\",\"name\":\"version\",\"type\":\"bytes4\"},{\"internalType\":\"bytes2\",\"name\":\"flag\",\"type\":\"bytes2\"},{\"internalType\":\"bytes\",\"name\":\"vin\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"vout\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"witness\",\"type\":\"bytes\"},{\"internalType\":\"bytes4\",\"name\":\"locktime\",\"type\":\"
bytes4\"}],\"internalType\":\"struct Bridge.Transaction\",\"name\":\"moveTx\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"bytes\",\"name\":\"intermediateNodes\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"}],\"internalType\":\"struct Bridge.MerkleProof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"shaScriptPubkeys\",\"type\":\"bytes32\"}],\"name\":\"deposit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"depositAmount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"depositPrefix\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"depositSuffix\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"depositTxIds\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"failedDepositVault\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAggregatedKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getWithdrawalCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_depositPrefix\",\"type\":\"bytes\"},{\"internalType\":\"by
tes\",\"name\":\"_depositSuffix\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"_depositAmount\",\"type\":\"uint256\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"operator\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingOwner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"processedTxIds\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes4\",\"name\":\"version\",\"type\":\"bytes4\"},{\"internalType\":\"bytes2\",\"name\":\"flag\",\"type\":\"bytes2\"},{\"internalType\":\"bytes\",\"name\":\"vin\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"vout\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"witness\",\"type\":\"bytes\"},{\"internalType\":\"bytes4\",\"name\":\"locktime\",\"type\":\"bytes4\"}],\"internalType\":\"struct 
Bridge.Transaction\",\"name\":\"replaceTx\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"bytes\",\"name\":\"intermediateNodes\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"}],\"internalType\":\"struct Bridge.MerkleProof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"internalType\":\"uint256\",\"name\":\"idToReplace\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"shaScriptPubkeys\",\"type\":\"bytes32\"}],\"name\":\"replaceDeposit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"replacePrefix\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"replaceSuffix\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes4\",\"name\":\"version\",\"type\":\"bytes4\"},{\"internalType\":\"bytes2\",\"name\":\"flag\",\"type\":\"bytes2\"},{\"internalType\":\"bytes\",\"name\":\"vin\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"vout\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"witness\",\"type\":\"bytes\"},{\"internalType\":\"bytes4\",\"name\":\"locktime\",\"type\":\"bytes4\"}],\"internalType\":\"struct Bridge.Transaction\",\"name\":\"prepareTx\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"bytes\",\"name\":\"intermediateNodes\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"}],\"internalType\":\"struct 
Bridge.MerkleProof\",\"name\":\"prepareProof\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"bytes4\",\"name\":\"version\",\"type\":\"bytes4\"},{\"internalType\":\"bytes2\",\"name\":\"flag\",\"type\":\"bytes2\"},{\"internalType\":\"bytes\",\"name\":\"vin\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"vout\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"witness\",\"type\":\"bytes\"},{\"internalType\":\"bytes4\",\"name\":\"locktime\",\"type\":\"bytes4\"}],\"internalType\":\"struct Bridge.Transaction\",\"name\":\"payoutTx\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"blockHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"scriptPubKey\",\"type\":\"bytes\"}],\"name\":\"safeWithdraw\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_depositPrefix\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"_depositSuffix\",\"type\":\"bytes\"}],\"name\":\"setDepositScript\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_failedDepositVault\",\"type\":\"address\"}],\"name\":\"setFailedDepositVault\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_operator\",\"type\":\"address\"}],\"name\":\"setOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_replacePrefix\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"_replaceSuffix\",\"type\":\"bytes\"}],\"name\":\"setReplaceScript\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"na
me\":\"txId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes4\",\"name\":\"outputId\",\"type\":\"bytes4\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"withdrawalUTXOs\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"txId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes4\",\"name\":\"outputId\",\"type\":\"bytes4\"}],\"stateMutability\":\"view\",\"type\":\"function\"}],\"devdoc\":{\"author\":\"Citrea\",\"errors\":{\"InvalidInitialization()\":[{\"details\":\"The contract is already initialized.\"}],\"NotInitializing()\":[{\"details\":\"The contract is not initializing.\"}],\"OwnableInvalidOwner(address)\":[{\"details\":\"The owner is not a valid owner account. (eg. `address(0)`)\"}],\"OwnableUnauthorizedAccount(address)\":[{\"details\":\"The caller account is not authorized to perform an operation.\"}]},\"events\":{\"Initialized(uint64)\":{\"details\":\"Triggered when the contract has been initialized or reinitialized.\"}},\"kind\":\"dev\",\"methods\":{\"acceptOwnership()\":{\"details\":\"The new owner accepts the ownership transfer.\"},\"batchWithdraw(bytes32[],bytes4[])\":{\"details\":\"Takes in multiple Bitcoin addresses as recipient addresses should be unique\",\"params\":{\"outputIds\":\"the outputIds of the outputs in the withdrawal transactions\",\"txIds\":\"the txIds of the withdrawal transactions on Bitcoin\"}},\"deposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),bytes32)\":{\"params\":{\"moveTx\":\"Transaction parameters of the move transaction on Bitcoin\",\"proof\":\"Merkle proof of the move transaction\",\"shaScriptPubkeys\":\"`shaScriptPubkeys` is the only component of the P2TR message hash that cannot be derived solely on the transaction itself in our case, as it requires knowledge of the previous transaction output that is being spent. 
Thus we calculate this component off-chain.\"}},\"getWithdrawalCount()\":{\"returns\":{\"_0\":\"The count of withdrawals happened so far\"}},\"initialize(bytes,bytes,uint256)\":{\"params\":{\"_depositAmount\":\"The CBTC amount that can be deposited and withdrawn\",\"_depositPrefix\":\"First part of the deposit script expected in the witness field for all L1 deposits \",\"_depositSuffix\":\"The suffix of the deposit script that follows the receiver address\"}},\"owner()\":{\"details\":\"Returns the address of the current owner.\"},\"pendingOwner()\":{\"details\":\"Returns the address of the pending owner.\"},\"renounceOwnership()\":{\"details\":\"Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.\"},\"replaceDeposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),uint256,bytes32)\":{\"params\":{\"idToReplace\":\"The index of the deposit transaction to be replaced in the `depositTxIds` array\",\"proof\":\"Merkle proof of the replacement transaction\",\"replaceTx\":\"Transaction parameters of the replacement transaction on Bitcoin\",\"shaScriptPubkeys\":\"`shaScriptPubkeys` is the only component of the P2TR message hash that cannot be derived solely on the transaction itself in our case, as it requires knowledge of the previous transaction output that is being spent. 
Thus we calculate this component off-chain.\"}},\"safeWithdraw((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),(bytes4,bytes2,bytes,bytes,bytes,bytes4),bytes,bytes)\":{\"params\":{\"blockHeader\":\"Block header of the associated Bitcoin block\",\"payoutTx\":\"Transaction parameters of the payout transaction on Bitcoin\",\"prepareProof\":\"Merkle proof of the prepare transaction\",\"prepareTx\":\"Transaction parameters of the prepare transaction on Bitcoin\",\"scriptPubKey\":\"The script pubkey of the user, included for extra validation\"}},\"setDepositScript(bytes,bytes)\":{\"details\":\"Deposit script contains a fixed script that checks signatures of verifiers and pushes EVM address of the receiver\",\"params\":{\"_depositPrefix\":\"The new deposit script prefix\",\"_depositSuffix\":\"The part of the deposit script that succeeds the receiver address\"}},\"setFailedDepositVault(address)\":{\"params\":{\"_failedDepositVault\":\"The address of the failed deposit vault\"}},\"setOperator(address)\":{\"params\":{\"_operator\":\"Address of the privileged operator\"}},\"setReplaceScript(bytes,bytes)\":{\"details\":\"Replace script contains a fixed script that checks signatures of verifiers and pushes txId of the deposit transaction to be replaced\",\"params\":{\"_replacePrefix\":\"The new replace prefix\",\"_replaceSuffix\":\"The part of the replace script that succeeds the txId\"}},\"transferOwnership(address)\":{\"details\":\"Starts the ownership transfer of the contract to a new account. Replaces the pending transfer if there is one. 
Can only be called by the current owner.\"},\"withdraw(bytes32,bytes4)\":{\"params\":{\"outputId\":\"The outputId of the output in the withdrawal transaction\",\"txId\":\"The txId of the withdrawal transaction on Bitcoin\"}}},\"title\":\"Bridge contract for the Citrea end of Citrea <> Bitcoin bridge\",\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{\"batchWithdraw(bytes32[],bytes4[])\":{\"notice\":\"Batch version of `withdraw` that can accept multiple cBTC\"},\"deposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),bytes32)\":{\"notice\":\"Checks if the deposit amount is sent to the bridge multisig on Bitcoin, and if so, sends the deposit amount to the receiver\"},\"initialize(bytes,bytes,uint256)\":{\"notice\":\"Initializes the bridge contract and sets the deposit script\"},\"replaceDeposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),uint256,bytes32)\":{\"notice\":\"Operator can replace a deposit transaction with its replacement if the replacement transaction is included in Bitcoin and signed by N-of-N with the replacement script\"},\"safeWithdraw((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),(bytes4,bytes2,bytes,bytes,bytes,bytes4),bytes,bytes)\":{\"notice\":\"Same operation as `withdraw` with extra validations at the cost of gas. 
Validates the transactions, checks the inclusion of the transaction being spent and checks if the signature is valid.\"},\"setDepositScript(bytes,bytes)\":{\"notice\":\"Sets the expected deposit script of the deposit transaction on Bitcoin, contained in the witness\"},\"setFailedDepositVault(address)\":{\"notice\":\"Sets the address of the failed deposit vault\"},\"setOperator(address)\":{\"notice\":\"Sets the operator address that can process user deposits\"},\"setReplaceScript(bytes,bytes)\":{\"notice\":\"Sets the replace script of the replacement transaction on Bitcoin, contained in the witness\"},\"withdraw(bytes32,bytes4)\":{\"notice\":\"Accepts 1 cBTC from the sender and inserts this withdrawal request of 1 BTC on Bitcoin into the withdrawals array so that later on can be processed by the operator \"}},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/Bridge.sol\":\"Bridge\"},\"evmVersion\":\"cancun\",\"libraries\":{},\"metadata\":{\"appendCBOR\":false,\"bytecodeHash\":\"none\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[\":@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/\",\":@openzeppelin/contracts/=lib/openzeppelin-contracts-upgradeable/lib/openzeppelin-contracts/contracts/\",\":bitcoin-spv/=lib/bitcoin-spv/\",\":ds-test/=lib/openzeppelin-contracts-upgradeable/lib/forge-std/lib/ds-test/src/\",\":erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/\",\":forge-std/=lib/forge-std/src/\",\":openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/\",\":openzeppelin-contracts/=lib/openzeppelin-contracts/\",\":openzeppelin/=lib/openzeppelin-contracts/contracts/\"],\"viaIR\":true},\"sources\":{\"lib/WitnessUtils.sol\":{\"keccak256\":\"0x0b0d59b4e73d6f5b4bbf1032f72bb15c2f1548c2ee319b80ae9e4c22576a70af\",\"license\":\"LGPL-3.0-or-later\",\"urls\":[\"bzz-raw://8499a5fc520941cb1b970637850cabfbc2d5a51abed824886063420c686b57de\",\"dweb:/ipfs/QmaLYLJ36PyFAaP3MgvFWW3knD
sSUtVfCfs7Lp7oYFPZ1w\"]},\"lib/bitcoin-spv/solidity/contracts/BTCUtils.sol\":{\"keccak256\":\"0x439eaa97e9239705f3d31e8d39dccbad32311f1f119e295d53c65e0ae3c5a5fc\",\"urls\":[\"bzz-raw://976a361a89c21afc44b5e0a552271d9288b12cf34a9925c25f3c6975ece4e667\",\"dweb:/ipfs/QmNTb4eJyxV5iZj8RJGFBGSKXWsuvoMYqLLBgk16dhWePH\"]},\"lib/bitcoin-spv/solidity/contracts/BytesLib.sol\":{\"keccak256\":\"0x43e0f3b3b23c861bd031588bf410dfdd02e2af17941a89aa38d70e534e0380d1\",\"urls\":[\"bzz-raw://76011d699a8b229dbfdc698b3ece658daad9d96778e86d679aa576bc966209d6\",\"dweb:/ipfs/QmRZEWAeRQtsTUvfzEd1jb2wAqpTNR5KAme92gBRn4SYiT\"]},\"lib/bitcoin-spv/solidity/contracts/SafeMath.sol\":{\"keccak256\":\"0x35930d982394c7ffde439b82e5e696c5b21a6f09699d44861dfe409ef64084a3\",\"urls\":[\"bzz-raw://090e9d78755d4916fa2f5f5d8f9fd2fc59bfc5a25a5e91636a92c4c07aee9c6b\",\"dweb:/ipfs/QmXfz4TPDvgnuYz9eS5AL87GfCLxHQZJV1Y8ieJU9M8yTe\"]},\"lib/bitcoin-spv/solidity/contracts/ValidateSPV.sol\":{\"keccak256\":\"0xce3febbf3ad3a7ff8a8effd0c7ccaf7ccfa2719578b537d49ea196f0bae8062b\",\"urls\":[\"bzz-raw://5f18942483bf20507ae6c0abb5421df96b1aebb7af15f541bda8470f6277312a\",\"dweb:/ipfs/QmPzEpA8w5k6pVFadm3UCLqNdxFAjPwP9Lpi5HMQsQg52J\"]},\"lib/openzeppelin-contracts-upgradeable/contracts/access/Ownable2StepUpgradeable.sol\":{\"keccak256\":\"0xbca4a4f66d98028293dba695851d1b20d3e0ba2fff7453fb241f192fa3fc6b6f\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://013b3cfd9d1e34dad409c3b9a340860e8651e61cda509de33599fb5102f62fe7\",\"dweb:/ipfs/QmTVjDKofM9Nst8w8LAA3HHgi1eCnGYBpFb7Nbat71e2xz\"]},\"lib/openzeppelin-contracts-upgradeable/contracts/access/OwnableUpgradeable.sol\":{\"keccak256\":\"0xc163fcf9bb10138631a9ba5564df1fa25db9adff73bd9ee868a8ae1858fe093a\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://9706d43a0124053d9880f6e31a59f31bc0a6a3dc1acd66ce0a16e1111658c5f6\",\"dweb:/ipfs/QmUFmfowzkRwGtDu36cXV9SPTBHJ3n7dG9xQiK5B28jTf2\"]},\"lib/openzeppelin-contracts-upgradeable/contracts/proxy/utils/Initializable.sol\":{\"keccak256\":\"0x631188737069917d2
f909d29ce62c4d48611d326686ba6683e26b72a23bfac0b\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://7a61054ae84cd6c4d04c0c4450ba1d6de41e27e0a2c4f1bcdf58f796b401c609\",\"dweb:/ipfs/QmUvtdp7X1mRVyC3CsHrtPbgoqWaXHp3S1ZR24tpAQYJWM\"]},\"lib/openzeppelin-contracts-upgradeable/contracts/utils/ContextUpgradeable.sol\":{\"keccak256\":\"0xdbef5f0c787055227243a7318ef74c8a5a1108ca3a07f2b3a00ef67769e1e397\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://08e39f23d5b4692f9a40803e53a8156b72b4c1f9902a88cd65ba964db103dab9\",\"dweb:/ipfs/QmPKn6EYDgpga7KtpkA8wV2yJCYGMtc9K4LkJfhKX2RVSV\"]},\"src/BitcoinLightClient.sol\":{\"keccak256\":\"0x480b7e0492d955afc75a48edf467580700de2d736b91061fedfbdccaee421b92\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://1d337385dbf93792536f5fa83258f4bcc7d1bc57c971214d692a374a21f4230e\",\"dweb:/ipfs/QmSwZ9nDUJtUL9EvWPX3oWEWBFnuwvUw6WWqgqABZmKmxm\"]},\"src/Bridge.sol\":{\"keccak256\":\"0xd04b831319a257ca187779341cd11f296c75451f5845610ebf538aa1f5004dd5\",\"license\":\"UNLICENSED\",\"urls\":[\"bzz-raw://49df8d7539005d014bbf085b5aa6eab8476c1b02f347b2863b71ae48636b2f5c\",\"dweb:/ipfs/QmQZdDqFYcLmqrhSRGUP2YDAxw2sAZiJbSXxXZXw5zMVY7\"]},\"src/interfaces/IBitcoinLightClient.sol\":{\"keccak256\":\"0xc2c31dad4bb43601935c6226efd6d9ad6f38fdd9e57f6cb7c4ec609ae1f220e5\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://1c95da69bee1725b91079ef1af4a5b405f98a69cf82aa2009db683689c3fd1eb\",\"dweb:/ipfs/QmPweYQF1xUexHiaqKZwEv5THdUPhmdGNcvYvw8k4bazkA\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.28+commit.7893614a"},"language":"Solidity","output":{"abi":[{"inputs":[],"type":"error","name":"InvalidInitialization"},{"inputs":[],"type":"error","name":"NotInitializing"},{"inputs":[{"internalType":"address","name":"owner","type":"address"}],"type":"error","name":"OwnableInvalidOwner"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"type":"error","name":"OwnableUnauthorizedAccount"},{"inputs":[{"internalType":"bytes32","name":"wtxId","type":"bytes32","indexed":false},{"
internalType":"bytes32","name":"txId","type":"bytes32","indexed":false},{"internalType":"address","name":"recipient","type":"address","indexed":false},{"internalType":"uint256","name":"timestamp","type":"uint256","indexed":false},{"internalType":"uint256","name":"depositId","type":"uint256","indexed":false}],"type":"event","name":"Deposit","anonymous":false},{"inputs":[{"internalType":"uint256","name":"index","type":"uint256","indexed":false},{"internalType":"bytes32","name":"oldTxId","type":"bytes32","indexed":false},{"internalType":"bytes32","name":"newTxId","type":"bytes32","indexed":false}],"type":"event","name":"DepositReplaced","anonymous":false},{"inputs":[{"internalType":"bytes","name":"depositPrefix","type":"bytes","indexed":false},{"internalType":"bytes","name":"depositSuffix","type":"bytes","indexed":false}],"type":"event","name":"DepositScriptUpdate","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"wtxId","type":"bytes32","indexed":false},{"internalType":"bytes32","name":"txId","type":"bytes32","indexed":false},{"internalType":"address","name":"recipient","type":"address","indexed":false},{"internalType":"uint256","name":"timestamp","type":"uint256","indexed":false},{"internalType":"uint256","name":"depositId","type":"uint256","indexed":false}],"type":"event","name":"DepositTransferFailed","anonymous":false},{"inputs":[{"internalType":"address","name":"oldVault","type":"address","indexed":false},{"internalType":"address","name":"newVault","type":"address","indexed":false}],"type":"event","name":"FailedDepositVaultUpdated","anonymous":false},{"inputs":[{"internalType":"uint64","name":"version","type":"uint64","indexed":false}],"type":"event","name":"Initialized","anonymous":false},{"inputs":[{"internalType":"address","name":"oldOperator","type":"address","indexed":false},{"internalType":"address","name":"newOperator","type":"address","indexed":false}],"type":"event","name":"OperatorUpdated","anonymous":false},{"inputs":[{"internalType":"add
ress","name":"previousOwner","type":"address","indexed":true},{"internalType":"address","name":"newOwner","type":"address","indexed":true}],"type":"event","name":"OwnershipTransferStarted","anonymous":false},{"inputs":[{"internalType":"address","name":"previousOwner","type":"address","indexed":true},{"internalType":"address","name":"newOwner","type":"address","indexed":true}],"type":"event","name":"OwnershipTransferred","anonymous":false},{"inputs":[{"internalType":"bytes","name":"replacePrefix","type":"bytes","indexed":false},{"internalType":"bytes","name":"replaceSuffix","type":"bytes","indexed":false}],"type":"event","name":"ReplaceScriptUpdate","anonymous":false},{"inputs":[{"internalType":"struct Bridge.Transaction","name":"payoutTx","type":"tuple","components":[{"internalType":"bytes4","name":"version","type":"bytes4"},{"internalType":"bytes2","name":"flag","type":"bytes2"},{"internalType":"bytes","name":"vin","type":"bytes"},{"internalType":"bytes","name":"vout","type":"bytes"},{"internalType":"bytes","name":"witness","type":"bytes"},{"internalType":"bytes4","name":"locktime","type":"bytes4"}],"indexed":false},{"internalType":"struct Bridge.UTXO","name":"spentUtxo","type":"tuple","components":[{"internalType":"bytes32","name":"txId","type":"bytes32"},{"internalType":"bytes4","name":"outputId","type":"bytes4"}],"indexed":false},{"internalType":"uint256","name":"index","type":"uint256","indexed":false}],"type":"event","name":"SafeWithdrawal","anonymous":false},{"inputs":[{"internalType":"struct 
Bridge.UTXO","name":"utxo","type":"tuple","components":[{"internalType":"bytes32","name":"txId","type":"bytes32"},{"internalType":"bytes4","name":"outputId","type":"bytes4"}],"indexed":false},{"internalType":"uint256","name":"index","type":"uint256","indexed":false},{"internalType":"uint256","name":"timestamp","type":"uint256","indexed":false}],"type":"event","name":"Withdrawal","anonymous":false},{"inputs":[],"stateMutability":"view","type":"function","name":"CODESEP_POS","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"EPOCH","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"INPUT_INDEX","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"KEY_VERSION","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"LIGHT_CLIENT","outputs":[{"internalType":"contract 
BitcoinLightClient","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SCHNORR_VERIFIER_PRECOMPILE","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SIGHASH_ALL_HASH_TYPE","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SIGHASH_SINGLE_ANYONECANPAY_HASH_TYPE","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SPEND_TYPE_EXT","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SPEND_TYPE_NO_EXT","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"SYSTEM_CALLER","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"acceptOwnership"},{"inputs":[{"internalType":"bytes32[]","name":"txIds","type":"bytes32[]"},{"internalType":"bytes4[]","name":"outputIds","type":"bytes4[]"}],"stateMutability":"payable","type":"function","name":"batchWithdraw"},{"inputs":[{"internalType":"struct Bridge.Transaction","name":"moveTx","type":"tuple","components":[{"internalType":"bytes4","name":"version","type":"bytes4"},{"internalType":"bytes2","name":"flag","type":"bytes2"},{"internalType":"bytes","name":"vin","type":"bytes"},{"internalType":"bytes","name":"vout","type":"bytes"},{"internalType":"bytes","name":"witness","type":"bytes"},{"internalType":"bytes4","name":"locktime","type":"bytes4"}]},{"internalType":"struct 
Bridge.MerkleProof","name":"proof","type":"tuple","components":[{"internalType":"bytes","name":"intermediateNodes","type":"bytes"},{"internalType":"uint256","name":"blockHeight","type":"uint256"},{"internalType":"uint256","name":"index","type":"uint256"}]},{"internalType":"bytes32","name":"shaScriptPubkeys","type":"bytes32"}],"stateMutability":"nonpayable","type":"function","name":"deposit"},{"inputs":[],"stateMutability":"view","type":"function","name":"depositAmount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"depositPrefix","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"depositSuffix","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"depositTxIds","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"failedDepositVault","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"getAggregatedKey","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"getWithdrawalCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"bytes","name":"_depositPrefix","type":"bytes"},{"internalType":"bytes","name":"_depositSuffix","type":"bytes"},{"internalType":"uint256","name":"_depositAmount","type":"uint256"}],"stateMutability":"nonpayable","type":"function","name":"initialize"},{"inputs":[],"stateMutability":"view","type":"function","name":"initialized","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"operator","outputs":[{"internalType":"address","na
me":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"pendingOwner","outputs":[{"internalType":"address","name":"","type":"address"}]},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function","name":"processedTxIds","outputs":[{"internalType":"bool","name":"","type":"bool"}]},{"inputs":[],"stateMutability":"nonpayable","type":"function","name":"renounceOwnership"},{"inputs":[{"internalType":"struct Bridge.Transaction","name":"replaceTx","type":"tuple","components":[{"internalType":"bytes4","name":"version","type":"bytes4"},{"internalType":"bytes2","name":"flag","type":"bytes2"},{"internalType":"bytes","name":"vin","type":"bytes"},{"internalType":"bytes","name":"vout","type":"bytes"},{"internalType":"bytes","name":"witness","type":"bytes"},{"internalType":"bytes4","name":"locktime","type":"bytes4"}]},{"internalType":"struct Bridge.MerkleProof","name":"proof","type":"tuple","components":[{"internalType":"bytes","name":"intermediateNodes","type":"bytes"},{"internalType":"uint256","name":"blockHeight","type":"uint256"},{"internalType":"uint256","name":"index","type":"uint256"}]},{"internalType":"uint256","name":"idToReplace","type":"uint256"},{"internalType":"bytes32","name":"shaScriptPubkeys","type":"bytes32"}],"stateMutability":"nonpayable","type":"function","name":"replaceDeposit"},{"inputs":[],"stateMutability":"view","type":"function","name":"replacePrefix","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[],"stateMutability":"view","type":"function","name":"replaceSuffix","outputs":[{"internalType":"bytes","name":"","type":"bytes"}]},{"inputs":[{"internalType":"struct 
Bridge.Transaction","name":"prepareTx","type":"tuple","components":[{"internalType":"bytes4","name":"version","type":"bytes4"},{"internalType":"bytes2","name":"flag","type":"bytes2"},{"internalType":"bytes","name":"vin","type":"bytes"},{"internalType":"bytes","name":"vout","type":"bytes"},{"internalType":"bytes","name":"witness","type":"bytes"},{"internalType":"bytes4","name":"locktime","type":"bytes4"}]},{"internalType":"struct Bridge.MerkleProof","name":"prepareProof","type":"tuple","components":[{"internalType":"bytes","name":"intermediateNodes","type":"bytes"},{"internalType":"uint256","name":"blockHeight","type":"uint256"},{"internalType":"uint256","name":"index","type":"uint256"}]},{"internalType":"struct Bridge.Transaction","name":"payoutTx","type":"tuple","components":[{"internalType":"bytes4","name":"version","type":"bytes4"},{"internalType":"bytes2","name":"flag","type":"bytes2"},{"internalType":"bytes","name":"vin","type":"bytes"},{"internalType":"bytes","name":"vout","type":"bytes"},{"internalType":"bytes","name":"witness","type":"bytes"},{"internalType":"bytes4","name":"locktime","type":"bytes4"}]},{"internalType":"bytes","name":"blockHeader","type":"bytes"},{"internalType":"bytes","name":"scriptPubKey","type":"bytes"}],"stateMutability":"payable","type":"function","name":"safeWithdraw"},{"inputs":[{"internalType":"bytes","name":"_depositPrefix","type":"bytes"},{"internalType":"bytes","name":"_depositSuffix","type":"bytes"}],"stateMutability":"nonpayable","type":"function","name":"setDepositScript"},{"inputs":[{"internalType":"address","name":"_failedDepositVault","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"setFailedDepositVault"},{"inputs":[{"internalType":"address","name":"_operator","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"setOperator"},{"inputs":[{"internalType":"bytes","name":"_replacePrefix","type":"bytes"},{"internalType":"bytes","name":"_replaceSuffix","type":"bytes"}],"stateMuta
bility":"nonpayable","type":"function","name":"setReplaceScript"},{"inputs":[{"internalType":"address","name":"newOwner","type":"address"}],"stateMutability":"nonpayable","type":"function","name":"transferOwnership"},{"inputs":[{"internalType":"bytes32","name":"txId","type":"bytes32"},{"internalType":"bytes4","name":"outputId","type":"bytes4"}],"stateMutability":"payable","type":"function","name":"withdraw"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function","name":"withdrawalUTXOs","outputs":[{"internalType":"bytes32","name":"txId","type":"bytes32"},{"internalType":"bytes4","name":"outputId","type":"bytes4"}]}],"devdoc":{"kind":"dev","methods":{"acceptOwnership()":{"details":"The new owner accepts the ownership transfer."},"batchWithdraw(bytes32[],bytes4[])":{"details":"Takes in multiple Bitcoin addresses as recipient addresses should be unique","params":{"outputIds":"the outputIds of the outputs in the withdrawal transactions","txIds":"the txIds of the withdrawal transactions on Bitcoin"}},"deposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),bytes32)":{"params":{"moveTx":"Transaction parameters of the move transaction on Bitcoin","proof":"Merkle proof of the move transaction","shaScriptPubkeys":"`shaScriptPubkeys` is the only component of the P2TR message hash that cannot be derived solely on the transaction itself in our case, as it requires knowledge of the previous transaction output that is being spent. 
Thus we calculate this component off-chain."}},"getWithdrawalCount()":{"returns":{"_0":"The count of withdrawals happened so far"}},"initialize(bytes,bytes,uint256)":{"params":{"_depositAmount":"The CBTC amount that can be deposited and withdrawn","_depositPrefix":"First part of the deposit script expected in the witness field for all L1 deposits ","_depositSuffix":"The suffix of the deposit script that follows the receiver address"}},"owner()":{"details":"Returns the address of the current owner."},"pendingOwner()":{"details":"Returns the address of the pending owner."},"renounceOwnership()":{"details":"Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner."},"replaceDeposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),uint256,bytes32)":{"params":{"idToReplace":"The index of the deposit transaction to be replaced in the `depositTxIds` array","proof":"Merkle proof of the replacement transaction","replaceTx":"Transaction parameters of the replacement transaction on Bitcoin","shaScriptPubkeys":"`shaScriptPubkeys` is the only component of the P2TR message hash that cannot be derived solely on the transaction itself in our case, as it requires knowledge of the previous transaction output that is being spent. 
Thus we calculate this component off-chain."}},"safeWithdraw((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),(bytes4,bytes2,bytes,bytes,bytes,bytes4),bytes,bytes)":{"params":{"blockHeader":"Block header of the associated Bitcoin block","payoutTx":"Transaction parameters of the payout transaction on Bitcoin","prepareProof":"Merkle proof of the prepare transaction","prepareTx":"Transaction parameters of the prepare transaction on Bitcoin","scriptPubKey":"The script pubkey of the user, included for extra validation"}},"setDepositScript(bytes,bytes)":{"details":"Deposit script contains a fixed script that checks signatures of verifiers and pushes EVM address of the receiver","params":{"_depositPrefix":"The new deposit script prefix","_depositSuffix":"The part of the deposit script that succeeds the receiver address"}},"setFailedDepositVault(address)":{"params":{"_failedDepositVault":"The address of the failed deposit vault"}},"setOperator(address)":{"params":{"_operator":"Address of the privileged operator"}},"setReplaceScript(bytes,bytes)":{"details":"Replace script contains a fixed script that checks signatures of verifiers and pushes txId of the deposit transaction to be replaced","params":{"_replacePrefix":"The new replace prefix","_replaceSuffix":"The part of the replace script that succeeds the txId"}},"transferOwnership(address)":{"details":"Starts the ownership transfer of the contract to a new account. Replaces the pending transfer if there is one. 
Can only be called by the current owner."},"withdraw(bytes32,bytes4)":{"params":{"outputId":"The outputId of the output in the withdrawal transaction","txId":"The txId of the withdrawal transaction on Bitcoin"}}},"version":1},"userdoc":{"kind":"user","methods":{"batchWithdraw(bytes32[],bytes4[])":{"notice":"Batch version of `withdraw` that can accept multiple cBTC"},"deposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),bytes32)":{"notice":"Checks if the deposit amount is sent to the bridge multisig on Bitcoin, and if so, sends the deposit amount to the receiver"},"initialize(bytes,bytes,uint256)":{"notice":"Initializes the bridge contract and sets the deposit script"},"replaceDeposit((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),uint256,bytes32)":{"notice":"Operator can replace a deposit transaction with its replacement if the replacement transaction is included in Bitcoin and signed by N-of-N with the replacement script"},"safeWithdraw((bytes4,bytes2,bytes,bytes,bytes,bytes4),(bytes,uint256,uint256),(bytes4,bytes2,bytes,bytes,bytes,bytes4),bytes,bytes)":{"notice":"Same operation as `withdraw` with extra validations at the cost of gas. 
Validates the transactions, checks the inclusion of the transaction being spent and checks if the signature is valid."},"setDepositScript(bytes,bytes)":{"notice":"Sets the expected deposit script of the deposit transaction on Bitcoin, contained in the witness"},"setFailedDepositVault(address)":{"notice":"Sets the address of the failed deposit vault"},"setOperator(address)":{"notice":"Sets the operator address that can process user deposits"},"setReplaceScript(bytes,bytes)":{"notice":"Sets the replace script of the replacement transaction on Bitcoin, contained in the witness"},"withdraw(bytes32,bytes4)":{"notice":"Accepts 1 cBTC from the sender and inserts this withdrawal request of 1 BTC on Bitcoin into the withdrawals array so that later on can be processed by the operator "}},"version":1}},"settings":{"remappings":["@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/","@openzeppelin/contracts/=lib/openzeppelin-contracts-upgradeable/lib/openzeppelin-contracts/contracts/","bitcoin-spv/=lib/bitcoin-spv/","ds-test/=lib/openzeppelin-contracts-upgradeable/lib/forge-std/lib/ds-test/src/","erc4626-tests/=lib/openzeppelin-contracts-upgradeable/lib/erc4626-tests/","forge-std/=lib/forge-std/src/","openzeppelin-contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/","openzeppelin-contracts/=lib/openzeppelin-contracts/","openzeppelin/=lib/openzeppelin-contracts/contracts/"],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"none","appendCBOR":false},"compilationTarget":{"src/Bridge.sol":"Bridge"},"evmVersion":"cancun","libraries":{},"viaIR":true},"sources":{"lib/WitnessUtils.sol":{"keccak256":"0x0b0d59b4e73d6f5b4bbf1032f72bb15c2f1548c2ee319b80ae9e4c22576a70af","urls":["bzz-raw://8499a5fc520941cb1b970637850cabfbc2d5a51abed824886063420c686b57de","dweb:/ipfs/QmaLYLJ36PyFAaP3MgvFWW3knDsSUtVfCfs7Lp7oYFPZ1w"],"license":"LGPL-3.0-or-later"},"lib/bitcoin-spv/solidity/contracts/BTCUtils.sol":{"keccak256":"0x439eaa97e9239705f3d
31e8d39dccbad32311f1f119e295d53c65e0ae3c5a5fc","urls":["bzz-raw://976a361a89c21afc44b5e0a552271d9288b12cf34a9925c25f3c6975ece4e667","dweb:/ipfs/QmNTb4eJyxV5iZj8RJGFBGSKXWsuvoMYqLLBgk16dhWePH"],"license":null},"lib/bitcoin-spv/solidity/contracts/BytesLib.sol":{"keccak256":"0x43e0f3b3b23c861bd031588bf410dfdd02e2af17941a89aa38d70e534e0380d1","urls":["bzz-raw://76011d699a8b229dbfdc698b3ece658daad9d96778e86d679aa576bc966209d6","dweb:/ipfs/QmRZEWAeRQtsTUvfzEd1jb2wAqpTNR5KAme92gBRn4SYiT"],"license":null},"lib/bitcoin-spv/solidity/contracts/SafeMath.sol":{"keccak256":"0x35930d982394c7ffde439b82e5e696c5b21a6f09699d44861dfe409ef64084a3","urls":["bzz-raw://090e9d78755d4916fa2f5f5d8f9fd2fc59bfc5a25a5e91636a92c4c07aee9c6b","dweb:/ipfs/QmXfz4TPDvgnuYz9eS5AL87GfCLxHQZJV1Y8ieJU9M8yTe"],"license":null},"lib/bitcoin-spv/solidity/contracts/ValidateSPV.sol":{"keccak256":"0xce3febbf3ad3a7ff8a8effd0c7ccaf7ccfa2719578b537d49ea196f0bae8062b","urls":["bzz-raw://5f18942483bf20507ae6c0abb5421df96b1aebb7af15f541bda8470f6277312a","dweb:/ipfs/QmPzEpA8w5k6pVFadm3UCLqNdxFAjPwP9Lpi5HMQsQg52J"],"license":null},"lib/openzeppelin-contracts-upgradeable/contracts/access/Ownable2StepUpgradeable.sol":{"keccak256":"0xbca4a4f66d98028293dba695851d1b20d3e0ba2fff7453fb241f192fa3fc6b6f","urls":["bzz-raw://013b3cfd9d1e34dad409c3b9a340860e8651e61cda509de33599fb5102f62fe7","dweb:/ipfs/QmTVjDKofM9Nst8w8LAA3HHgi1eCnGYBpFb7Nbat71e2xz"],"license":"MIT"},"lib/openzeppelin-contracts-upgradeable/contracts/access/OwnableUpgradeable.sol":{"keccak256":"0xc163fcf9bb10138631a9ba5564df1fa25db9adff73bd9ee868a8ae1858fe093a","urls":["bzz-raw://9706d43a0124053d9880f6e31a59f31bc0a6a3dc1acd66ce0a16e1111658c5f6","dweb:/ipfs/QmUFmfowzkRwGtDu36cXV9SPTBHJ3n7dG9xQiK5B28jTf2"],"license":"MIT"},"lib/openzeppelin-contracts-upgradeable/contracts/proxy/utils/Initializable.sol":{"keccak256":"0x631188737069917d2f909d29ce62c4d48611d326686ba6683e26b72a23bfac0b","urls":["bzz-raw://7a61054ae84cd6c4d04c0c4450ba1d6de41e27e0a2c4f1bcdf58f796b401c609","
dweb:/ipfs/QmUvtdp7X1mRVyC3CsHrtPbgoqWaXHp3S1ZR24tpAQYJWM"],"license":"MIT"},"lib/openzeppelin-contracts-upgradeable/contracts/utils/ContextUpgradeable.sol":{"keccak256":"0xdbef5f0c787055227243a7318ef74c8a5a1108ca3a07f2b3a00ef67769e1e397","urls":["bzz-raw://08e39f23d5b4692f9a40803e53a8156b72b4c1f9902a88cd65ba964db103dab9","dweb:/ipfs/QmPKn6EYDgpga7KtpkA8wV2yJCYGMtc9K4LkJfhKX2RVSV"],"license":"MIT"},"src/BitcoinLightClient.sol":{"keccak256":"0x480b7e0492d955afc75a48edf467580700de2d736b91061fedfbdccaee421b92","urls":["bzz-raw://1d337385dbf93792536f5fa83258f4bcc7d1bc57c971214d692a374a21f4230e","dweb:/ipfs/QmSwZ9nDUJtUL9EvWPX3oWEWBFnuwvUw6WWqgqABZmKmxm"],"license":"MIT"},"src/Bridge.sol":{"keccak256":"0xd04b831319a257ca187779341cd11f296c75451f5845610ebf538aa1f5004dd5","urls":["bzz-raw://49df8d7539005d014bbf085b5aa6eab8476c1b02f347b2863b71ae48636b2f5c","dweb:/ipfs/QmQZdDqFYcLmqrhSRGUP2YDAxw2sAZiJbSXxXZXw5zMVY7"],"license":"UNLICENSED"},"src/interfaces/IBitcoinLightClient.sol":{"keccak256":"0xc2c31dad4bb43601935c6226efd6d9ad6f38fdd9e57f6cb7c4ec609ae1f220e5","urls":["bzz-raw://1c95da69bee1725b91079ef1af4a5b405f98a69cf82aa2009db683689c3fd1eb","dweb:/ipfs/QmPweYQF1xUexHiaqKZwEv5THdUPhmdGNcvYvw8k4bazkA"],"license":"MIT"}},"version":1},"id":10} \ No newline at end of file diff --git a/scripts/docker-entrypoint.sh b/scripts/docker-entrypoint.sh new file mode 100755 index 000000000..9e212d9c8 --- /dev/null +++ b/scripts/docker-entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/sh +set -e + +bitcoind "$@" & + +sleep 2 +/init-bitcoin.sh + +wait \ No newline at end of file diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile new file mode 100644 index 000000000..de40a8feb --- /dev/null +++ b/scripts/docker/Dockerfile @@ -0,0 +1,13 @@ +# Compile Clementine +FROM rust:1.85.0 +COPY . . 
+WORKDIR /clementine +RUN cargo +1.85.0 build --release --bin clementine-core + +# Copy only the compiled binary to a new image +FROM ubuntu:22.04 +COPY --from=0 /target/release/clementine-core /clementine-core + +# Set up Clementine +ENTRYPOINT [ "/clementine-core" ] +EXPOSE 17000 diff --git a/scripts/docker/Dockerfile.automation b/scripts/docker/Dockerfile.automation new file mode 100644 index 000000000..317da2a67 --- /dev/null +++ b/scripts/docker/Dockerfile.automation @@ -0,0 +1,13 @@ +# Compile Clementine +FROM rust:1.85.0 +COPY . . +WORKDIR /clementine +RUN cargo +1.85.0 build --release --bin clementine-core --features=automation + +# Copy only the compiled binary to a new image +FROM ubuntu:22.04 +COPY --from=0 /target/release/clementine-core /clementine-core + +# Set up Clementine +ENTRYPOINT [ "/clementine-core" ] +EXPOSE 17000 diff --git a/scripts/docker/configs/regtest/.env.regtest b/scripts/docker/configs/regtest/.env.regtest new file mode 100644 index 000000000..54deac2e7 --- /dev/null +++ b/scripts/docker/configs/regtest/.env.regtest @@ -0,0 +1,79 @@ +# SECRET_KEY="1111111111111111111111111111111111111111111111111111111111111111" + +HOST=0.0.0.0 +PORT=17000 + +READ_PARAMSET_FROM_ENV=1 +READ_CONFIG_FROM_ENV=1 + +BITCOIN_RPC_URL=http://bitcoin_regtest:20443/wallet/admin +BITCOIN_RPC_USER=admin +BITCOIN_RPC_PASSWORD=admin + +DB_HOST=postgres_db_regtest +DB_PORT=5432 +DB_USER=clementine +DB_PASSWORD=clementine +# DB_NAME=clementine + +CITREA_CHAIN_ID=62298 +CITREA_RPC_URL=http://citrea_sequencer_regtest:12345 +CITREA_LIGHT_CLIENT_PROVER_URL=http://citrea_light_client_prover_regtest:12349 +BRIDGE_CONTRACT_ADDRESS=3100000000000000000000000000000000000002 + +SECURITY_COUNCIL=2:b496bfbae14987817c53d592be0aa66c45c7b94443c1f74551373f9ce34d2346,9c00b80d739933388f136f4519fed20cbaee4153899810703ca216d2320e20c4,994283e4c648fbeded4ecf579490622dd4469152e3b4bc8290607ed365fd29be +HEADER_CHAIN_PROOF_BATCH_SIZE=100 + +NETWORK=regtest +NUM_ROUND_TXS=3 
+NUM_KICKOFFS_PER_ROUND=100 +NUM_SIGNED_KICKOFFS=2 +BRIDGE_AMOUNT=1000000000 +KICKOFF_AMOUNT=25000 +OPERATOR_CHALLENGE_AMOUNT=130000000 +COLLATERAL_FUNDING_AMOUNT=90000000 +KICKOFF_BLOCKHASH_COMMIT_LENGTH=40 +WATCHTOWER_CHALLENGE_BYTES=144 +WINTERNITZ_LOG_D=4 +WINTERNITZ_SECRET_KEY=2222222222222222222222222222222222222222222222222222222222222222 +USER_TAKES_AFTER=200 +OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK=144 +OPERATOR_CHALLENGE_NACK_TIMELOCK=432 +DISPROVE_TIMEOUT_TIMELOCK=720 +ASSERT_TIMEOUT_TIMELOCK=576 +OPERATOR_REIMBURSE_TIMELOCK=12 +WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK=288 +TIME_TO_SEND_WATCHTOWER_CHALLENGE=216 +TIME_TO_DISPROVE=648 +LATEST_BLOCKHASH_TIMEOUT_TIMELOCK=360 +FINALITY_DEPTH=1 +START_HEIGHT=2 +GENESIS_HEIGHT=91000 +GENESIS_CHAIN_STATE_HASH=b6e2031e9350450084537e72c05f81b497db4f41f63ee25b55e5de8924a2c5cf +OPERATOR_WITHDRAWAL_FEE_SATS=100000 + +BITVM_CACHE_PATH=/bitvm_cache.bin +JSON_LOGS=1 +RUST_LOG=info +RUST_MIN_STACK=33554432 +DBG_PACKAGE_HEX=1 +RISC0_SKIP_BUILD=1 +RISC0_DEV_MODE=1 +BRIDGE_CIRCUIT_METHOD_ID_CONSTANT=e246ef42e7795aa55cf0f6677cbabb78dc1fc461c20c2addd8ff70c8c8d019db +CA_CERT_PATH=/certs/ca/ca.pem +SERVER_CERT_PATH=/certs/server/server.pem +SERVER_KEY_PATH=/certs/server/server.key +CLIENT_CERT_PATH=/certs/client/client.pem +CLIENT_KEY_PATH=/certs/client/client.key +AGGREGATOR_CERT_PATH=/certs/aggregator/aggregator.pem +CLIENT_VERIFICATION=1 +DISABLE_NOFN_CHECK=1 +BRIDGE_NONSTANDARD=false +TELEMETRY_HOST=0.0.0.0 +TELEMETRY_PORT=9000 + +VERIFIERS_PUBLIC_KEYS=034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa,02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27,023c72addb4fdf09af94f0c94d7fe92a386a7e70cf8a1d85916386bb2535c7b1b1,032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991 +OPERATOR_XONLY_PKS=4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa,466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27 + 
+VERIFIER_ENDPOINTS="https://regtest0_verifier.docker.internal:17000,https://regtest1_verifier.docker.internal:17000,https://regtest2_verifier.docker.internal:17000,https://regtest3_verifier.docker.internal:17000" +OPERATOR_ENDPOINTS="https://regtest0_operator.docker.internal:17000,https://regtest1_operator.docker.internal:17000" diff --git a/scripts/docker/configs/regtest/citrea/batch_prover_config.toml b/scripts/docker/configs/regtest/citrea/batch_prover_config.toml new file mode 100644 index 000000000..b4cb613ae --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/batch_prover_config.toml @@ -0,0 +1,3 @@ +proving_mode = "execute" +proof_sampling_number = 0 +enable_recovery = true diff --git a/scripts/docker/configs/regtest/citrea/batch_prover_rollup_config.toml b/scripts/docker/configs/regtest/citrea/batch_prover_rollup_config.toml new file mode 100644 index 000000000..0d5f14499 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/batch_prover_rollup_config.toml @@ -0,0 +1,30 @@ +[public_keys] +sequencer_public_key = "036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f7" +sequencer_da_pub_key = "02588d202afcc1ee4ab5254c7847ec25b9a135bbda0f2bc69ee1a714749fd77dc9" +prover_da_pub_key = "" + +[da] +# fill here +node_url = "http://bitcoin_regtest:20443/wallet/batch-prover-wallet" +# fill here +node_username = "admin" +# fill here +node_password = "admin" +tx_backup_dir = "resources/bitcoin/inscription_txs" +da_private_key = "56D08C2DDE7F412F80EC99A0A328F76688C904BD4D1435281EFC9270EC8C8707" + +[storage] +# The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. 
+path = "resources/dbs/batch-prover-db" +db_max_open_files = 5000 + +[rpc] +# the host and port to bind the rpc server for +bind_host = "0.0.0.0" +bind_port = 12348 +enable_subscriptions = false + +[runner] +sequencer_client_url = "http://citrea_sequencer_regtest:12345" +include_tx_body = false +scan_l1_start_height = 1 diff --git a/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/accounts.json b/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/accounts.json new file mode 100644 index 000000000..f65b078f6 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/accounts.json @@ -0,0 +1,3 @@ +{ + "pub_keys": [] +} diff --git a/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/evm.json b/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/evm.json new file mode 100644 index 000000000..386a55292 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/evm.json @@ -0,0 +1,182 @@ +{ + "data": [ + { + "address": "0x0f820f428ae436c1000b27577bf5bbf09bfec8f2", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0x3100000000000000000000000000000000000001", + "balance": "0x0", + "code": 
"0x60806040523661001357610011610017565b005b6100115b61001f610168565b6001600160a01b0316330361015e5760606001600160e01b03195f35166364d3180d60e11b81016100595761005261019a565b9150610156565b63587086bd60e11b6001600160e01b0319821601610079576100526101ed565b63070d7c6960e41b6001600160e01b031982160161009957610052610231565b621eb96f60e61b6001600160e01b03198216016100b857610052610261565b63a39f25e560e01b6001600160e01b03198216016100d8576100526102a0565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b6101666102b3565b565b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101a46102c3565b5f6101b23660048184610668565b8101906101bf91906106aa565b90506101da8160405180602001604052805f8152505f6102cd565b505060408051602081019091525f815290565b60605f806101fe3660048184610668565b81019061020b91906106d7565b9150915061021b828260016102cd565b60405180602001604052805f8152509250505090565b606061023b6102c3565b5f6102493660048184610668565b81019061025691906106aa565b90506101da816102f8565b606061026b6102c3565b5f610274610168565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b60606102aa6102c3565b5f61027461034f565b6101666102be61034f565b61035d565b3415610166575f5ffd5b6102d68361037b565b5f825111806102e25750805b156102f3576102f183836103ba565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610321610168565b604080516001600160a01b03928316815291841660208301520160405180910390a161034c816103e6565b50565b5f61035861048f565b905090565b365f5f375f5f365f845af43d5f5f3e808015610377573d5ff35b3d5ffd5b610384816104b6565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606103df83836040518060600160405280602781526020016107e76027913961054a565b9392505050565
b6001600160a01b03811661044b5760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161014d565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61018b565b6001600160a01b0381163b6105235760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161014d565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61046e565b60605f5f856001600160a01b031685604051610566919061079b565b5f60405180830381855af49150503d805f811461059e576040519150601f19603f3d011682016040523d82523d5f602084013e6105a3565b606091505b50915091506105b4868383876105be565b9695505050505050565b6060831561062c5782515f03610625576001600160a01b0385163b6106255760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161014d565b5081610636565b610636838361063e565b949350505050565b81511561064e5781518083602001fd5b8060405162461bcd60e51b815260040161014d91906107b1565b5f5f85851115610676575f5ffd5b83861115610682575f5ffd5b5050820193919092039150565b80356001600160a01b03811681146106a5575f5ffd5b919050565b5f602082840312156106ba575f5ffd5b6103df8261068f565b634e487b7160e01b5f52604160045260245ffd5b5f5f604083850312156106e8575f5ffd5b6106f18361068f565b9150602083013567ffffffffffffffff81111561070c575f5ffd5b8301601f8101851361071c575f5ffd5b803567ffffffffffffffff811115610736576107366106c3565b604051601f8201601f19908116603f0116810167ffffffffffffffff81118282101715610765576107656106c3565b60405281815282820160200187101561077c575f5ffd5b816020840160208301375f602083830101528093505050509250929050565b5f82518060208501845e5f920191825250919050565b602081525f82518060208401528060208501604085015e5f604082850
101526040601f19601f8301168401019150509291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", + "storage": { + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000003200000000000000000000000000000000000001", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000031ffffffffffffffffffffffffffffffffffffff" + } + }, + { + "address": "0x3100000000000000000000000000000000000002", + "balance": "0x115eec47f6cf7e35000000", + "code": "0x60806040523661001357610011610017565b005b6100115b61001f610168565b6001600160a01b0316330361015e5760606001600160e01b03195f35166364d3180d60e11b81016100595761005261019a565b9150610156565b63587086bd60e11b6001600160e01b0319821601610079576100526101ed565b63070d7c6960e41b6001600160e01b031982160161009957610052610231565b621eb96f60e61b6001600160e01b03198216016100b857610052610261565b63a39f25e560e01b6001600160e01b03198216016100d8576100526102a0565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b6101666102b3565b565b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101a46102c3565b5f6101b23660048184610668565b8101906101bf91906106aa565b90506101da8160405180602001604052805f8152505f6102cd565b505060408051602081019091525f815290565b60605f806101fe3660048184610668565b81019061020b91906106d7565b9150915061021b828260016102cd565b60405180602001604052805f8152509250505090565b606061023b6102c3565b5f6102493660048184610668565b81019061025691906106aa565b90506101da816102f8565b606061026b6102c3565b5f610274610168565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b60606102aa6102c3565b5f61027461034f565b6101666102be61034f565b61035d565b3415610166575f5ffd5b6102d68361
037b565b5f825111806102e25750805b156102f3576102f183836103ba565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610321610168565b604080516001600160a01b03928316815291841660208301520160405180910390a161034c816103e6565b50565b5f61035861048f565b905090565b365f5f375f5f365f845af43d5f5f3e808015610377573d5ff35b3d5ffd5b610384816104b6565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606103df83836040518060600160405280602781526020016107e76027913961054a565b9392505050565b6001600160a01b03811661044b5760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161014d565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61018b565b6001600160a01b0381163b6105235760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161014d565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61046e565b60605f5f856001600160a01b031685604051610566919061079b565b5f60405180830381855af49150503d805f811461059e576040519150601f19603f3d011682016040523d82523d5f602084013e6105a3565b606091505b50915091506105b4868383876105be565b9695505050505050565b6060831561062c5782515f03610625576001600160a01b0385163b6106255760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161014d565b5081610636565b610636838361063e565b949350505050565b81511561064e5781518083602001fd5b8060405162461bcd60e51b815260040161014d91906107b1565b5f5f85851115610676575f5ffd5b83861115610682575f5ffd5b5050820193919092039150565b80356001600160a01b03811681146106a5575f5ffd5b919050565b5f602082840312156106ba575f5ffd5b
6103df8261068f565b634e487b7160e01b5f52604160045260245ffd5b5f5f604083850312156106e8575f5ffd5b6106f18361068f565b9150602083013567ffffffffffffffff81111561070c575f5ffd5b8301601f8101851361071c575f5ffd5b803567ffffffffffffffff811115610736576107366106c3565b604051601f8201601f19908116603f0116810167ffffffffffffffff81118282101715610765576107656106c3565b60405281815282820160200187101561077c575f5ffd5b816020840160208301375f602083830101528093505050509250929050565b5f82518060208501845e5f920191825250919050565b602081525f82518060208401528060208501604085015e5f604082850101526040601f19601f8301168401019150509291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", + "storage": { + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000003200000000000000000000000000000000000002", + "0x9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000031ffffffffffffffffffffffffffffffffffffff" + } + }, + { + "address": "0x3100000000000000000000000000000000000003", + "balance": "0x0", + "code": 
"0x60806040523661001357610011610017565b005b6100115b61001f610168565b6001600160a01b0316330361015e5760606001600160e01b03195f35166364d3180d60e11b81016100595761005261019a565b9150610156565b63587086bd60e11b6001600160e01b0319821601610079576100526101ed565b63070d7c6960e41b6001600160e01b031982160161009957610052610231565b621eb96f60e61b6001600160e01b03198216016100b857610052610261565b63a39f25e560e01b6001600160e01b03198216016100d8576100526102a0565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b6101666102b3565b565b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101a46102c3565b5f6101b23660048184610668565b8101906101bf91906106aa565b90506101da8160405180602001604052805f8152505f6102cd565b505060408051602081019091525f815290565b60605f806101fe3660048184610668565b81019061020b91906106d7565b9150915061021b828260016102cd565b60405180602001604052805f8152509250505090565b606061023b6102c3565b5f6102493660048184610668565b81019061025691906106aa565b90506101da816102f8565b606061026b6102c3565b5f610274610168565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b60606102aa6102c3565b5f61027461034f565b6101666102be61034f565b61035d565b3415610166575f5ffd5b6102d68361037b565b5f825111806102e25750805b156102f3576102f183836103ba565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610321610168565b604080516001600160a01b03928316815291841660208301520160405180910390a161034c816103e6565b50565b5f61035861048f565b905090565b365f5f375f5f365f845af43d5f5f3e808015610377573d5ff35b3d5ffd5b610384816104b6565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606103df83836040518060600160405280602781526020016107e76027913961054a565b9392505050565
b6001600160a01b03811661044b5760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161014d565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61018b565b6001600160a01b0381163b6105235760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161014d565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61046e565b60605f5f856001600160a01b031685604051610566919061079b565b5f60405180830381855af49150503d805f811461059e576040519150601f19603f3d011682016040523d82523d5f602084013e6105a3565b606091505b50915091506105b4868383876105be565b9695505050505050565b6060831561062c5782515f03610625576001600160a01b0385163b6106255760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161014d565b5081610636565b610636838361063e565b949350505050565b81511561064e5781518083602001fd5b8060405162461bcd60e51b815260040161014d91906107b1565b5f5f85851115610676575f5ffd5b83861115610682575f5ffd5b5050820193919092039150565b80356001600160a01b03811681146106a5575f5ffd5b919050565b5f602082840312156106ba575f5ffd5b6103df8261068f565b634e487b7160e01b5f52604160045260245ffd5b5f5f604083850312156106e8575f5ffd5b6106f18361068f565b9150602083013567ffffffffffffffff81111561070c575f5ffd5b8301601f8101851361071c575f5ffd5b803567ffffffffffffffff811115610736576107366106c3565b604051601f8201601f19908116603f0116810167ffffffffffffffff81118282101715610765576107656106c3565b60405281815282820160200187101561077c575f5ffd5b816020840160208301375f602083830101528093505050509250929050565b5f82518060208501845e5f920191825250919050565b602081525f82518060208401528060208501604085015e5f604082850
101526040601f19601f8301168401019150509291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000003200000000000000000000000000000000000003", + "0x9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000031ffffffffffffffffffffffffffffffffffffff" + } + }, + { + "address": "0x3100000000000000000000000000000000000004", + "balance": "0x0", + "code": "0x60806040523661001357610011610017565b005b6100115b61001f610168565b6001600160a01b0316330361015e5760606001600160e01b03195f35166364d3180d60e11b81016100595761005261019a565b9150610156565b63587086bd60e11b6001600160e01b0319821601610079576100526101ed565b63070d7c6960e41b6001600160e01b031982160161009957610052610231565b621eb96f60e61b6001600160e01b03198216016100b857610052610261565b63a39f25e560e01b6001600160e01b03198216016100d8576100526102a0565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b6101666102b3565b565b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101a46102c3565b5f6101b23660048184610668565b8101906101bf91906106aa565b90506101da8160405180602001604052805f8152505f6102cd565b505060408051602081019091525f815290565b60605f806101fe3660048184610668565b81019061020b9190610
6d7565b9150915061021b828260016102cd565b60405180602001604052805f8152509250505090565b606061023b6102c3565b5f6102493660048184610668565b81019061025691906106aa565b90506101da816102f8565b606061026b6102c3565b5f610274610168565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b60606102aa6102c3565b5f61027461034f565b6101666102be61034f565b61035d565b3415610166575f5ffd5b6102d68361037b565b5f825111806102e25750805b156102f3576102f183836103ba565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610321610168565b604080516001600160a01b03928316815291841660208301520160405180910390a161034c816103e6565b50565b5f61035861048f565b905090565b365f5f375f5f365f845af43d5f5f3e808015610377573d5ff35b3d5ffd5b610384816104b6565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606103df83836040518060600160405280602781526020016107e76027913961054a565b9392505050565b6001600160a01b03811661044b5760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161014d565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61018b565b6001600160a01b0381163b6105235760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161014d565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61046e565b60605f5f856001600160a01b031685604051610566919061079b565b5f60405180830381855af49150503d805f811461059e576040519150601f19603f3d011682016040523d82523d5f602084013e6105a3565b606091505b50915091506105b4868383876105be565b9695505050505050565b6060831561062c5782515f03610625576001600160a01b0385163b6106255760405162461bcd60e51b81526020600482015
2601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161014d565b5081610636565b610636838361063e565b949350505050565b81511561064e5781518083602001fd5b8060405162461bcd60e51b815260040161014d91906107b1565b5f5f85851115610676575f5ffd5b83861115610682575f5ffd5b5050820193919092039150565b80356001600160a01b03811681146106a5575f5ffd5b919050565b5f602082840312156106ba575f5ffd5b6103df8261068f565b634e487b7160e01b5f52604160045260245ffd5b5f5f604083850312156106e8575f5ffd5b6106f18361068f565b9150602083013567ffffffffffffffff81111561070c575f5ffd5b8301601f8101851361071c575f5ffd5b803567ffffffffffffffff811115610736576107366106c3565b604051601f8201601f19908116603f0116810167ffffffffffffffff81118282101715610765576107656106c3565b60405281815282820160200187101561077c575f5ffd5b816020840160208301375f602083830101528093505050509250929050565b5f82518060208501845e5f920191825250919050565b602081525f82518060208401528060208501604085015e5f604082850101526040601f19601f8301168401019150509291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000003200000000000000000000000000000000000004", + "0x9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000031ffffffffffffffffffffffffffffffffffffff" + } + }, + { + "address": "0x3100000000000000000000000000000000000005", + "balance": "0x0", + "code": 
"0x60806040523661001357610011610017565b005b6100115b61001f610168565b6001600160a01b0316330361015e5760606001600160e01b03195f35166364d3180d60e11b81016100595761005261019a565b9150610156565b63587086bd60e11b6001600160e01b0319821601610079576100526101ed565b63070d7c6960e41b6001600160e01b031982160161009957610052610231565b621eb96f60e61b6001600160e01b03198216016100b857610052610261565b63a39f25e560e01b6001600160e01b03198216016100d8576100526102a0565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b6101666102b3565b565b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101a46102c3565b5f6101b23660048184610668565b8101906101bf91906106aa565b90506101da8160405180602001604052805f8152505f6102cd565b505060408051602081019091525f815290565b60605f806101fe3660048184610668565b81019061020b91906106d7565b9150915061021b828260016102cd565b60405180602001604052805f8152509250505090565b606061023b6102c3565b5f6102493660048184610668565b81019061025691906106aa565b90506101da816102f8565b606061026b6102c3565b5f610274610168565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b60606102aa6102c3565b5f61027461034f565b6101666102be61034f565b61035d565b3415610166575f5ffd5b6102d68361037b565b5f825111806102e25750805b156102f3576102f183836103ba565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610321610168565b604080516001600160a01b03928316815291841660208301520160405180910390a161034c816103e6565b50565b5f61035861048f565b905090565b365f5f375f5f365f845af43d5f5f3e808015610377573d5ff35b3d5ffd5b610384816104b6565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606103df83836040518060600160405280602781526020016107e76027913961054a565b9392505050565
b6001600160a01b03811661044b5760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161014d565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61018b565b6001600160a01b0381163b6105235760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161014d565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61046e565b60605f5f856001600160a01b031685604051610566919061079b565b5f60405180830381855af49150503d805f811461059e576040519150601f19603f3d011682016040523d82523d5f602084013e6105a3565b606091505b50915091506105b4868383876105be565b9695505050505050565b6060831561062c5782515f03610625576001600160a01b0385163b6106255760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161014d565b5081610636565b610636838361063e565b949350505050565b81511561064e5781518083602001fd5b8060405162461bcd60e51b815260040161014d91906107b1565b5f5f85851115610676575f5ffd5b83861115610682575f5ffd5b5050820193919092039150565b80356001600160a01b03811681146106a5575f5ffd5b919050565b5f602082840312156106ba575f5ffd5b6103df8261068f565b634e487b7160e01b5f52604160045260245ffd5b5f5f604083850312156106e8575f5ffd5b6106f18361068f565b9150602083013567ffffffffffffffff81111561070c575f5ffd5b8301601f8101851361071c575f5ffd5b803567ffffffffffffffff811115610736576107366106c3565b604051601f8201601f19908116603f0116810167ffffffffffffffff81118282101715610765576107656106c3565b60405281815282820160200187101561077c575f5ffd5b816020840160208301375f602083830101528093505050509250929050565b5f82518060208501845e5f920191825250919050565b602081525f82518060208401528060208501604085015e5f604082850
101526040601f19601f8301168401019150509291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000003200000000000000000000000000000000000005", + "0x9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000031ffffffffffffffffffffffffffffffffffffff" + } + }, + { + "address": "0x3100000000000000000000000000000000000006", + "balance": "0x0", + "code": "0x60806040526004361061009c5760003560e01c8063313ce56711610064578063313ce5671461021157806370a082311461023c57806395d89b411461026f578063a9059cbb14610284578063d0e30db01461009c578063dd62ed3e146102bd5761009c565b806306fdde03146100a6578063095ea7b31461013057806318160ddd1461017d57806323b872dd146101a45780632e1a7d4d146101e7575b6100a46102f8565b005b3480156100b257600080fd5b506100bb610347565b6040805160208082528351818301528351919283929083019185019080838360005b838110156100f55781810151838201526020016100dd565b50505050905090810190601f1680156101225780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561013c57600080fd5b506101696004803603604081101561015357600080fd5b506001600160a01b0381351690602001356103d5565b604080519115158252519081900360200190f35b34801561018957600080fd5b5061019261043b565b60408051918252519081900360200190f35b3480156101b057600080fd5b50610169600480360360608110156101c757600080fd5b506001600160a01b0381358116916020810135909116906040013561043f565b3480156101f357600080fd5b506100a46004803603602081101561020a5760008
0fd5b5035610573565b34801561021d57600080fd5b50610226610608565b6040805160ff9092168252519081900360200190f35b34801561024857600080fd5b506101926004803603602081101561025f57600080fd5b50356001600160a01b0316610611565b34801561027b57600080fd5b506100bb610623565b34801561029057600080fd5b50610169600480360360408110156102a757600080fd5b506001600160a01b03813516906020013561067d565b3480156102c957600080fd5b50610192600480360360408110156102e057600080fd5b506001600160a01b0381358116916020013516610691565b33600081815260036020908152604091829020805434908101909155825190815291517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9281900390910190a2565b6000805460408051602060026001851615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156103cd5780601f106103a2576101008083540402835291602001916103cd565b820191906000526020600020905b8154815290600101906020018083116103b057829003601f168201915b505050505081565b3360008181526004602090815260408083206001600160a01b038716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a350600192915050565b4790565b6001600160a01b03831660009081526003602052604081205482111561046457600080fd5b6001600160a01b03841633148015906104a257506001600160a01b038416600090815260046020908152604080832033845290915290205460001914155b15610502576001600160a01b03841660009081526004602090815260408083203384529091529020548211156104d757600080fd5b6001600160a01b03841660009081526004602090815260408083203384529091529020805483900390555b6001600160a01b03808516600081815260036020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a35060019392505050565b3360009081526003602052604090205481111561058f57600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f193505050501580156105ce573d6000803e3d6000fd5b5060408051828152905133917f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d
98cb3bf7268a95bf5081b65919081900360200190a250565b60025460ff1681565b60036020526000908152604090205481565b60018054604080516020600284861615610100026000190190941693909304601f810184900484028201840190925281815292918301828280156103cd5780601f106103a2576101008083540402835291602001916103cd565b600061068a33848461043f565b9392505050565b60046020908152600092835260408084209091529082529020548156fea265627a7a72315820b5d0f3fd9806e804fac1e6f2c471920c73129f13ebc9111db67de5c6254be08464736f6c63430005110032", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x577261707065642043697472656120426974636f696e0000000000000000002c", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x574342544300000000000000000000000000000000000000000000000000000a", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000012" + } + }, + { + "address": "0x3100000000000000000000000000000000000007", + "balance": "0x0", + "code": 
"0x60806040523661001357610011610017565b005b6100115b61001f610168565b6001600160a01b0316330361015e5760606001600160e01b03195f35166364d3180d60e11b81016100595761005261019a565b9150610156565b63587086bd60e11b6001600160e01b0319821601610079576100526101ed565b63070d7c6960e41b6001600160e01b031982160161009957610052610231565b621eb96f60e61b6001600160e01b03198216016100b857610052610261565b63a39f25e560e01b6001600160e01b03198216016100d8576100526102a0565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b6101666102b3565b565b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101a46102c3565b5f6101b23660048184610668565b8101906101bf91906106aa565b90506101da8160405180602001604052805f8152505f6102cd565b505060408051602081019091525f815290565b60605f806101fe3660048184610668565b81019061020b91906106d7565b9150915061021b828260016102cd565b60405180602001604052805f8152509250505090565b606061023b6102c3565b5f6102493660048184610668565b81019061025691906106aa565b90506101da816102f8565b606061026b6102c3565b5f610274610168565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b60606102aa6102c3565b5f61027461034f565b6101666102be61034f565b61035d565b3415610166575f5ffd5b6102d68361037b565b5f825111806102e25750805b156102f3576102f183836103ba565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610321610168565b604080516001600160a01b03928316815291841660208301520160405180910390a161034c816103e6565b50565b5f61035861048f565b905090565b365f5f375f5f365f845af43d5f5f3e808015610377573d5ff35b3d5ffd5b610384816104b6565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606103df83836040518060600160405280602781526020016107e76027913961054a565b9392505050565
b6001600160a01b03811661044b5760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161014d565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61018b565b6001600160a01b0381163b6105235760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161014d565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61046e565b60605f5f856001600160a01b031685604051610566919061079b565b5f60405180830381855af49150503d805f811461059e576040519150601f19603f3d011682016040523d82523d5f602084013e6105a3565b606091505b50915091506105b4868383876105be565b9695505050505050565b6060831561062c5782515f03610625576001600160a01b0385163b6106255760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161014d565b5081610636565b610636838361063e565b949350505050565b81511561064e5781518083602001fd5b8060405162461bcd60e51b815260040161014d91906107b1565b5f5f85851115610676575f5ffd5b83861115610682575f5ffd5b5050820193919092039150565b80356001600160a01b03811681146106a5575f5ffd5b919050565b5f602082840312156106ba575f5ffd5b6103df8261068f565b634e487b7160e01b5f52604160045260245ffd5b5f5f604083850312156106e8575f5ffd5b6106f18361068f565b9150602083013567ffffffffffffffff81111561070c575f5ffd5b8301601f8101851361071c575f5ffd5b803567ffffffffffffffff811115610736576107366106c3565b604051601f8201601f19908116603f0116810167ffffffffffffffff81118282101715610765576107656106c3565b60405281815282820160200187101561077c575f5ffd5b816020840160208301375f602083830101528093505050509250929050565b5f82518060208501845e5f920191825250919050565b602081525f82518060208401528060208501604085015e5f604082850
101526040601f19601f8301168401019150509291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000003200000000000000000000000000000000000007", + "0x9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x00000000000000000000000031ffffffffffffffffffffffffffffffffffffff" + } + }, + { + "address": "0x31ffffffffffffffffffffffffffffffffffffff", + "balance": "0x0", + "code": "0x608060405260043610610079575f3560e01c80639623609d1161004c5780639623609d1461010957806399a88ec41461011c578063f2fde38b1461013b578063f3b7dead1461015a575f5ffd5b8063204e1c7a1461007d578063715018a6146100b85780637eff275e146100ce5780638da5cb5b146100ed575b5f5ffd5b348015610088575f5ffd5b5061009c610097366004610479565b610179565b6040516001600160a01b03909116815260200160405180910390f35b3480156100c3575f5ffd5b506100cc610204565b005b3480156100d9575f5ffd5b506100cc6100e836600461049b565b610217565b3480156100f8575f5ffd5b505f546001600160a01b031661009c565b6100cc6101173660046104e6565b61027a565b348015610127575f5ffd5b506100cc61013636600461049b565b6102e5565b348015610146575f5ffd5b506100cc610155366004610479565b61031b565b348015610165575f5ffd5b5061009c610174366004610479565b610399565b5f5f5f836001600160a01b031660405161019d90635c60da1b60e01b815260040190565b5f60405180830381855afa9150503d805f81146101d5576040519150601f19603f3d011682016040523d82523d5f602084013e6101da565b606091505b5091509150816101e8575f5ffd5b808060200190518101906101fc91906105bd565b949350505050565b61020c6
103bd565b6102155f610416565b565b61021f6103bd565b6040516308f2839760e41b81526001600160a01b038281166004830152831690638f283970906024015b5f604051808303815f87803b158015610260575f5ffd5b505af1158015610272573d5f5f3e3d5ffd5b505050505050565b6102826103bd565b60405163278f794360e11b81526001600160a01b03841690634f1ef2869034906102b290869086906004016105d8565b5f604051808303818588803b1580156102c9575f5ffd5b505af11580156102db573d5f5f3e3d5ffd5b5050505050505050565b6102ed6103bd565b604051631b2ce7f360e11b81526001600160a01b038281166004830152831690633659cfe690602401610249565b6103236103bd565b6001600160a01b03811661038d5760405162461bcd60e51b815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201526564647265737360d01b60648201526084015b60405180910390fd5b61039681610416565b50565b5f5f5f836001600160a01b031660405161019d906303e1469160e61b815260040190565b5f546001600160a01b031633146102155760405162461bcd60e51b815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65726044820152606401610384565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6001600160a01b0381168114610396575f5ffd5b5f60208284031215610489575f5ffd5b813561049481610465565b9392505050565b5f5f604083850312156104ac575f5ffd5b82356104b781610465565b915060208301356104c781610465565b809150509250929050565b634e487b7160e01b5f52604160045260245ffd5b5f5f5f606084860312156104f8575f5ffd5b833561050381610465565b9250602084013561051381610465565b9150604084013567ffffffffffffffff81111561052e575f5ffd5b8401601f8101861361053e575f5ffd5b803567ffffffffffffffff811115610558576105586104d2565b604051601f8201601f19908116603f0116810167ffffffffffffffff81118282101715610587576105876104d2565b60405281815282820160200188101561059e575f5ffd5b816020840160208301375f602083830101528093505050509250925092565b5f602082840312156105cd575f5ffd5b815161049481610465565b60018060a01b0383168152604060208201525f825180604
08401528060208501606085015e5f606082850101526060601f19601f830116840101915050939250505056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266" + } + }, + { + "address": "0x3200000000000000000000000000000000000001", + "balance": "0x0", + "code": "0x608060405234801561000f575f5ffd5b50600436106100cb575f3560e01c8063a91d8b3d11610088578063d269a03e11610063578063d269a03e146101dd578063d5ba11fa146101f0578063d761753e14610203578063ee82ac5e14610236575f5ffd5b8063a91d8b3d1461018c578063abb068d6146101ab578063cd4cc08f146101ca575f5ffd5b80630466efc4146100cf5780631f5783331461010157806334cdf78d146101165780634ffd344a1461013557806357e871e71461015857806361b207e214610160575b5f5ffd5b6100ee6100dd36600461080e565b5f9081526002602052604090205490565b6040519081526020015b60405180910390f35b61011461010f36600461080e565b610255565b005b6100ee61012436600461080e565b60016020525f908152604090205481565b61014861014336600461086a565b610306565b60405190151581526020016100f8565b6100ee5f5481565b6100ee61016e36600461080e565b5f908152600160209081526040808320548352600290915290205490565b6100ee61019a36600461080e565b60026020525f908152604090205481565b6100ee6101b936600461080e565b60036020525f908152604090205481565b6101486101d83660046108c0565b61032b565b6101486101eb36600461086a565b6104b1565b6101146101fe36600461094a565b6104bf565b61021e73deaddeaddeaddeaddeaddeaddeaddeaddeaddead81565b6040516001600160a01b0390911681526020016100f8565b6100ee61024436600461080e565b5f9081526001602052604090205490565b3373deaddeaddeaddeaddeaddeaddeaddeaddeaddead146102bd5760405162461bcd60e51b815260206004820152601f60248201527f63616c6c6572206973206e6f74207468652073797374656d2063616c6c65720060448201526064015b60405180910390fd5b5f54156103025760405162461bcd60e51b8152602060048201526013602482015272105b1c9958591e481a5b9a5d1a585b1a5e9959606a1b60448201526064016102b4565b5f55565b5f8581526001602052604081205461032190868686866105f1565b9695505050505050565b5f5f61036b87878080601f016
0208091040260200160405190810160405280939291908181526020018383808284375f920191909152506106ab92505050565b5f8a81526001602052604090205490915081146103c15760405162461bcd60e51b815260206004820152601460248201527324b73b30b634b210313637b1b5903432b0b232b960611b60448201526064016102b4565b5f818152600360209081526040909120546103db91610987565b84146104205760405162461bcd60e51b8152602060048201526014602482015273092dcecc2d8d2c840e0e4dedecc40d8cadccee8d60631b60448201526064016102b4565b5f61045f88888080601f0160208091040260200160405190810160405280939291908181526020018383808284375f920191909152506106cd92505050565b90506104a3898288888080601f0160208091040260200160405190810160405280939291908181526020018383808284375f920191909152508a92506106db915050565b9a9950505050505050505050565b5f61032186868686866105f1565b3373deaddeaddeaddeaddeaddeaddeaddeaddeaddead146105225760405162461bcd60e51b815260206004820152601f60248201527f63616c6c6572206973206e6f74207468652073797374656d2063616c6c65720060448201526064016102b4565b5f8054908190036105675760405162461bcd60e51b815260206004820152600f60248201526e139bdd081a5b9a5d1a585b1a5e9959608a1b60448201526064016102b4565b5f81815260016020819052604090912085905561058590829061099e565b5f90815584815260026020908152604080832086905560038252918290208490558151838152908101869052908101849052606081018390527f4975e407627f5c539dcd7c961396db91c315f4421c3b0023ba1bcf2e9e9b41f19060800160405180910390a150505050565b5f85815260036020908152604082205461060a91610987565b831461064f5760405162461bcd60e51b8152602060048201526014602482015273092dcecc2d8d2c840e0e4dedecc40d8cadccee8d60631b60448201526064016102b4565b5f86815260026020908152604091829020548251601f8701839004830281018301909352858352916106a0918891849189908990819084018382808284375f920191909152508992506106db915050565b979650505050505050565b5f60205f83516020850160025afa5060205f60205f60025afa50505f51919050565b60448101515f905b92915050565b5f83851480156106e9575081155b80156106f457508251155b1561070157506001610710565b61070d85848685610718565b90505b949350505050565b5f6020845161072
791906109b1565b1561073357505f610710565b83515f0361074257505f610710565b81855f5b86518110156107c85761075a6002846109b1565b600103610796575f61076f8883016020015190565b9050828103610784575f945050505050610710565b61078e81846107d5565b9250506107af565b6107ac826107a78984016020015190565b6107d5565b91505b60019290921c916107c160208261099e565b9050610746565b5090931495945050505050565b5f6107e083836107e7565b9392505050565b5f825f528160205260205f60405f60025afa5060205f60205f60025afa50505f5192915050565b5f6020828403121561081e575f5ffd5b5035919050565b5f5f83601f840112610835575f5ffd5b50813567ffffffffffffffff81111561084c575f5ffd5b602083019150836020828501011115610863575f5ffd5b9250929050565b5f5f5f5f5f6080868803121561087e575f5ffd5b8535945060208601359350604086013567ffffffffffffffff8111156108a2575f5ffd5b6108ae88828901610825565b96999598509660600135949350505050565b5f5f5f5f5f5f5f60a0888a0312156108d6575f5ffd5b8735965060208801359550604088013567ffffffffffffffff8111156108fa575f5ffd5b6109068a828b01610825565b909650945050606088013567ffffffffffffffff811115610925575f5ffd5b6109318a828b01610825565b989b979a50959894979596608090950135949350505050565b5f5f5f6060848603121561095c575f5ffd5b505081359360208301359350604090920135919050565b634e487b7160e01b5f52601160045260245ffd5b80820281158282048414176106d5576106d5610973565b808201808211156106d5576106d5610973565b5f826109cb57634e487b7160e01b5f52601260045260245ffd5b50069056" + }, + { + "address": "0x3200000000000000000000000000000000000002", + "balance": "0x0", + "code": 
"0x60806040526004361015610011575f80fd5b5f3560e01c80630659216714610285578063092ac5d4146102085780630bd89ab7146102085780631369ac3e14610208578063158ef93e14610280578063198546231461027b57806323dacd29146102765780632594f107146102715780633c918b6c1461026c5780634126013714610267578063419759f514610262578063428bcd351461025d5780634379caa514610258578063471ba1e314610253578063570ca7351461024e5780635b4f894d146102495780636b0b5a94146102445780636cf7d6411461023f578063715018a61461023a578063781952a81461023557806379ba5097146102305780637ec9732a1461022b57806385fb7151146102265780638752b6b2146102215780638786dba71461021c5780638da5cb5b146102175780639072f747146102125780639a4f308d1461020d578063a0dc275814610208578063a670e7ed14610203578063b2497e70146101fe578063b3ab15fb146101f9578063d761753e146101f4578063e30c3978146101ef578063e613ae00146101ea578063f2fde38b146101e5578063f42cb4fc146101e0578063f8e655d2146101db578063fb11d7b9146101d65763fdecdf53146101d1575f80fd5b611981565b611890565b611744565b611729565b6116b0565b61168e565b61165a565b61162c565b6115a9565b61158e565b611543565b6103ba565b611527565b611474565b6113ec565b6113c7565b61138f565b6112d7565b611111565b611017565b610ffa565b610f7d565b610f62565b610f28565b610f0d565b610c36565b610bdf565b610bb0565b610b95565b610b5d565b61093a565b6108ea565b6105ca565b610593565b610436565b6103e5565b6102bb565b634e487b7160e01b5f52603260045260245ffd5b6008548110156102b65760085f5260205f2001905f90565b61028a565b3461030b57602036600319011261030b5760043560085481101561030b5760209060085f527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30154604051908152f35b5f80fd5b634e487b7160e01b5f52604160045260245ffd5b90601f801991011681019081106001600160401b0382111761034457604052565b61030f565b60405190610358604083610323565b565b6001600160401b03811161034457601f01601f191660200190565b60405190610384604083610323565b600182525f6020830152565b602060409281835280519182918282860152018484015e5f828201840152601f01601f1916010190565b3461030b575f36600319011261030b576103e16103d5610375565b60405191829182610390565b0390f35b3
461030b575f36600319011261030b57602060ff5f54166040519015158152f35b9181601f8401121561030b578235916001600160401b03831161030b576020808501948460051b01011161030b57565b604036600319011261030b576004356001600160401b03811161030b57610461903690600401610406565b6024356001600160401b03811161030b57610480903690600401610406565b929083830361053d5761049f610498846001546119b4565b34146119cc565b600754935f5b8481106104ae57005b807f3311a04a346a103ac115cca33028a2bc82f1964805860d0d3fc84a2772496ada6104dd6001938888611a18565b356105106104f46104ef85888a611a18565b611a28565b6104fc610349565b9283526001600160e01b0319166020830152565b61051981611a32565b610523838a611ad1565b610534604051928392429184611ade565b0390a1016104a5565b60405162461bcd60e51b815260206004820152600f60248201526e098cadccee8d040dad2e6dac2e8c6d608b1b6044820152606490fd5b60405190610583604083610323565b60018252600160f91b6020830152565b3461030b575f36600319011261030b576103e16103d5610574565b908160c091031261030b5790565b9081606091031261030b5790565b3461030b57608036600319011261030b576004356001600160401b03811161030b576105fa9036906004016105ae565b6024356001600160401b03811161030b576106199036906004016105bc565b906044356064355f5490929060081c6001600160a01b031633036108a55761075e6108a09161068b7f4d7c644a48da4c7857af62a00bad9806f0388564f22955ed846d938c244047f0966106706008548710611b0f565b61068561067e600554610c61565b1515611b4b565b826132f0565b5050604081016107486107586106b36106ae6106a78587611b97565b3691611420565b6134e3565b9761075060608601956107226106e96106cf6106a78a85611b97565b6106e36106dc8b86611b97565b9050611bc9565b906135b4565b936107026106fd6106a76080860186611b97565b6138ff565b9c8d61070d85611a28565b9060a086019761071c89611a28565b93613a30565b61074061073861073183611a28565b9783611b97565b989092611b97565b959093611a28565b963691611420565b923691611420565b91613cc3565b610789610784610780610779845f52600960205260405f2090565b5460ff1690565b1590565b611bf3565b6107ab61079e825f52600960205260405f2090565b805460ff19166001179055565b61088361087b6107f96107ca6107c08761029e565b90549060031b1c90565b966107d
e856107d88961029e565b90611c3f565b6107f460036107ec83614640565b905014611c5c565b613d54565b61087661087161086961080d600554610c61565b610858610853610845610821600654610c61565b9361083f89516108398761083485611a99565b611ad1565b14611ca0565b8861363c565b61084d610c99565b90613ee2565b611ce4565b610863818651611be6565b85613881565b61084d610d4e565b611d30565b613f8d565b948514611d7c565b604051938493846040919493926060820195825260208201520152565b0390a1005b60405162461bcd60e51b815260206004820152601a60248201527f63616c6c6572206973206e6f7420746865206f70657261746f720000000000006044820152606490fd5b3461030b575f36600319011261030b576103e16103d5610908610de3565b61368a565b9181601f8401121561030b578235916001600160401b03831161030b576020838186019501011161030b57565b3461030b57606036600319011261030b576004356001600160401b03811161030b5761096a90369060040161090d565b6024356001600160401b03811161030b5761098990369060040161090d565b9092906044359073deaddeaddeaddeaddeaddeaddeaddeaddeaddead3303610b18577f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc94610a3a610ad6936109ea6109e560ff5f541615151590565b611dd2565b6109f5811515611e1e565b610a00871515611e6a565b610a116402540be400820615611eb6565b610a21600160ff195f5416175f55565b610a2b8787612006565b610a3584846120e7565b600155565b5f8054610100600160a81b03191674deaddeaddeaddeaddeaddeaddeaddeaddeaddead00179055600280546001600160a01b0319166007603160981b011790557ffbe5b6cbafb274f445d7fed869dc77a838d8243a22c460de156560e8857cad0360405180610ac7819073deaddeaddeaddeaddeaddeaddeaddeaddeaddead602060408401935f81520152565b0390a1604051948594856122b1565b0390a1604080515f81526007603160981b0160208201527f79250b96878fd457364d1c1b77a660973c4f4ab67bda5e2fdb42caaa4d515f9d91819081016108a0565b60405162461bcd60e51b815260206004820152601f60248201527f63616c6c6572206973206e6f74207468652073797374656d2063616c6c6572006044820152606490fd5b3461030b575f36600319011261030b576020600154604051908152f35b60405190610b89604083610323565b600482525f6020830152565b3461030b575f36600319011261030b576103e16103d5610b7a565b3461030b576
02036600319011261030b576004355f526009602052602060ff60405f2054166040519015158152f35b3461030b57602036600319011261030b5760043560075481101561030b576007548110156102b65760409060075f5260205f209060011b016001815491015460e01b825191825263ffffffff60e01b166020820152f35b3461030b575f36600319011261030b575f5460405160089190911c6001600160a01b03168152602090f35b90600182811c92168015610c8f575b6020831014610c7b57565b634e487b7160e01b5f52602260045260245ffd5b91607f1691610c70565b604051905f8260055491610cac83610c61565b8083529260018116908115610d2f5750600114610cd0575b61035892500383610323565b5060055f90815290917f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db05b818310610d1357505090602061035892820101610cc4565b6020919350806001915483858901015201910190918492610cfb565b6020925061035894915060ff191682840152151560051b820101610cc4565b604051905f8260065491610d6183610c61565b8083529260018116908115610d2f5750600114610d845761035892500383610323565b5060065f90815290917ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f5b818310610dc757505090602061035892820101610cc4565b6020919350806001915483858901015201910190918492610daf565b604051905f8260035491610df683610c61565b8083529260018116908115610d2f5750600114610e195761035892500383610323565b5060035f90815290917fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b5b818310610e5c57505090602061035892820101610cc4565b6020919350806001915483858901015201910190918492610e44565b604051905f8260045491610e8b83610c61565b8083529260018116908115610d2f5750600114610eae5761035892500383610323565b5060045f90815290917f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b5b818310610ef157505090602061035892820101610cc4565b6020919350806001915483858901015201910190918492610ed9565b3461030b575f36600319011261030b576103e16103d5610de3565b3461030b575f36600319011261030b576103e16103d5610c99565b60405190610f52604083610323565b60018252608360f81b6020830152565b3461030b575f36600319011261030b576103e16103d5610f43565b3461030b575f36600319011261030b57610f95614000565b5f5160206149d75f3
95f51905f5280546001600160a01b03199081169091555f5160206149b75f395f51905f52805491821690555f906001600160a01b03167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e08280a3005b3461030b575f36600319011261030b576020600754604051908152f35b3461030b575f36600319011261030b575f5160206149d75f395f51905f5254336001600160a01b03909116036110ae575f5160206149d75f395f51905f5280546001600160a01b03199081169091555f5160206149b75f395f51905f5280543392811683179091556001600160a01b03167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e05f80a3005b63118cdaa760e01b5f523360045260245ffd5b604060031982011261030b576004356001600160401b03811161030b57816110eb9160040161090d565b92909291602435906001600160401b03821161030b5761110d9160040161090d565b9091565b3461030b5761111f366110c1565b61112a939193614000565b8215611270576001600160401b038311610344576111528361114d600554610c61565b611f17565b5f93601f84116001146111ca57906108a0916111a585807f6c9ac69a5e351d3e7ac9be95040d29a264d1ce6a409ca9f042c64c66c3f2a23a985f916111bf575b508160011b915f199060031b1c19161790565b6005555b6111b382826121bc565b604051948594856122b1565b90508601355f611192565b60055f52601f1984167f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0905f5b8181106112585750907f6c9ac69a5e351d3e7ac9be95040d29a264d1ce6a409ca9f042c64c66c3f2a23a96866108a09594931061123f575b5050600185811b016005556111a9565b8501355f19600388901b60f8161c191690555f8061122f565b858801358355602097880197600190930192016111f7565b60405162461bcd60e51b815260206004820152601e60248201527f5265706c616365207363726970742063616e6e6f7420626520656d70747900006044820152606490fd5b602090600319011261030b576004356001600160a01b038116810361030b5790565b3461030b576112e5366112b5565b6112ed614000565b6001600160a01b03811690811561135857600280546001600160a01b031981169093179055604080516001600160a01b0393841681529290911660208301527f79250b96878fd457364d1c1b77a660973c4f4ab67bda5e2fdb42caaa4d515f9d9190819081016108a0565b60405162461bcd60e51b815260206004820152600f60248201526e496e76616c6964206164647265737360881b6
044820152606490fd5b3461030b575f36600319011261030b576103e16103d5610e78565b6001600160e01b031981160361030b57565b3590610358826113aa565b604036600319011261030b576113ea6024356004356113e5826113aa565b6122d8565b005b3461030b575f36600319011261030b575f5160206149b75f395f51905f52546040516001600160a01b039091168152602090f35b92919261142c8261035a565b9161143a6040519384610323565b82948184528183011161030b578281602093845f960137010152565b9080601f8301121561030b5781602061147193359101611420565b90565b60a036600319011261030b576004356001600160401b03811161030b5761149f9036906004016105ae565b6024356001600160401b03811161030b576114be9036906004016105bc565b906044356001600160401b03811161030b576114de9036906004016105ae565b6064356001600160401b03811161030b576114fd90369060040161090d565b91608435946001600160401b03861161030b576115216113ea963690600401611456565b94612a5a565b3461030b575f36600319011261030b5760206040516102008152f35b3461030b575f36600319011261030b576002546040516001600160a01b039091168152602090f35b6040519061157a604083610323565b600482526001600160e01b03196020830152565b3461030b575f36600319011261030b576103e16103d561156b565b3461030b577ffbe5b6cbafb274f445d7fed869dc77a838d8243a22c460de156560e8857cad036115d8366112b5565b6115e0614000565b5f8054610100600160a81b031916600883811b610100600160a81b03169190911791829055604080519290911c6001600160a01b039081168352909216602082015290819081016108a0565b3461030b575f36600319011261030b57602060405173deaddeaddeaddeaddeaddeaddeaddeaddeaddead8152f35b3461030b575f36600319011261030b575f5160206149d75f395f51905f52546040516001600160a01b039091168152602090f35b3461030b575f36600319011261030b576040516001603160981b018152602090f35b3461030b576116be366112b5565b6116c6614000565b5f5160206149d75f395f51905f5280546001600160a01b0319166001600160a01b039283169081179091555f5160206149b75f395f51905f52549091167f38d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e227005f80a3005b3461030b575f36600319011261030b576103e16103d5610d4e565b3461030b57611752366110c1565b61175d939193614000565b611768831515611e6a565b60016001604
01b0383116103445761178a83611785600354610c61565b611f67565b5f93601f84116001146117ea57906108a0916117dc85807f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc985f916111bf57508160011b915f199060031b1c19161790565b6003555b6111b382826120e7565b60035f52601f1984167fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b905f5b8181106118785750907f80bd1fdfe157286ce420ee763f91748455b249605748e5df12dad9844402bafc96866108a09594931061185f575b5050600185811b016003556117e0565b8501355f19600388901b60f8161c191690555f8061184f565b85880135835560209788019760019093019201611817565b3461030b57606036600319011261030b576004356001600160401b03811161030b576118c09036906004016105ae565b6024356001600160401b03811161030b576118df9036906004016105bc565b6044359073deaddeaddeaddeaddeaddeaddeaddeaddeaddead33148015611968575b1561190f576113ea926130b8565b60405162461bcd60e51b815260206004820152602b60248201527f63616c6c6572206973206e6f74207468652073797374656d2063616c6c65722060448201526a37b91037b832b930ba37b960a91b6064820152608490fd5b505f543360089190911c6001600160a01b031614611901565b3461030b575f36600319011261030b5760206040516402540be4008152f35b634e487b7160e01b5f52601160045260245ffd5b818102929181159184041417156119c757565b6119a0565b156119d357565b60405162461bcd60e51b815260206004820152601760248201527f496e76616c696420776974686472617720616d6f756e740000000000000000006044820152606490fd5b91908110156102b65760051b0190565b35611471816113aa565b600754600160401b81101561034457600181016007556007548110156102b65760075f5260011b7fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c688016001602091835181550191015160e01c63ffffffff19825416179055565b90602082018092116119c757565b90601482018092116119c757565b90600182018092116119c757565b60010190816001116119c757565b919082018092116119c757565b606091949392611b06826080810197602090805183528163ffffffff60e01b91015116910152565b60408201520152565b15611b1657565b60405162461bcd60e51b815260206004820152600d60248201526c092dcecc2d8d2c840d2dcc8caf609b1b6044820152606490fd5b15611b5257565b6040516
2461bcd60e51b815260206004820152601960248201527f5265706c61636520736372697074206973206e6f7420736574000000000000006044820152606490fd5b903590601e198136030182121561030b57018035906001600160401b03821161030b5760200191813603831361030b57565b5f198101919082116119c757565b601f198101919082116119c757565b919082039182116119c757565b15611bfa57565b60405162461bcd60e51b815260206004820152601c60248201527f7478496420616c7265616479207573656420746f207265706c616365000000006044820152606490fd5b91611c589183549060031b91821b915f19901b19161790565b9055565b15611c6357565b60405162461bcd60e51b8152602060048201526015602482015274496e76616c6964207769746e657373206974656d7360581b6044820152606490fd5b15611ca757565b60405162461bcd60e51b8152602060048201526015602482015274092dcecc2d8d2c840e6c6e4d2e0e840d8cadccee8d605b1b6044820152606490fd5b15611ceb57565b60405162461bcd60e51b815260206004820152601d60248201527f496e76616c6964207265706c61636520736372697074207072656669780000006044820152606490fd5b15611d3757565b60405162461bcd60e51b815260206004820152601d60248201527f496e76616c6964207265706c61636520736372697074207375666669780000006044820152606490fd5b15611d8357565b606460405162461bcd60e51b815260206004820152602060248201527f496e76616c6964207478496420746f207265706c6163652070726f76696465646044820152fd5b611471610908610de3565b15611dd957565b60405162461bcd60e51b815260206004820152601f60248201527f436f6e747261637420697320616c726561647920696e697469616c697a6564006044820152606490fd5b15611e2557565b60405162461bcd60e51b815260206004820152601a60248201527f4465706f73697420616d6f756e742063616e6e6f7420626520300000000000006044820152606490fd5b15611e7157565b60405162461bcd60e51b815260206004820152601e60248201527f4465706f736974207363726970742063616e6e6f7420626520656d70747900006044820152606490fd5b15611ebd57565b60405162461bcd60e51b815260206004820152602c60248201527f4465706f73697420616d6f756e74206d75737420686176652076616c6964207360448201526b61746f7368692076616c756560a01b6064820152608490fd5b601f8111611f23575050565b60055f5260205f20906020601f840160051c83019310611f5d575
b601f0160051c01905b818110611f52575050565b5f8155600101611f47565b9091508190611f3e565b601f8111611f73575050565b60035f5260205f20906020601f840160051c83019310611fad575b601f0160051c01905b818110611fa2575050565b5f8155600101611f97565b9091508190611f8e565b601f8211611fc457505050565b5f5260205f20906020601f840160051c83019310611ffc575b601f0160051c01905b818110611ff1575050565b5f8155600101611fe6565b9091508190611fdd565b91906001600160401b0381116103445761202c81612025600354610c61565b6003611fb7565b5f601f821160011461206a57819061205a93945f9261205f575b50508160011b915f199060031b1c19161790565b600355565b013590505f80612046565b60035f52601f198216937fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b915f5b8681106120cf57508360019596106120b6575b505050811b01600355565b01355f19600384901b60f8161c191690555f80806120ab565b90926020600181928686013581550194019101612098565b91906001600160401b0381116103445761210d81612106600454610c61565b6004611fb7565b5f601f821160011461213f57819061213a93945f9261205f5750508160011b915f199060031b1c19161790565b600455565b60045f52601f198216937f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b915f5b8681106121a4575083600195961061218b575b505050811b01600455565b01355f19600384901b60f8161c191690555f8080612180565b9092602060018192868601358155019401910161216d565b91906001600160401b038111610344576121e2816121db600654610c61565b6006611fb7565b5f601f821160011461221457819061220f93945f9261205f5750508160011b915f199060031b1c19161790565b600655565b60065f52601f198216937ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f915f5b8681106122795750836001959610612260575b505050811b01600655565b01355f19600384901b60f8161c191690555f8080612255565b90926020600181928686013581550194019101612242565b908060209392818452848401375f828201840152601f01601f1916010190565b92906122ca906114719593604086526040860191612291565b926020818503910152612291565b906122e660015434146119cc565b6040519060408201928284106001600160401b03851117610344577f3311a04a346a103ac115cca33028a2bc82f1964805860d0d3fc84a2772496ada9360405
2825263ffffffff60e01b16602082015260075461234282611a32565b612353604051928392429184611ade565b0390a1565b1561235f57565b60405162461bcd60e51b815260206004820152601d60248201527f56696e206973206e6f742070726f7065726c7920666f726d61747465640000006044820152606490fd5b156123ab57565b60405162461bcd60e51b815260206004820152601e60248201527f566f7574206973206e6f742070726f7065726c7920666f726d617474656400006044820152606490fd5b9081602091031261030b5751801515810361030b5790565b979695919261243f9460809694612431938b5260208b015260a060408b015260a08a0191612291565b918783036060890152612291565b930152565b6040513d5f823e3d90fd5b1561245657565b60405162461bcd60e51b815260206004820152601b60248201527f5472616e73616374696f6e206973206e6f7420696e20626c6f636b00000000006044820152606490fd5b156124a257565b60405162461bcd60e51b8152602060048201526024808201527f5061796f75742076696e206973206e6f742070726f7065726c7920666f726d616044820152631d1d195960e21b6064820152608490fd5b156124fa57565b60405162461bcd60e51b815260206004820152602860248201527f5061796f75742076696e2073686f756c6420686176652065786163746c79206f6044820152671b99481a5b9c1d5d60c21b6064820152608490fd5b1561255757565b60405162461bcd60e51b815260206004820152602a60248201527f5061796f757420766f75742073686f756c6420686176652065786163746c79206044820152691bdb99481bdd5d1c1d5d60b21b6064820152608490fd5b156125b657565b60405162461bcd60e51b815260206004820152602560248201527f5061796f757420766f7574206973206e6f742070726f7065726c7920666f726d604482015264185d1d195960da1b6064820152608490fd5b1561261057565b60405162461bcd60e51b815260206004820152602860248201527f5061796f7574207769746e657373206973206e6f742070726f7065726c7920666044820152671bdc9b585d1d195960c21b6064820152608490fd5b1561266d57565b60405162461bcd60e51b815260206004820152602360248201527f496e76616c6964207061796f7574206f757470757420736372697074207075626044820152626b657960e81b6064820152608490fd5b156126c557565b60405162461bcd60e51b8152602060048201526012602482015271125b9d985b1a59081cdc195b9d081d1e125960721b6044820152606490fd5b1561270657565b60405162461bcd6
0e51b815260206004820152601b60248201527f496e76616c6964207370656e74206f7574707574206c656e67746800000000006044820152606490fd5b6040519061275a604083610323565b60018252601160f91b6020830152565b1561277157565b60405162461bcd60e51b815260206004820152602960248201527f496e76616c6964207370656e74206f757470757420736372697074207075626b6044820152680caf240d8cadccee8d60bb1b6064820152608490fd5b604051906127d7604083610323565b6002825261028960f51b6020830152565b156127ef57565b60405162461bcd60e51b815260206004820152602160248201527f5370656e74206f7574707574206973206e6f7420612050325452206f757470756044820152601d60fa1b6064820152608490fd5b805191908290602001825e015f815290565b6128ac979360249b999795612877612893956128718f9a969560089661283e565b9061283e565b6001600160e01b0319928316815291166004820152019061283e565b9182526001600160e01b0319166020820152019061283e565b6001600160e01b0319909216825260048201520190565b604051906128d2604083610323565b600a8252690a8c2e0a6d2ced0c2e6d60b31b6020830152565b156128f257565b60405162461bcd60e51b8152602060048201526011602482015270496e76616c6964207369676e617475726560781b6044820152606490fd5b6001600160f01b031981160361030b57565b9035601e198236030181121561030b5701602081359101916001600160401b03821161030b57813603831361030b57565b612a5660609295949395608083528035612987816113aa565b6001600160e01b031916608084015260208101356129a48161292b565b61ffff60f01b1660a0840152612a36612a2460a0612a1d6129fd6129df6129ce604088018861293d565b60c0808c01526101408b0191612291565b6129eb8a88018861293d565b8a8303607f190160e08c015290612291565b612a0a608087018761293d565b898303607f19016101008b015290612291565b93016113bc565b6001600160e01b031916610120850152565b8651602084810191909152909601516001600160e01b0319166040830152565b0152565b94919092602061075095612b1695612ae1612ad88a612a90612a8b612a866106a7604085018095611b97565b614020565b612358565b60a06107588d606081019d8e612ab9612ab4612aaf6106a78487611b97565b614098565b6123a4565b610748612acf612ac885611a28565b9785611b97565b98909285611b97565b96909401611a28565b96879260408584013593612af58180611b97565b9
29091013592604051988997889763cd4cc08f60e01b895260048901612408565b03816001603160981b015afa8015612ea257612b39915f91612ea7575b5061244f565b60408101612b478183611b97565b3690612b5292611420565b612b5b90614020565b612b649061249b565b612b6e8183611b97565b3690612b7992611420565b612b8290614640565b60608401979150612b938885611b97565b3690612b9e92611420565b612ba790614640565b91612bb591506001146124f3565b600114612bc190612550565b612bcb8784611b97565b3690612bd692611420565b612bdf90614098565b612be8906125af565b6080830191612bf78385611b97565b3690612c0292611420565b612c0b90614107565b612c1490612609565b612c1e9084611b97565b3690612c2992611420565b612c32906134e3565b96612c3d9084611b97565b3690612c4892611420565b612c51906141e2565b91612c5c9084611b97565b3690612c6792611420565b612c70906138ff565b93612c7a836136d3565b90612c8491613ee2565b612c8d90612666565b6020870151948514612c9e906126be565b60408701516001600160e01b03191695612cb791611b97565b3690612cc292611420565b8560e01c612cf490600881811c62ff00ff1691901b63ff00ff001617601081811b63ffff00001691901c61ffff161790565b63ffffffff16612d039161426f565b8051602b14612d11906126ff565b612d1a8161371c565b612d2261274b565b612d2b91613ee2565b612d349061276a565b612d3d81613765565b612d456127c8565b612d4e91613ee2565b612d57906127e8565b612d60816137ae565b6045909701516001600160e01b03191691604051809160208201612d839161283e565b03601f1981018252612d959082610323565b604051612da381809361283e565b03905a915f916002602094fa15612ea257610358967fd77102e5369b5b1a9db1972cb3de26ee79abc69de5cde41eeaa67fe3939c1c5594612e61612e57612e5187612e448b612e368e612e6c9b612e679b5f5192612dff610375565b95612e08610f43565b98612e1e60a0612e1783611a28565b9201611a28565b612e26610375565b916040519b8c9a60208c01612850565b03601f198101835282610323565b612e4c6128c3565b6142db565b92613e04565b6106e38151611bc9565b91614343565b6128eb565b612e74610349565b8381526001600160e01b03198516602082015290612e9a6007546040519384938461296e565b0390a16122d8565b612444565b612ec9915060203d602011612ecf575b612ec18183610323565b8101906123f0565b5f612b33565b503d612eb7565b15612edd57565b6
0405162461bcd60e51b815260206004820152601660248201527513db9b1e481bdb99481a5b9c1d5d08185b1b1bddd95960521b6044820152606490fd5b15612f2257565b60405162461bcd60e51b81526020600482015260126024820152711d1e125908185b1c9958591e481cdc195b9d60721b6044820152606490fd5b600854600160401b81101561034457600181016008556008548110156102b65760085f527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30155565b15612fac57565b60405162461bcd60e51b8152602060048201526016602482015275125b9d985b1a590819195c1bdcda5d081cd8dc9a5c1d60521b6044820152606490fd5b15612ff157565b60405162461bcd60e51b8152602060048201526015602482015274092dcecc2d8d2c840e6c6e4d2e0e840e6eaccccd2f605b1b6044820152606490fd5b3d15613058573d9061303f8261035a565b9161304d6040519384610323565b82523d5f602084013e565b606090565b1561306457565b60405162461bcd60e51b815260206004820152602660248201527f4661696c656420746f2073656e6420746f206661696c6564206465706f736974604482015265081d985d5b1d60d21b6064820152608490fd5b6131336130f461318d926130da6130d260019796836132f0565b909714612ed6565b604081016107486107586106b36106ae6106a78587611b97565b92613115613110610780610779875f52600960205260405f2090565b612f1b565b61312a61079e855f52600960205260405f2090565b6107de84612f5c565b61318861318361317b613147600354610c61565b61085861317661316e61315b600454610c61565b9361083f89516108398761083485611aa7565b61084d610de3565b612fa5565b61084d610e78565b612fea565b61441b565b905f808080600154865af16131a061302e565b5061323d5761320e7fabd361bc68da04a386a8de9d0fb3044cca0856cbd86e9e4a63237e015b3e4bb9936131d5600854611bc9565b6040805192835260208301949094526001600160a01b0390941692810192909252426060830152608082019290925290819060a0820190565b0390a16103585f80808061322960025460018060a01b031690565b600154905af161323761302e565b5061305d565b6123537fa82453ca34121b3ecb910d957824e27c5dc6465315949facd15fb72886490058936131d5600854611bc9565b356114718161292b565b1561327e57565b60405162461bcd60e51b815260206004820152602160248201527f5769746e657373206973206e6f742070726f7065726c7920666f726d617474656044820152601960fa1b60648
20152608490fd5b95949361243f926060949288526020880152608060408801526080870191612291565b91906132fb83611a28565b906133086020850161326d565b93604081016133178183611b97565b94909660608401956133298786611b97565b608087019a916133398c89611b97565b94909361334860a08b01611a28565b96613352986144c8565b9361335d8284611b97565b369061336892611420565b61337190614020565b61337a90612358565b6133849083611b97565b369061338f92611420565b61339890614098565b6133a1906123a4565b6133ab9082611b97565b36906133b692611420565b6133bf90614640565b956133cc91508692611b97565b36906133d792611420565b906133e191614147565b6133ea90613277565b6020810135906133fa8180611b97565b604080516327fe9a2560e11b8152948594613420949201359291908790600487016132cd565b6001603160981b0191839103815a93602094fa8015612ea257613449915f91612ea7575061244f565b9190565b1561345457565b60405162461bcd60e51b815260206004820152602260248201527f52656164206f76657272756e20647572696e6720566172496e742070617273696044820152616e6760f01b6064820152608490fd5b156134ab57565b60405162461bcd60e51b815260206004820152601060248201526f2b34b7103932b0b21037bb32b9393ab760811b6044820152606490fd5b6135066134ef82614640565b9091906134ff5f1984141561344d565b15156134a4565b600101806001116119c75761351b8183614539565b905f19821461352d5761147192613881565b60405162461bcd60e51b815260206004820152601760248201527f42616420566172496e7420696e207363726970745369670000000000000000006044820152606490fd5b1561357957565b60405162461bcd60e51b8152602060048201526013602482015272536c696365206f7574206f6620626f756e647360681b6044820152606490fd5b91909182156136255782600101806001116119c7578060016135de92119081613619575b50613572565b60405192604081850101604052808452602182850391818401930101915b82811061360857505050565b80518282015f1901526020016135fc565b9050825110155f6135d8565b509050604051613636602082610323565b5f815290565b9190918215613625576136528382511015613572565b60405192604081850101604052808452602082850391818401930101915b82811061367c57505050565b805182820152602001613670565b90613699602283511015613572565b60405191606083016040526020835280830360426
02283019201915b8281106136c157505050565b805182820160011901526020016136b5565b906136e2602b83511015613572565b604051916062830160405260228352808303604b602983019201915b82811061370a57505050565b805182820160081901526020016136fe565b9061372b600983511015613572565b6040519160418301604052600183528083036029602883019201915b82811061375357505050565b80518282016007190152602001613747565b90613774600b83511015613572565b604051916042830160405260028352808303602b602983019201915b82811061379c57505050565b80518282016008190152602001613790565b906137bd602b83511015613572565b604051916060830160405260208352808303604b602b83019201915b8281106137e557505050565b8051828201600a1901526020016137d9565b90613806604083511015613572565b6040519160808301604052604083528083036060602083019201915b82811061382e57505050565b805182820152602001613822565b9061384b602483511015613572565b6040519160648301604052602483528083036044602083019201915b82811061387357505050565b805182820152602001613867565b92919081156138ed578181018082116119c75780826138a7921190816138e15750613572565b604051936040838601016040528285520190602082850391818401930101915b8281106138d357505050565b8051828201526020016138c7565b9050855110155f6135d8565b50509050604051613636602082610323565b6139095f82614578565b5f19811461391b575f61147192613881565b60405162461bcd60e51b815260206004820152601560248201527442616420566172496e7420696e207769746e65737360581b6044820152606490fd5b8051600110156102b65760210190565b8051601f10156102b657603f0190565b9081518110156102b6570160200190565b600190611471939260ff60f81b168152019061283e565b604051906139af604083610323565b60078252662a30b82632b0b360c91b6020830152565b96612871966114719f9e9c989660a8966128719f9c956139f19060209f9a61287190613a269f9861283e565b6001600160e01b031997881681529616600487015260088601526028850152604884015260688301526088820152019061283e565b908152019061283e565b91949390929360205f613a51613a458661383c565b6040519182809261283e565b039060025afa15612ea2575f519060205f613b38612e36613a45613b1a613b0a613a91613a856001546402540be400900490565b6001600160401b031690565b65ffff000
0ffff67ffffffffffff000067ff00ff00ff00ff008360081c9360081b169264ff000000ff65ffff0000ff0065ffffffffffff67ffffffffffffff00871666ff00ff00ff00ff85161760101c16951691161760101b1691161767ffffffff0000000063ffffffff8260201c169160201b161790565b60c01b6001600160c01b03191690565b6040519283918783016008916001600160401b0360c01b1681520190565b039060025afa15612ea25760205f613b81612e36613a45613b66845199604563ffffffff60e01b9101511690565b60405192839187830160049163ffffffff60e01b1681520190565b039060025afa15612ea25760205f613baa612e36613a458351996040519283918783019061283e565b039060025afa15612ea2575f5193613bc188613d54565b607f60f91b613bcf8a613e4a565b613bd890613958565b516001600160f81b031916604051928392613bf7921660208401613989565b03601f1981018252613c099082610323565b613c116139a0565b90613c1b916142db565b91613c24610375565b95613c2d610375565b97613c36610574565b92613c3f610b7a565b94613c48610375565b97613c5161156b565b996040519d8e9d60208f019d613c669e6139c5565b03601f1981018252613c789082610323565b613c806128c3565b90613c8a916142db565b90613c9490613e04565b8051613c9f90611bc9565b613ca8916135b4565b613cb0611dc7565b91613cba92614343565b610358906128eb565b60049061147194613cf3613d0e949561287160405197889563ffffffff60e01b166020870152602486019061283e565b9063ffffffff60e01b16815203601b19810184520182610323565b614612565b15613d1a57565b60405162461bcd60e51b815260206004820152601260248201527142616420566172496e7420696e206974656d60701b6044820152606490fd5b90613d79613d6183614640565b909190613d715f1984141561344d565b6001106134a4565b600101806001116119c757915f925b60018410613dbe57611471929350613db8613db3613da6838561479c565b6108345f19821415613d13565b611ab5565b91613881565b613dc8818361479c565b9190613dd75f19841415613d13565b8060010192836001116119c75760019101018092116119c757600191613dfc91611ad1565b930192613d88565b613e106134ef82614640565b60010190816001116119c757613e26828261479c565b9290613e355f19851415613d13565b83018093116119c757613db861147193611ab5565b90613e6f613e5783614640565b909190613e675f1984141561344d565b6002106134a4565b600101806001116119c757915f925
b60028410613e9c57611471929350613db8613db3613da6838561479c565b613ea6818361479c565b9190613eb55f19841415613d13565b8060010192836001116119c75760019101018092116119c757600191613eda91611ad1565b930192613e7e565b9081519181518303613f865760205b83811115613f6b57613f0290611bd7565b838110613f125750505050600190565b613f2d613f1f8284613978565b516001600160f81b03191690565b613f4a613f3d613f1f8487613978565b6001600160f81b03191690565b6001600160f81b031990911603613f6357600101613f02565b505050505f90565b818101518382015160209092019114613ef157505050505f90565b5050505f90565b613f98600554610c61565b602081018082116119c7578082613fb692119081613ff45750613572565b604051916060830160405260208352018082036040602083019201915b828110613fe6575050506114719061485d565b805182820152602001613fd3565b9050835110155f6135d8565b5f5160206149b75f395f51905f52546001600160a01b031633036110ae57565b61402981614640565b91908215801561408e575b613f8657600101806001116119c757915f905b808210614055575050511490565b90928251811015613f635761406a8184614539565b5f1981146140855781018091116119c7579260010190614047565b50505050505f90565b505f198114614034565b6140a181614640565b9190821580156140fd575b613f8657600101806001116119c757915f905b8082106140cd575050511490565b90928251811015613f63576140e281846148cd565b5f1981146140855781018091116119c75792600101906140bf565b505f1981146140ac565b5f905f5b600181106141195750511490565b918151811015613f865761412d8183614578565b5f198114613f635781018091116119c7579160010161410b565b8115614190575f915f905b808210614160575050511490565b90928251811015613f63576141758184614578565b5f1981146140855781018091116119c7579260010190614152565b50505f90565b1561419d57565b60405162461bcd60e51b815260206004820152601a60248201527f42616420566172496e7420696e207363726970745075626b65790000000000006044820152606490fd5b6141eb81614640565b9091906141fb5f1984141561344d565b1561423657600161420e61147193611ac3565b90614231575b61421e81836148cd565b9161422c5f19841415614196565b613881565b614214565b60405162461bcd60e51b81526020600482015260116024820152702b37baba103932b0b21037bb32b9393ab760791b6
044820152606490fd5b91909161427b81614640565b6142885f1983141561344d565b8410156142365761429890611ac3565b5f935b8085106142b3575061147192935061421e81836148cd565b906142d2816142c4600193866148cd565b906108345f19831415614196565b9401939061429b565b5f6142ee6020926040519182809261283e565b039060025afa15612ea2575f614322602092613a458351612e3660405193828593898501526040840152606083019061283e565b039060025afa15612ea2575f5190565b91613a26611471949360209361283e565b919091815160408114908115614410575b50156143cb575f9261437e61436985946137f7565b91612e36604051938492602084019687614332565b51906102005afa61438d61302e565b816143bf575b8161439c575090565b600160f81b91506001600160f81b0319906143ba90613f1f90613968565b161490565b80516020149150614393565b60405162461bcd60e51b815260206004820152601860248201527f496e76616c6964207369676e6174757265206c656e67746800000000000000006044820152606490fd5b60419150145f614354565b614426600354610c61565b601481018082116119c757808261444492119081613ff45750613572565b604051916054830160405260148352018082036034602083019201915b8281106144ba5750505060208151910151906bffffffffffffffffffffffff1982169160148210614495575b505060601c90565b6bffffffffffffffffffffffff1960149290920360031b82901b161690505f8061448d565b805182820152602001614461565b969483869482949a9896939a6040519b8c9b63ffffffff60e01b1660208d015261ffff60f01b1660248c015260268b0137880191602683015f81523701602601915f83528237019063ffffffff60e01b16815203601b19810182526004016145309082610323565b61147190614612565b906145439161491b565b5f198214614571578160250191826025116119c75701602581018092116119c7576029018091116119c75790565b50505f1990565b90614583818361479c565b92905f19811461460957600101806001116119c7579291905f915b8383106145ac575050505090565b909192938082018083116119c7576145c4908461479c565b91905f1983146145fd578060010192836001116119c75760019101018092116119c7576001916145f391611ad1565b949301919061459e565b505050505050505f1990565b505050505f1990565b5f602091828151910160025afa5060205f818160025afa505f5190565b60ff166001019060ff82116119c757565b9061464b5f8361494d565b9160f
f83169283156147795761466f5f61083461466985519461462f565b60ff1690565b11614770575f600284036146c657506146bf6146ac6146a6611471936146945f611ac3565b01602001516001600160f01b03191690565b60f01c90565b61ff0060ff8260081c169160081b161790565b61ffff1690565b6004840361473057506147276146fd6146f7611471936146e55f611ac3565b01602001516001600160e01b03191690565b60e01c90565b600881811c62ff00ff1691901b63ff00ff001617601081811b63ffff00001691901c61ffff161790565b63ffffffff1690565b92906008821461473f57509190565b611471919350613a9161476a613a85926147585f611ac3565b01602001516001600160c01b03191690565b60c01c90565b505f1991505f90565b50614793919250613f1f5f61478d92613978565b60f81c90565b9060ff5f921690565b9190916147a9838261494d565b9260ff8416938415614849576147c78261083461466986519461462f565b1161483e575f600285036147ec57506146ac6146a6611471936146946146bf94611ac3565b6004850361480b57506146fd6146f7611471936146e561472794611ac3565b9391906008831461481d575b50509190565b61483692945061476a613a8592614758613a9193611ac3565b915f80614817565b505f1992505f919050565b50614793929350613f1f9061478d92613978565b8051908115614190576020821161487d57602001519060200360031b1c90565b60405162461bcd60e51b815260206004820152602260248201527f42797465732063616e6e6f74206265206d6f7265207468616e20333220627974604482015261657360f01b6064820152608490fd5b9081518160090190816009116119c7571061457157600801806008116119c7576148f69161479c565b905f198114614571578060090191826009116119c75760099101018091116119c75790565b9081518160250190816025116119c7571061494457602481018091116119c75761110d9161479c565b505f19915f9150565b9060ff61495a8284613978565b5160f81c146149af5760fe60ff6149718385613978565b8160f81b90511660f81c16146149a85761498f60ff9160fd93613978565b8160f81b90511660f81c16146149a3575f90565b600290565b5050600490565b505060089056fe9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c00" + }, + { + "address": "0x3200000000000000000000000000000000000003", + "balance": "0x0", + "code": 
"0x608060405260043610610092575f3560e01c806379ba50971161005757806379ba5097146101405780638da5cb5b14610154578063cc3d272114610168578063e30c39781461018b578063f2fde38b1461019f575f5ffd5b806335aa134a1461009d5780633bbed4a0146100be5780633ccfd60b146100dd57806366d003ac146100f1578063715018a61461012c575f5ffd5b3661009957005b5f5ffd5b3480156100a8575f5ffd5b506100bc6100b7366004610596565b6101be565b005b3480156100c9575f5ffd5b506100bc6100d83660046105ad565b61020c565b3480156100e8575f5ffd5b506100bc61026d565b3480156100fc575f5ffd5b505f5461010f906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b348015610137575f5ffd5b506100bc61037f565b34801561014b575f5ffd5b506100bc610392565b34801561015f575f5ffd5b5061010f6103d7565b348015610173575f5ffd5b5061017d60015481565b604051908152602001610123565b348015610196575f5ffd5b5061010f61040b565b3480156101aa575f5ffd5b506100bc6101b93660046105ad565b610433565b6101c66104b8565b600180549082905560408051828152602081018490527f3c4f4d8cd2a65b4b1f4eeaf43669b14ab54e43d4842aa0ac8f0e4f9fe0bf5bf991015b60405180910390a15050565b6102146104b8565b5f80546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e45939101610200565b6001544710156102ea5760405162461bcd60e51b815260206004820152603e60248201527f5769746864726177616c20616d6f756e74206d7573742062652067726561746560448201527f72207468616e206d696e696d756d20776974686472617720616d6f756e74000060648201526084015b60405180910390fd5b5f80546040516001600160a01b039091169047908381818185875af1925050503d805f8114610334576040519150601f19603f3d011682016040523d82523d5f602084013e610339565b606091505b505090508061037c5760405162461bcd60e51b815260206004820152600f60248201526e151c985b9cd9995c8819985a5b1959608a1b60448201526064016102e1565b50565b6103876104b8565b6103905f6104ea565b565b338061039c61040b565b6001600160a01b0316146103ce5760405163118cdaa760e01b81526001600160a01b03821660048201526024016102e1565b61037c816104ea565b5f807f9016d09d72d40fdae2fd8ce
ac6b6234c7706214fd39c1cd1e609a0528c1993005b546001600160a01b031692915050565b5f807f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c006103fb565b61043b6104b8565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b0319166001600160a01b038316908117825561047f6103d7565b6001600160a01b03167f38d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e2270060405160405180910390a35050565b336104c16103d7565b6001600160a01b0316146103905760405163118cdaa760e01b81523360048201526024016102e1565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b031916815561052282610526565b5050565b7f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c19930080546001600160a01b031981166001600160a01b03848116918217845560405192169182907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0905f90a3505050565b5f602082840312156105a6575f5ffd5b5035919050565b5f602082840312156105bd575f5ffd5b81356001600160a01b03811681146105d3575f5ffd5b939250505056" + }, + { + "address": "0x3200000000000000000000000000000000000004", + "balance": "0x0", + "code": 
"0x608060405260043610610092575f3560e01c806379ba50971161005757806379ba5097146101405780638da5cb5b14610154578063cc3d272114610168578063e30c39781461018b578063f2fde38b1461019f575f5ffd5b806335aa134a1461009d5780633bbed4a0146100be5780633ccfd60b146100dd57806366d003ac146100f1578063715018a61461012c575f5ffd5b3661009957005b5f5ffd5b3480156100a8575f5ffd5b506100bc6100b7366004610596565b6101be565b005b3480156100c9575f5ffd5b506100bc6100d83660046105ad565b61020c565b3480156100e8575f5ffd5b506100bc61026d565b3480156100fc575f5ffd5b505f5461010f906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b348015610137575f5ffd5b506100bc61037f565b34801561014b575f5ffd5b506100bc610392565b34801561015f575f5ffd5b5061010f6103d7565b348015610173575f5ffd5b5061017d60015481565b604051908152602001610123565b348015610196575f5ffd5b5061010f61040b565b3480156101aa575f5ffd5b506100bc6101b93660046105ad565b610433565b6101c66104b8565b600180549082905560408051828152602081018490527f3c4f4d8cd2a65b4b1f4eeaf43669b14ab54e43d4842aa0ac8f0e4f9fe0bf5bf991015b60405180910390a15050565b6102146104b8565b5f80546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e45939101610200565b6001544710156102ea5760405162461bcd60e51b815260206004820152603e60248201527f5769746864726177616c20616d6f756e74206d7573742062652067726561746560448201527f72207468616e206d696e696d756d20776974686472617720616d6f756e74000060648201526084015b60405180910390fd5b5f80546040516001600160a01b039091169047908381818185875af1925050503d805f8114610334576040519150601f19603f3d011682016040523d82523d5f602084013e610339565b606091505b505090508061037c5760405162461bcd60e51b815260206004820152600f60248201526e151c985b9cd9995c8819985a5b1959608a1b60448201526064016102e1565b50565b6103876104b8565b6103905f6104ea565b565b338061039c61040b565b6001600160a01b0316146103ce5760405163118cdaa760e01b81526001600160a01b03821660048201526024016102e1565b61037c816104ea565b5f807f9016d09d72d40fdae2fd8ce
ac6b6234c7706214fd39c1cd1e609a0528c1993005b546001600160a01b031692915050565b5f807f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c006103fb565b61043b6104b8565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b0319166001600160a01b038316908117825561047f6103d7565b6001600160a01b03167f38d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e2270060405160405180910390a35050565b336104c16103d7565b6001600160a01b0316146103905760405163118cdaa760e01b81523360048201526024016102e1565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b031916815561052282610526565b5050565b7f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c19930080546001600160a01b031981166001600160a01b03848116918217845560405192169182907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0905f90a3505050565b5f602082840312156105a6575f5ffd5b5035919050565b5f602082840312156105bd575f5ffd5b81356001600160a01b03811681146105d3575f5ffd5b939250505056" + }, + { + "address": "0x3200000000000000000000000000000000000005", + "balance": "0x0", + "code": 
"0x608060405260043610610092575f3560e01c806379ba50971161005757806379ba5097146101405780638da5cb5b14610154578063cc3d272114610168578063e30c39781461018b578063f2fde38b1461019f575f5ffd5b806335aa134a1461009d5780633bbed4a0146100be5780633ccfd60b146100dd57806366d003ac146100f1578063715018a61461012c575f5ffd5b3661009957005b5f5ffd5b3480156100a8575f5ffd5b506100bc6100b7366004610596565b6101be565b005b3480156100c9575f5ffd5b506100bc6100d83660046105ad565b61020c565b3480156100e8575f5ffd5b506100bc61026d565b3480156100fc575f5ffd5b505f5461010f906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b348015610137575f5ffd5b506100bc61037f565b34801561014b575f5ffd5b506100bc610392565b34801561015f575f5ffd5b5061010f6103d7565b348015610173575f5ffd5b5061017d60015481565b604051908152602001610123565b348015610196575f5ffd5b5061010f61040b565b3480156101aa575f5ffd5b506100bc6101b93660046105ad565b610433565b6101c66104b8565b600180549082905560408051828152602081018490527f3c4f4d8cd2a65b4b1f4eeaf43669b14ab54e43d4842aa0ac8f0e4f9fe0bf5bf991015b60405180910390a15050565b6102146104b8565b5f80546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e45939101610200565b6001544710156102ea5760405162461bcd60e51b815260206004820152603e60248201527f5769746864726177616c20616d6f756e74206d7573742062652067726561746560448201527f72207468616e206d696e696d756d20776974686472617720616d6f756e74000060648201526084015b60405180910390fd5b5f80546040516001600160a01b039091169047908381818185875af1925050503d805f8114610334576040519150601f19603f3d011682016040523d82523d5f602084013e610339565b606091505b505090508061037c5760405162461bcd60e51b815260206004820152600f60248201526e151c985b9cd9995c8819985a5b1959608a1b60448201526064016102e1565b50565b6103876104b8565b6103905f6104ea565b565b338061039c61040b565b6001600160a01b0316146103ce5760405163118cdaa760e01b81526001600160a01b03821660048201526024016102e1565b61037c816104ea565b5f807f9016d09d72d40fdae2fd8ce
ac6b6234c7706214fd39c1cd1e609a0528c1993005b546001600160a01b031692915050565b5f807f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c006103fb565b61043b6104b8565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b0319166001600160a01b038316908117825561047f6103d7565b6001600160a01b03167f38d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e2270060405160405180910390a35050565b336104c16103d7565b6001600160a01b0316146103905760405163118cdaa760e01b81523360048201526024016102e1565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b031916815561052282610526565b5050565b7f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c19930080546001600160a01b031981166001600160a01b03848116918217845560405192169182907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0905f90a3505050565b5f602082840312156105a6575f5ffd5b5035919050565b5f602082840312156105bd575f5ffd5b81356001600160a01b03811681146105d3575f5ffd5b939250505056" + }, + { + "address": "0x3200000000000000000000000000000000000007", + "balance": "0x0", + "code": 
"0x608060405260043610610092575f3560e01c806379ba50971161005757806379ba5097146101405780638da5cb5b14610154578063cc3d272114610168578063e30c39781461018b578063f2fde38b1461019f575f5ffd5b806335aa134a1461009d5780633bbed4a0146100be5780633ccfd60b146100dd57806366d003ac146100f1578063715018a61461012c575f5ffd5b3661009957005b5f5ffd5b3480156100a8575f5ffd5b506100bc6100b7366004610596565b6101be565b005b3480156100c9575f5ffd5b506100bc6100d83660046105ad565b61020c565b3480156100e8575f5ffd5b506100bc61026d565b3480156100fc575f5ffd5b505f5461010f906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b348015610137575f5ffd5b506100bc61037f565b34801561014b575f5ffd5b506100bc610392565b34801561015f575f5ffd5b5061010f6103d7565b348015610173575f5ffd5b5061017d60015481565b604051908152602001610123565b348015610196575f5ffd5b5061010f61040b565b3480156101aa575f5ffd5b506100bc6101b93660046105ad565b610433565b6101c66104b8565b600180549082905560408051828152602081018490527f3c4f4d8cd2a65b4b1f4eeaf43669b14ab54e43d4842aa0ac8f0e4f9fe0bf5bf991015b60405180910390a15050565b6102146104b8565b5f80546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e45939101610200565b6001544710156102ea5760405162461bcd60e51b815260206004820152603e60248201527f5769746864726177616c20616d6f756e74206d7573742062652067726561746560448201527f72207468616e206d696e696d756d20776974686472617720616d6f756e74000060648201526084015b60405180910390fd5b5f80546040516001600160a01b039091169047908381818185875af1925050503d805f8114610334576040519150601f19603f3d011682016040523d82523d5f602084013e610339565b606091505b505090508061037c5760405162461bcd60e51b815260206004820152600f60248201526e151c985b9cd9995c8819985a5b1959608a1b60448201526064016102e1565b50565b6103876104b8565b6103905f6104ea565b565b338061039c61040b565b6001600160a01b0316146103ce5760405163118cdaa760e01b81526001600160a01b03821660048201526024016102e1565b61037c816104ea565b5f807f9016d09d72d40fdae2fd8ce
ac6b6234c7706214fd39c1cd1e609a0528c1993005b546001600160a01b031692915050565b5f807f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c006103fb565b61043b6104b8565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b0319166001600160a01b038316908117825561047f6103d7565b6001600160a01b03167f38d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e2270060405160405180910390a35050565b336104c16103d7565b6001600160a01b0316146103905760405163118cdaa760e01b81523360048201526024016102e1565b7f237e158222e3e6968b72b9db0d8043aacf074ad9f650f0d1606b4d82ee432c0080546001600160a01b031916815561052282610526565b5050565b7f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c19930080546001600160a01b031981166001600160a01b03848116918217845560405192169182907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0905f90a3505050565b5f602082840312156105a6575f5ffd5b5035919050565b5f602082840312156105bd575f5ffd5b81356001600160a01b03811681146105d3575f5ffd5b939250505056" + }, + { + "address": "0x3aeeb871f83c85e68ffd1868bef3425ed6649d39", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0x66f68692c03eb9c0656d676f2f4bd13eba40d1b7", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0x70997970c51812dc3a010c7d01b50e0d17dc79c8", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0x9fcdf8f60d3009656e50bf805cd53c7335b284fb", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0xaafb7442f7f00b64057c2e9eae2815bb63ee0ece", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0xc2f8eed77da1583f7bae0a3125dc7bc426002dde", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0xd44821f906e3909b8ae944f7060551c33b922cc9", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0xe756fdf89367ef428b48bca2d272ec8ecec053fd", + 
"balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + }, + { + "address": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "balance": "0xffffffffffffffffffffffffffffff", + "code": "0x" + } + ], + "chain_id": 5655, + "limit_contract_code_size": 24576, + "coinbase": "0x3100000000000000000000000000000000000005", + "starting_base_fee": 1000000000, + "block_gas_limit": 30000000, + "base_fee_params": { + "max_change_denominator": 8, + "elasticity_multiplier": 2 + }, + "difficulty": 0, + "extra_data": "0x", + "timestamp": 0, + "nonce": 0 +} \ No newline at end of file diff --git a/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/l2_block_rule_enforcer.json b/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/l2_block_rule_enforcer.json new file mode 100644 index 000000000..b9eac6405 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/genesis-bitcoin-regtest/l2_block_rule_enforcer.json @@ -0,0 +1,4 @@ +{ + "max_l2_blocks_per_l1": 86400, + "authority": "sov1kqrxxkwkf7t7kfuegllwkzp6jc6r6h66pgkfe7pggtm0gayl756qku2u5p" +} \ No newline at end of file diff --git a/scripts/docker/configs/regtest/citrea/light_client_prover_config.toml b/scripts/docker/configs/regtest/citrea/light_client_prover_config.toml new file mode 100644 index 000000000..3623f9adb --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/light_client_prover_config.toml @@ -0,0 +1,4 @@ +initial_da_height = 1 +proving_mode = "execute" +proof_sampling_number = 0 +enable_recovery = true diff --git a/scripts/docker/configs/regtest/citrea/light_client_prover_rollup_config.toml b/scripts/docker/configs/regtest/citrea/light_client_prover_rollup_config.toml new file mode 100644 index 000000000..a516e0fd9 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/light_client_prover_rollup_config.toml @@ -0,0 +1,24 @@ +[public_keys] +sequencer_public_key = "036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f7" +sequencer_da_pub_key = 
"02588d202afcc1ee4ab5254c7847ec25b9a135bbda0f2bc69ee1a714749fd77dc9" +prover_da_pub_key = "03eedab888e45f3bdc3ec9918c491c11e5cf7af0a91f38b97fbc1e135ae4056601" + +[da] +# fill here +node_url = "http://bitcoin_regtest:20443/wallet/admin" +# fill here +node_username = "admin" +# fill here +node_password = "admin" +tx_backup_dir = "resources/bitcoin/inscription_txs" + +[storage] +# The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. +path = "resources/dbs/light-client-prover-db" +db_max_open_files = 5000 + +[rpc] +# the host and port to bind the rpc server for +bind_host = "0.0.0.0" +bind_port = 12349 +enable_subscriptions = false diff --git a/scripts/docker/configs/regtest/citrea/rollup_config.toml b/scripts/docker/configs/regtest/citrea/rollup_config.toml new file mode 100644 index 000000000..eb32d1a33 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/rollup_config.toml @@ -0,0 +1,36 @@ +[public_keys] +sequencer_public_key = "036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f7" +sequencer_da_pub_key = "02588d202afcc1ee4ab5254c7847ec25b9a135bbda0f2bc69ee1a714749fd77dc9" +prover_da_pub_key = "03eedab888e45f3bdc3ec9918c491c11e5cf7af0a91f38b97fbc1e135ae4056601" + +[da] +# fill here +node_url = "http://bitcoin_regtest:20443/wallet/admin" +# fill here +node_username = "admin" +# fill here +node_password = "admin" +tx_backup_dir = "" +da_private_key = "E9873D79C6D87DC0FB6A5778633389F4453213303DA61F20BD67FC233AA33262" + +[storage] +# The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. 
+path = "resources/dbs/full-node-db" + +[rpc] +# the host and port to bind the rpc server for +bind_host = "0.0.0.0" +bind_port = 12346 +enable_subscriptions = true +max_subscriptions_per_connection = 100 + +[runner] +sequencer_client_url = "http://citrea_sequencer_regtest:12345" +include_tx_body = false +scan_l1_start_height = 1 + +# WARNING: State pruning is not completely implemented. +# Enabling this might lead to state corruption and therefore, +# avoid using it for now. +# [runner.pruning_config] +# distance = 6000 diff --git a/scripts/docker/configs/regtest/citrea/sequencer_config.toml b/scripts/docker/configs/regtest/citrea/sequencer_config.toml new file mode 100644 index 000000000..79d8fe900 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/sequencer_config.toml @@ -0,0 +1,16 @@ +private_key = "1212121212121212121212121212121212121212121212121212121212121212" +max_l2_blocks_per_commitment = 10 +test_mode = false +deposit_mempool_fetch_limit = 10 +block_production_interval_ms = 1000 +da_update_interval_ms = 2000 +bridge_initialize_params = "000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000008ac7230489e80000000000000000000000000000000000000000000000000000000000000000002d41203b48ffb437c2ee08ceb8b9bb9e5555c002fb304c112e7e1233fe233f2a3dfc1dac006306636974726561140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016800000000000000000000000000000000000000000000000000000000000000" + +[mempool_conf] # Mempool Configuration - https://github.com/ledgerwatch/erigon/wiki/Transaction-Pool-Design +pending_tx_limit = 100000 +pending_tx_size = 200 +queue_tx_limit = 100000 +queue_tx_size = 200 +base_fee_tx_limit = 100000 +base_fee_tx_size = 200 +max_account_slots = 16 diff --git a/scripts/docker/configs/regtest/citrea/sequencer_rollup_config.toml 
b/scripts/docker/configs/regtest/citrea/sequencer_rollup_config.toml new file mode 100644 index 000000000..f879729a9 --- /dev/null +++ b/scripts/docker/configs/regtest/citrea/sequencer_rollup_config.toml @@ -0,0 +1,26 @@ +[public_keys] +sequencer_public_key = "036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f7" +sequencer_da_pub_key = "" +prover_da_pub_key = "" + +[da] +# fill here +node_url = "http://bitcoin_regtest:20443/wallet/sequencer-wallet" +# fill here +node_username = "admin" +# fill here +node_password = "admin" +da_private_key = "E9873D79C6D87DC0FB6A5778633389F4453213303DA61F20BD67FC233AA33262" +tx_backup_dir = "resources/bitcoin/inscription_txs" + +[storage] +# The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. +path = "resources/dbs/sequencer-db" +db_max_open_files = 5000 + +[rpc] +# the host and port to bind the rpc server for +bind_host = "0.0.0.0" +bind_port = 12345 +enable_subscriptions = true +max_subscriptions_per_connection = 100 diff --git a/scripts/docker/configs/regtest/create-multiple-postgresql-databases.sh b/scripts/docker/configs/regtest/create-multiple-postgresql-databases.sh new file mode 100644 index 000000000..aa665fa46 --- /dev/null +++ b/scripts/docker/configs/regtest/create-multiple-postgresql-databases.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e +set -u + +function create_user_and_database() { + local database=$1 + echo " Creating user and database '$database'" + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL + CREATE USER $database; + CREATE DATABASE $database; + GRANT ALL PRIVILEGES ON DATABASE $database TO $database; +EOSQL +} + +if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then + echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES" + for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do + create_user_and_database $db + done + echo "Multiple databases created" +fi diff --git 
a/scripts/docker/configs/testnet4/bridge_config.toml b/scripts/docker/configs/testnet4/bridge_config.toml new file mode 100644 index 000000000..122d0f5c4 --- /dev/null +++ b/scripts/docker/configs/testnet4/bridge_config.toml @@ -0,0 +1,93 @@ +# Host, port and index of the current actor (operator, verifier, or watchtower) +protocol_paramset = "regtest" + +host = "0.0.0.0" +port = 17000 +collateral_funding_amount = 99000000 +timeout_block_count = 6 +max_withdrawal_time_block_count = 4032 + +# Secret key of the current actor (operator or verifier) +secret_key = "1111111111111111111111111111111111111111111111111111111111111111" + +# All of the verifiers public keys +num_verifiers = 4 +verifiers_public_keys = [ + "034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa", + "02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27", + "023c72addb4fdf09af94f0c94d7fe92a386a7e70cf8a1d85916386bb2535c7b1b1", + "032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991", +] + +# All of the operators x-only public keys. +num_operators = 2 +num_round_txs = 2 +num_kickoffs_per_round = 2 +operators_xonly_pks = [ + "4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa", + "466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27", + "3c72addb4fdf09af94f0c94d7fe92a386a7e70cf8a1d85916386bb2535c7b1b1", +] + +# Operator reimbursement addresses after the 2 week period. +operator_wallet_addresses = [ + "bcrt1pvaua4gvvglk27al5trh337xz8l8zzhgzageky0xt0dgv64xee8tqwwvzmf", + "bcrt1pvaua4gvvglk27al5trh337xz8l8zzhgzageky0xt0dgv64xee8tqwwvzmf", + "bcrt1pvaua4gvvglk27al5trh337xz8l8zzhgzageky0xt0dgv64xee8tqwwvzmf", +] +operator_withdrawal_fee_sats = 100000 + +operator_num_kickoff_utxos_per_tx = 10 + +# User can take funds back after this amount of blocks, if deposit fails. 
+user_takes_after = 200 + +# Bitcoin node configuration options +network = "regtest" +bitcoin_rpc_url = "http://bitcoin_testnet4:20443/wallet/admin" +bitcoin_rpc_user = "admin" +bitcoin_rpc_password = "admin" + +# PostgreSQL database credentials. +db_host = "postgres_db" +db_port = 5432 +db_user = "clementine" +db_password = "clementine" +db_name = "clementine" + + +confirmation_threshold = 1 + +citrea_rpc_url = "http://citrea_full_node:12346" +citrea_light_client_prover_url = "https://light-client-prover.testnet.citrea.xyz/" +citrea_chain_id = 5655 +bridge_contract_address = "3100000000000000000000000000000000000002" + +# Header chain prover's assumption to start with. +# header_chain_proof_path = "../core/src/test/data/first_1.bin" + +# TLS certificate and key paths +server_cert_path = "/certs/server/server.pem" +server_key_path = "/certs/server/server.key" +ca_cert_path = "/certs/ca/ca.pem" +client_cert_path = "/certs/client/client.pem" +client_key_path = "/certs/client/client.key" +aggregator_cert_path = "/certs/aggregator/aggregator.pem" +client_verification = true +security_council = "1:50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0" + +winternitz_secret_key = "2222222222222222222222222222222222222222222222222222222222222222" + +socket_path = "/" + +[telemetry] +host = "0.0.0.0" +port = 8081 + +[grpc] +max_message_size = 4194304 +timeout_secs = 43200 +tcp_keepalive_secs = 60 +req_concurrency_limit = 300 +ratelimit_req_count = 1000 +ratelimit_req_interval_secs = 60 diff --git a/scripts/docker/configs/testnet4/protocol_paramset.toml b/scripts/docker/configs/testnet4/protocol_paramset.toml new file mode 100644 index 000000000..036dfa48d --- /dev/null +++ b/scripts/docker/configs/testnet4/protocol_paramset.toml @@ -0,0 +1,59 @@ +network = "testnet4" # "bitcoin", "testnet4", or "regtest" +num_round_txs = 2 +num_kickoffs_per_round = 10 +num_signed_kickoffs = 2 +bridge_amount = 1000000000 # in satoshis +kickoff_amount = 0 # in satoshis 
+operator_challenge_amount = 200000000 # in satoshis +collateral_funding_amount = 99000000 +kickoff_blockhash_commit_length = 40 +watchtower_challenge_bytes = 144 +winternitz_log_d = 4 +user_takes_after = 200 +operator_challenge_timeout_timelock = 144 # BLOCKS_PER_DAY +operator_challenge_nack_timelock = 432 # BLOCKS_PER_DAY * 3 +disprove_timeout_timelock = 720 # BLOCKS_PER_DAY * 5 +assert_timeout_timelock = 576 # BLOCKS_PER_DAY * 4 +operator_reimburse_timelock = 12 # BLOCKS_PER_HOUR * 2 +watchtower_challenge_timeout_timelock = 288 # BLOCKS_PER_DAY * 2 +time_to_send_watchtower_challenge = 216 # BLOCKS_PER_DAY * 3 / 2 +latest_blockhash_timeout_timelock = 360 # BLOCKS_PER_DAY * 5 / 2 +finality_depth = 6 +start_height = 92752 +genesis_height = 0 +genesis_chain_state_hash = [ + 95, + 115, + 2, + 173, + 22, + 200, + 189, + 158, + 242, + 243, + 190, + 0, + 200, + 25, + 154, + 134, + 249, + 224, + 186, + 134, + 20, + 132, + 171, + 180, + 175, + 95, + 126, + 69, + 127, + 140, + 34, + 22, +] +header_chain_proof_batch_size = 100 +bridge_nonstandard = true diff --git a/scripts/docker/docker-compose.full.regtest.yml b/scripts/docker/docker-compose.full.regtest.yml new file mode 100644 index 000000000..e5f279c56 --- /dev/null +++ b/scripts/docker/docker-compose.full.regtest.yml @@ -0,0 +1,297 @@ +name: Clementine test deployment on Regtest + +# Common stuff for services ---------------------------------------------------- +x-clementine: &clementine_regtest + image: ${CLEMENTINE_IMAGE:-chainwayxyz/clementine} + platform: linux/amd64 + + depends_on: + postgres_db_regtest: + condition: service_healthy + bitcoin_regtest: + condition: service_healthy + citrea_sequencer_regtest: + condition: service_started + citrea_batch_prover_regtest: + condition: service_started + citrea_light_client_prover_regtest: + condition: service_started + + env_file: + - ./configs/regtest/.env.regtest + + volumes: + - ../../bitvm_cache.bin:/bitvm_cache.bin + - ../../bitvm_cache_dev.bin:/bitvm_cache_dev.bin 
+ - ../../core/certs:/certs + networks: + - clementine-network + +x-verifier: &clementine_verifier_regtest + <<: *clementine_regtest + command: + verifier + +x-operator: &clementine_operator_regtest + <<: *clementine_regtest + depends_on: + clementine_verifier_regtest_0: + condition: service_healthy + clementine_verifier_regtest_1: + condition: service_healthy + clementine_verifier_regtest_2: + condition: service_healthy + clementine_verifier_regtest_3: + condition: service_healthy + command: + operator + +# Services --------------------------------------------------------------------- +services: + postgres_db_regtest: + image: 'postgres:latest' + attach: false + container_name: postgres_db_regtest + volumes: + - ./configs/regtest/create-multiple-postgresql-databases.sh:/docker-entrypoint-initdb.d/init.sh + environment: + POSTGRES_MULTIPLE_DATABASES: clementine0,clementine1,clementine2,clementine3 + POSTGRES_USER: clementine + POSTGRES_PASSWORD: clementine + POSTGRES_HOST_AUTH_METHOD: trust + healthcheck: + test: ["CMD-SHELL", "pg_isready -U clementine -d clementine"] + interval: 2s + timeout: 5s + retries: 10 + restart: unless-stopped + networks: + - clementine-network + + bitcoin_regtest: + image: bitcoin/bitcoin:29 + attach: false + container_name: bitcoin_regtest + ports: + - "20443:20443" + - "20444:20444" + volumes: + - ../init-bitcoin.sh:/init-bitcoin.sh + - ../docker-entrypoint.sh:/docker-entrypoint.sh + - bitcoin_regtest:/home/bitcoin/.bitcoin + command: + -printtoconsole + -regtest=1 + -rest + -rpcbind=0.0.0.0 + -rpcallowip=0.0.0.0/0 + -rpcport=20443 + -rpcuser=admin + -rpcpassword=admin + -server + -txindex=1 + -fallbackfee=0.00001 + -maxtxfee=5 + entrypoint: ["/docker-entrypoint.sh"] + + + healthcheck: + test: [ + "CMD-SHELL", + "WALLETS=$(bitcoin-cli -regtest -rpcuser=admin -rpcpassword=admin -rpcport=20443 listwallets) && \ + echo \"$$WALLETS\" | grep -q '\"admin\"' && \ + echo \"$$WALLETS\" | grep -q '\"sequencer-wallet\"' && \ + echo \"$$WALLETS\" | 
grep -q '\"batch-prover-wallet\"'" + ] + interval: 2s + timeout: 15s + retries: 10 + networks: + - clementine-network + + citrea_sequencer_regtest: + image: chainwayxyz/citrea-test:ca479a4147be1c3a472e76a3f117124683d81ab5 + depends_on: + bitcoin_regtest: + condition: service_healthy + container_name: citrea_sequencer_regtest + command: + --dev --da-layer bitcoin --rollup-config-path /sequencer_rollup_config.toml --sequencer /sequencer_config.toml --genesis-paths /genesis-bitcoin-regtest/ + ports: + - "12345:12345" + volumes: + - citrea_sequencer_regtest:/mnt/task/citrea-db + - ./configs/regtest/citrea/sequencer_rollup_config.toml:/sequencer_rollup_config.toml + - ./configs/regtest/citrea/sequencer_config.toml:/sequencer_config.toml + - ./configs/regtest/citrea/genesis-bitcoin-regtest:/genesis-bitcoin-regtest + networks: + - clementine-network + + citrea_batch_prover_regtest: + image: chainwayxyz/citrea-test:ca479a4147be1c3a472e76a3f117124683d81ab5 + depends_on: + bitcoin_regtest: + condition: service_healthy + citrea_sequencer_regtest: + condition: service_started + container_name: citrea_batch_prover_regtest + environment: + - PARALLEL_PROOF_LIMIT=1 + command: + --dev --da-layer bitcoin --rollup-config-path /batch_prover_rollup_config.toml --batch-prover /batch_prover_config.toml --genesis-paths /genesis-bitcoin-regtest/ + ports: + - "12348:12348" + volumes: + - citrea_batch_prover_regtest:/mnt/task/citrea-db + - ./configs/regtest/citrea/batch_prover_rollup_config.toml:/batch_prover_rollup_config.toml + - ./configs/regtest/citrea/batch_prover_config.toml:/batch_prover_config.toml + - ./configs/regtest/citrea/genesis-bitcoin-regtest:/genesis-bitcoin-regtest/ + networks: + - clementine-network + + citrea_light_client_prover_regtest: + image: chainwayxyz/citrea-test:ca479a4147be1c3a472e76a3f117124683d81ab5 + depends_on: + bitcoin_regtest: + condition: service_healthy + citrea_sequencer_regtest: + condition: service_started + citrea_batch_prover_regtest: + condition: 
service_started + container_name: citrea_light_client_prover_regtest + command: + --dev --da-layer bitcoin --rollup-config-path /light_client_prover_rollup_config.toml --light-client-prover /light_client_prover_config.toml --genesis-paths /genesis-bitcoin-regtest/ + ports: + - "12349:12349" + volumes: + - citrea_light_client_prover_regtest:/mnt/task/citrea-db + - ./configs/regtest/citrea/light_client_prover_rollup_config.toml:/light_client_prover_rollup_config.toml + - ./configs/regtest/citrea/light_client_prover_config.toml:/light_client_prover_config.toml + - ./configs/regtest/citrea/genesis-bitcoin-regtest:/genesis-bitcoin-regtest/ + networks: + - clementine-network + +# Aggregator -------------------------------------------------------------------- + clementine_aggregator_regtest: + <<: *clementine_regtest + depends_on: + clementine_verifier_regtest_0: + condition: service_healthy + clementine_verifier_regtest_1: + condition: service_healthy + clementine_verifier_regtest_2: + condition: service_healthy + clementine_verifier_regtest_3: + condition: service_healthy + clementine_operator_regtest_0: + condition: service_healthy + clementine_operator_regtest_1: + condition: service_healthy + environment: + - SECRET_KEY=1111111111111111111111111111111111111111111111111111111111111111 + - DB_NAME=clementine0 + - CLIENT_CERT_PATH=/certs/aggregator/aggregator.pem + - CLIENT_KEY_PATH=/certs/aggregator/aggregator.key + command: + aggregator + ports: + - "17000:17000" + +# Verifiers -------------------------------------------------------------------- + clementine_verifier_regtest_0: + <<: *clementine_verifier_regtest + hostname: regtest0_verifier.docker.internal + environment: + - SECRET_KEY=1111111111111111111111111111111111111111111111111111111111111111 + - DB_NAME=clementine0 + ports: + - "17001:17000" + healthcheck: + test: ["CMD-SHELL", "timeout 1 bash -c ' openssl_x509.cnf << EOF +[req] +distinguished_name = req_distinguished_name +req_extensions = v3_req +prompt = 
no + +[req_distinguished_name] +CN = localhost + +[v3_req] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names + +[v3_ca] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer:always +basicConstraints = CA:true + +[alt_names] +DNS.1 = localhost +DNS.2 = *.docker.internal +IP.1 = 127.0.0.1 +IP.2 = 172.17.0.1 +EOF + +# Generate CA key and certificate +echo "Generating CA certificate..." +openssl genrsa -out $CA_DIR/ca.key 4096 +openssl req -new -x509 -sha256 -days 365 -key $CA_DIR/ca.key -out $CA_DIR/ca.pem \ + -subj "/C=US/ST=California/L=San Francisco/O=Clementine/OU=CA/CN=clementine-ca" \ + -extensions v3_ca -config openssl_x509.cnf + +# Generate server key and CSR +echo "Generating server certificate..." +openssl genrsa -out $SERVER_DIR/server.key 2048 +openssl req -new -key $SERVER_DIR/server.key -out $SERVER_DIR/server.csr \ + -subj "/C=US/ST=California/L=San Francisco/O=Clementine/OU=Server/CN=localhost" \ + -config openssl_x509.cnf + +# Sign server certificate with CA +openssl x509 -req -sha256 -days 365 -in $SERVER_DIR/server.csr \ + -CA $CA_DIR/ca.pem -CAkey $CA_DIR/ca.key -CAcreateserial \ + -out $SERVER_DIR/server.pem \ + -extfile openssl_x509.cnf -extensions v3_req + +# Generate client key and CSR +echo "Generating client certificate..." +openssl genrsa -out $CLIENT_DIR/client.key 2048 +openssl req -new -key $CLIENT_DIR/client.key -out $CLIENT_DIR/client.csr \ + -subj "/C=US/ST=California/L=San Francisco/O=Clementine/OU=Client/CN=clementine-client" \ + -config openssl_x509.cnf + +# Sign client certificate with CA +openssl x509 -req -sha256 -days 365 -in $CLIENT_DIR/client.csr \ + -CA $CA_DIR/ca.pem -CAkey $CA_DIR/ca.key -CAcreateserial \ + -out $CLIENT_DIR/client.pem \ + -extfile openssl_x509.cnf -extensions v3_req + +# Generate aggregator key and CSR +echo "Generating aggregator certificate..." 
+openssl genrsa -out $AGGREGATOR_DIR/aggregator.key 2048 +openssl req -new -key $AGGREGATOR_DIR/aggregator.key -out $AGGREGATOR_DIR/aggregator.csr \ + -subj "/C=US/ST=California/L=San Francisco/O=Clementine/OU=Aggregator/CN=clementine-aggregator" \ + -config openssl_x509.cnf + +# Sign client certificate with CA +openssl x509 -req -sha256 -days 365 -in $AGGREGATOR_DIR/aggregator.csr \ + -CA $CA_DIR/ca.pem -CAkey $CA_DIR/ca.key -CAcreateserial \ + -out $AGGREGATOR_DIR/aggregator.pem \ + -extfile openssl_x509.cnf -extensions v3_req + +# Copy CA certificate to both directories for convenience +cp $CA_DIR/ca.pem $SERVER_DIR/ +cp $CA_DIR/ca.pem $CLIENT_DIR/ +cp $CA_DIR/ca.pem $AGGREGATOR_DIR/ + +# Clean up temporary files +rm -f openssl_x509.cnf +rm -f $SERVER_DIR/server.csr +rm -f $CLIENT_DIR/client.csr +rm -f $AGGREGATOR_DIR/aggregator.csr + +echo "Certificate generation complete!" +echo "CA certificate: $CA_DIR/ca.pem" +echo "Server certificate: $SERVER_DIR/server.pem" +echo "Server key: $SERVER_DIR/server.key" +echo "Client certificate: $CLIENT_DIR/client.pem" +echo "Client key: $CLIENT_DIR/client.key" +echo "Aggregator certificate: $AGGREGATOR_DIR/aggregator.pem" +echo "Aggregator key: $AGGREGATOR_DIR/aggregator.key" diff --git a/scripts/init-bitcoin.sh b/scripts/init-bitcoin.sh new file mode 100755 index 000000000..3b963daea --- /dev/null +++ b/scripts/init-bitcoin.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +echo "[Init] Waiting for bitcoind to start..." 
+until bitcoin-cli -regtest -rpcuser=admin -rpcpassword=admin -rpcport=20443 getblockchaininfo > /dev/null 2>&1; do + sleep 1 +done + +for WALLET in admin sequencer-wallet batch-prover-wallet; do + if bitcoin-cli -regtest -rpcuser=admin -rpcpassword=admin -rpcport=20443 listwalletdir \ + | grep -q "\"name\": \"$WALLET\""; then + echo "Wallet $WALLET exists" + bitcoin-cli -regtest -rpcuser=admin -rpcpassword=admin -rpcport=20443 loadwallet "$WALLET" 2>/dev/null || true + else + echo "Creating wallet $WALLET" + bitcoin-cli -regtest -rpcuser=admin -rpcpassword=admin -rpcport=20443 createwallet "$WALLET" + fi + + ADDR=$(bitcoin-cli -regtest -rpcuser=admin -rpcpassword=admin -rpcport=20443 -rpcwallet="$WALLET" getnewaddress) + bitcoin-cli -regtest -rpcuser=admin -rpcpassword=admin -rpcport=20443 -rpcwallet="$WALLET" generatetoaddress 202 "$ADDR" +done diff --git a/scripts/listall.sh b/scripts/listall.sh new file mode 100755 index 000000000..f4add4406 --- /dev/null +++ b/scripts/listall.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Get the list of tables +tables=$(psql -t -c "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'") + +# Loop through each table and print all rows +for table in $tables; do + echo "Rows for table '$table':" + psql -c "SELECT * FROM $table" -P pager=off + echo -e "\n-------------------------------------------------\n" +done \ No newline at end of file diff --git a/scripts/prepare_database.sh b/scripts/prepare_database.sh new file mode 100755 index 000000000..a08381073 --- /dev/null +++ b/scripts/prepare_database.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# +# This script isn't a strict requirement for preparing database. One can take +# this only as a reference. 
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+echo "Preparing database for $PGDATABASE"
+
+dropdb -U $PGUSER $PGDATABASE
+createdb -U $PGUSER -O $PGUSER $PGDATABASE
+
+cat $SCRIPT_DIR/schema.sql | psql -U $PGUSER $PGDATABASE
+cat $SCRIPT_DIR/pgmq.sql | psql -U $PGUSER $PGDATABASE
diff --git a/scripts/replacement_test.py b/scripts/replacement_test.py
new file mode 100644
index 000000000..162cf14eb
--- /dev/null
+++ b/scripts/replacement_test.py
@@ -0,0 +1,169 @@
+# Before running this script you need to start the actors (you can use run-test.sh)
+# Bitcoin also needs to be running, example for regtest:
+# bitcoind -regtest -rpcuser=admin -rpcpassword=admin -rpcport=18443 -fallbackfee=0.00001 -wallet=admin -txindex=1 -daemon -maxtxfee=1
+import subprocess
+import json
+import time
+
+NODE_URL = "http://127.0.0.1:17000"
+RPC = "bitcoin-cli -regtest -rpcport=18443 -rpcuser=admin -rpcpassword=admin"
+
+
+def run_cmd(cmd, capture=True):
+    """Run a shell command; return stripped stdout, or None on failure.
+
+    Prints the command before running it and the stderr on failure so the
+    test log shows exactly what was executed.
+    """
+    print(f"\nRunning: {cmd}")
+    result = subprocess.run(cmd, shell=True, capture_output=capture, text=True)
+    if result.returncode != 0:
+        print(f"Error:\n{result.stderr}")
+        return None
+    return result.stdout.strip() if capture else None
+
+
+def get_deposit_address():
+    """Ask the aggregator CLI for a fresh deposit address; None on failure."""
+    output = run_cmd(
+        f"cargo run --bin clementine-cli -- --node-url {NODE_URL} aggregator get-deposit-address"
+    )
+    # run_cmd returns None when the CLI fails; guard before .split() so the
+    # caller sees a clean None instead of an AttributeError.
+    if output is None:
+        return None
+    for word in output.split():
+        if word.startswith("bcrt") or word.startswith("tb1") or word.startswith("bc1"):
+            return word.strip()
+    return None
+
+
+def send_deposit(address, amount=10):
+    """Send `amount` BTC to `address` and mine one block to confirm it."""
+    txid = run_cmd(f"{RPC} sendtoaddress {address} {amount}")
+    run_cmd(f"{RPC} -generate 1")
+    return txid
+
+
+def get_output_index(txid, address):
+    """Return the vout index of `address` in `txid`, or None if not found."""
+    raw = run_cmd(f"{RPC} getrawtransaction {txid} 2")
+    # Guard against a failed RPC call before handing the output to json.loads.
+    if raw is None:
+        return None
+    tx = json.loads(raw)
+    for vout in tx.get("vout", []):
+        # Compare against a None default: the old `[]` sentinel could never
+        # equal an address string, which obscured the intent.
+        if address == vout.get("scriptPubKey", {}).get("address"):
+            return vout["n"]
+    return None
+
+
+def register_deposit(txid, vout):
+    """Register the deposit with the aggregator; return the move txid."""
+    output = run_cmd(
+        f"cargo run --bin clementine-cli -- --node-url {NODE_URL} aggregator new-deposit "
+        f"--deposit-outpoint-txid {txid} --deposit-outpoint-vout {vout}"
+    )
+    # Fail with a clear message instead of an IndexError when the CLI call
+    # fails or its output format changes.
+    if output is None or "Move txid: " not in output:
+        print("Failed to register deposit (no 'Move txid:' in CLI output).")
+        exit(1)
+    return output.split("Move txid: ")[1].strip()
+
+
+def setup():
+    """Run the one-time aggregator setup; returns the raw CLI output."""
+    output = run_cmd(
+        f"cargo run --bin clementine-cli -- --node-url {NODE_URL} aggregator setup"
+    )
+
+    return output
+
+
+def get_replacement_address(move_txid):
+    """Get a replacement deposit address for `move_txid`; None on failure."""
+    output = run_cmd(
+        f"cargo run --bin clementine-cli -- --node-url {NODE_URL} aggregator get-replacement-deposit-address "
+        f"--move-txid {move_txid}"
+    )
+    if output is None:
+        return None
+    for word in output.split():
+        if word.startswith("bcrt") or word.startswith("tb1") or word.startswith("bc1"):
+            return word.strip()
+    return None
+
+
+def register_replacement_deposit(new_txid, new_vout, old_txid):
+    """Register a replacement deposit; return the new move txid."""
+    final_output = run_cmd(
+        f"cargo run --bin clementine-cli -- --node-url {NODE_URL} aggregator new-replacement-deposit "
+        f"--deposit-outpoint-txid {new_txid} "
+        f"--deposit-outpoint-vout {new_vout} "
+        f"--old-move-txid {old_txid}"
+    )
+
+    run_cmd(f"{RPC} -generate 1")
+
+    # Same guard as register_deposit: avoid a bare IndexError on failure.
+    if final_output is None or "Move txid: " not in final_output:
+        print("Failed to register replacement deposit (no 'Move txid:' in CLI output).")
+        exit(1)
+    return final_output.split("Move txid: ")[1].strip()
+
+
+def big_to_little_endian(hex_str):
+    """Reverse the byte order of a hex string (big- to little-endian txid)."""
+    bytes_list = [hex_str[i : i + 2] for i in range(0, len(hex_str), 2)]
+    little_endian = "".join(reversed(bytes_list))
+    return little_endian
+
+
+def get_balance():
+    """Return the regtest wallet balance in BTC (0.0 on RPC failure)."""
+    balance = run_cmd(f"{RPC} getbalance")
+    return float(balance) if balance else 0.0
+
+
+if __name__ == "__main__":
+
+    balance = get_balance()
+
+    NUMBER_OF_REPLACEMENTS = 1
+
+    setup_output = setup()
+    print(f"Setup output: {setup_output}")
+
+    # Mine until the wallet can fund all deposits (20 BTC per replacement).
+    while balance < 20 * NUMBER_OF_REPLACEMENTS:
+        print(f"Balance: {balance} BTC")
+        print("Generating blocks...")
+        # Generate blocks to increase balance
+        run_cmd(f"{RPC} -generate {NUMBER_OF_REPLACEMENTS}")
+        balance = get_balance()
+        print(f"Balance: {balance} BTC")
+
+    for _ in range(NUMBER_OF_REPLACEMENTS):
+        deposit_address = get_deposit_address()
+        if not deposit_address:
+            print("Failed to get deposit address.")
+            exit(1)
+        print(f"Deposit address: {deposit_address}")
+
+        txid = send_deposit(deposit_address)
+        print(f"Deposit TXID: {txid}")
+
+        vout = get_output_index(txid, deposit_address)
+        if vout is None:
+            print("Failed to find vout.")
+            exit(1)
+        print(f"Output index: {vout}")
+
+        move_tx = register_deposit(txid, vout)
+        print("Deposit registered.")
+
+        # Replacement flow
+        OLD_TXID = move_tx  # Replace if needed
+        print(f"\nStarting replacement for move-txid: {OLD_TXID}")
+        replacement_address = get_replacement_address(OLD_TXID)
+        if not replacement_address:
+            print("Failed to get replacement address.")
+            exit(1)
+        print(f"Replacement address: {replacement_address}")
+
+        replacement_txid = send_deposit(replacement_address)
+        print(f"Replacement TXID: {replacement_txid}")
+
+        replacement_vout = get_output_index(replacement_txid, replacement_address)
+        if replacement_vout is None:
+            print("Failed to get replacement vout.")
+            exit(1)
+        print(f"Replacement output index: {replacement_vout}")
+
+        new_tx_id = register_replacement_deposit(
+            replacement_txid, replacement_vout, OLD_TXID
+        )
+
+        # Wait for the replacement tx to appear; bound the retries so a stuck
+        # node cannot hang the script forever (the old loop never terminated).
+        raw = None
+        for _ in range(60):
+            raw = run_cmd(f"{RPC} getrawtransaction {new_tx_id}")
+            if raw:
+                break
+            run_cmd(f"{RPC} -generate 1")
+            time.sleep(1)
+        if not raw:
+            print("Replacement transaction never appeared on-chain.")
+            exit(1)
+
+        old_txid_le = big_to_little_endian(OLD_TXID)
+
+        print(f"Converted OLD_TXID (le): {old_txid_le}")
+
+        if old_txid_le not in raw:
+            print("Replacement txid not found in raw transaction.")
+            exit(1)
+
+    print("All replacements completed successfully.")
+    # Refresh before reporting so the summary shows the true final balance,
+    # not the pre-deposit snapshot taken at startup.
+    balance = get_balance()
+    print(f"Final balance: {balance} BTC")
diff --git a/scripts/run-deposit_docker_no_auto.sh b/scripts/run-deposit_docker_no_auto.sh
new file mode 100755
index 000000000..a1dd56e1e
--- /dev/null
+++ b/scripts/run-deposit_docker_no_auto.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+# CPFP Deposit Flow Script for Clementine
+set -e  # Exit on any error
+
+# Configuration
+AGGREGATOR_URL="https://127.0.0.1:17000"
+DEPOSIT_AMOUNT="10"
+BQR_ALIAS=${BQR_ALIAS:="bitcoin-cli -regtest -rpcport=20443 -rpcuser=admin -rpcpassword=admin -rpcwallet=admin"}
+BITCOIN_RPC_URL=${BITCOIN_RPC_URL:="http://127.0.0.1:20443/wallet/admin"}
+BITCOIN_RPC_USER="admin"
+BITCOIN_RPC_PASSWORD="admin"
+FEE_RATE="10.0" # sat/vB
+# NOTE(review): FEE_RATE appears unused in this script — confirm before removing.
+FEE_PAYER_AMOUNT="1"
+# Silence cargo warnings so the CLI output parsed below stays clean.
+export RUSTFLAGS="-Awarnings"
+
+# Check if jq is installed (needed to extract the vout index from raw tx JSON)
+if ! command -v jq &> /dev/null; then
+    echo "โŒ Error: 'jq' command is required but not installed."
+    echo "Please install jq to parse JSON responses:"
+    echo "  - macOS: brew install jq"
+    echo "  - Ubuntu/Debian: sudo apt-get install jq"
+    echo "  - CentOS/RHEL: sudo yum install jq"
+    echo "  - Or visit: https://stedolan.github.io/jq/download/"
+    exit 1
+fi
+
+echo "๐Ÿงฑ Step 1: Setup Aggregator"
+cargo run --bin clementine-cli -- --node-url $AGGREGATOR_URL aggregator setup
+
+echo "๐Ÿ“ฌ Step 2: Get Deposit Address"
+# The CLI prints a regtest bech32 address; pick it out of the output.
+DEPOSIT_ADDRESS=$(cargo run --bin clementine-cli -- --node-url $AGGREGATOR_URL aggregator get-deposit-address | grep -o 'bcrt1[a-zA-Z0-9]*')
+echo "Deposit address: $DEPOSIT_ADDRESS"
+
+echo "๐Ÿช™ Step 3: Send Deposit"
+DEPOSIT_TXID=$($BQR_ALIAS sendtoaddress $DEPOSIT_ADDRESS $DEPOSIT_AMOUNT)
+echo "Deposit TXID: $DEPOSIT_TXID"
+# Mine one block so the deposit confirms.
+$BQR_ALIAS -generate 1
+
+echo "๐Ÿ”Ž Step 4: Get Deposit VOUT + Raw TX"
+# Verbose (mode 1) form gives JSON; select the output paying our address.
+RAW_TX_JSON=$($BQR_ALIAS getrawtransaction $DEPOSIT_TXID 1)
+VOUT_INDEX=$(echo "$RAW_TX_JSON" | jq -r --arg addr "$DEPOSIT_ADDRESS" '.vout[] | select(.scriptPubKey.address == $addr) | .n')
+RAW_TX_HEX=$($BQR_ALIAS getrawtransaction $DEPOSIT_TXID)
+# NOTE(review): RAW_TX_HEX looks unused in this script — confirm.
+echo "VOUT: $VOUT_INDEX"
+
+STEP_START=$(date +%s)
+echo "๐Ÿ“ฅ Step 4.5: Registering deposit on aggregator..."
+# The CLI prints the raw move tx after the marker line; awk grabs the last field.
+MOVE_TX_RAW=$(cargo run --bin clementine-cli -- --node-url $AGGREGATOR_URL aggregator new-deposit \
+    --deposit-outpoint-txid $DEPOSIT_TXID \
+    --deposit-outpoint-vout $VOUT_INDEX | awk '/Please send manually:/ { print $NF }')
+STEP_END=$(date +%s)
+
+if [ -z "$MOVE_TX_RAW" ]; then
+    echo "โŒ Failed to extract raw move transaction!"
+    exit 1
+fi
+echo "Move TX raw: $MOVE_TX_RAW"
+echo "โฑ๏ธ Step 4.5 took $((STEP_END - STEP_START)) seconds"
+
+echo "๐Ÿงพ Step 5: Create Move-to-Vault TX (CPFP step 1)"
+# First send-tx-with-cpfp invocation (no fee payer yet) prints the address
+# that must be funded to pay the child-pays-for-parent fee.
+FEE_PAYER_ADDRESS=$(cargo run --bin clementine-cli -- --node-url $BITCOIN_RPC_URL bitcoin send-tx-with-cpfp \
+    --bitcoin-rpc-user $BITCOIN_RPC_USER \
+    --bitcoin-rpc-password $BITCOIN_RPC_PASSWORD \
+    --raw-tx $MOVE_TX_RAW | grep -o 'bcrt1[a-zA-Z0-9]*')
+echo "Fee payer address: $FEE_PAYER_ADDRESS"
+
+echo "๐Ÿ’ธ Step 6: Send fee to fee payer address"
+$BQR_ALIAS sendtoaddress $FEE_PAYER_ADDRESS $FEE_PAYER_AMOUNT
+$BQR_ALIAS -generate 1
+
+echo "๐Ÿงพ Step 7: Finalize CPFP Move TX"
+# Second invocation, now with the funded fee payer, broadcasts the package.
+MOVE_TX_DETAILS=$(cargo run --bin clementine-cli -- --node-url $BITCOIN_RPC_URL bitcoin send-tx-with-cpfp \
+    --bitcoin-rpc-user $BITCOIN_RPC_USER \
+    --bitcoin-rpc-password $BITCOIN_RPC_PASSWORD \
+    --fee-payer-address $FEE_PAYER_ADDRESS \
+    --raw-tx $MOVE_TX_RAW)
+
+
+# Mine a few blocks (with pauses) so the CPFP package confirms.
+for i in {1..2}; do
+    $BQR_ALIAS -generate 5
+    sleep 2
+done
+
+echo "โœ… CPFP move transaction sent and confirmed."
+echo "Summary:"
+echo "  - Deposit Address: $DEPOSIT_ADDRESS"
+echo "  - Deposit TXID: $DEPOSIT_TXID"
+echo "  - Output Index: $VOUT_INDEX"
+echo "  - Move TX Details: $MOVE_TX_DETAILS"
+
+# NOTE(review): grep -P (\K) is GNU-grep only; this will fail on BSD/macOS grep.
+PARENT_TXID=$(echo "$MOVE_TX_DETAILS" | grep -oP 'Parent transaction TXID: \K[a-f0-9]{64}')
+
+if [ -z "$PARENT_TXID" ]; then
+    echo "โŒ Failed to extract parent transaction TXID!"
+    exit 1
+fi
+
+echo "Step 8: Get Calldata for Deposit"
+# Last line of the CLI output is the calldata; strip all whitespace from it.
+CALLDATA=$(clementine --network regtest deposit get-deposit-params $PARENT_TXID \
+    $BITCOIN_RPC_URL $BITCOIN_RPC_USER $BITCOIN_RPC_PASSWORD | tail -n1 | tr -d '\n\r ' | xargs)
+
+if [ -z "$CALLDATA" ]; then
+    echo "โŒ Failed to get deposit parameters!"
+    exit 1
+fi
+
+echo "Calldata: $CALLDATA"
+
+echo "๐Ÿ“ฆ Step 9: Submit Calldata to Citrea"
+# Build the JSON-RPC request with jq (safe quoting of the calldata) and POST
+# it to the local Citrea node.
+CITREA_RESPONSE=$(jq -nc --arg cal "$CALLDATA" \
+    '{jsonrpc:"2.0", method:"citrea_sendRawDepositTransaction", params:[$cal], id:1}' |
+    curl -s -X POST http://127.0.0.1:12345 -H "Content-Type: application/json" --data @-)
+
+echo "Citrea submission response: $CITREA_RESPONSE"
+
+# Success is signalled by a literal null result in the JSON-RPC response.
+if echo "$CITREA_RESPONSE" | grep -q '"result":null'; then
+    echo "โœ… Calldata submission successful"
+else
+    echo "โŒ Calldata submission failed"
+    exit 1
+fi
diff --git a/scripts/run.sh b/scripts/run.sh
new file mode 100755
index 000000000..08816c751
--- /dev/null
+++ b/scripts/run.sh
@@ -0,0 +1,191 @@
+#!/bin/bash
+
+echo "Run this script in the root of the project"
+
+# Check if BITVM_CACHE_PATH is set, if not try to find cache file automatically
+if [ -z "$BITVM_CACHE_PATH" ]; then
+    if [ -f "./core/bitvm_cache.bin" ]; then
+        export BITVM_CACHE_PATH="./core/bitvm_cache.bin"
+        echo "Using cache file: $BITVM_CACHE_PATH"
+    elif [ -f "./bitvm_cache.bin" ]; then
+        export BITVM_CACHE_PATH="./bitvm_cache.bin"
+        echo "Using cache file: $BITVM_CACHE_PATH"
+    else
+        echo "BITVM_CACHE_PATH is not set and no cache file found in ./core/bitvm_cache.bin or ./bitvm_cache.bin"
+        echo "Please set BITVM_CACHE_PATH or ensure a cache file exists in one of the above locations."
+        exit 1
+    fi
+fi
+
+# Tell clementine-core to read config and paramset from env vars, not files.
+export READ_CONFIG_FROM_ENV=1
+export READ_PARAMSET_FROM_ENV=1
+
+# Actor identity and signing keys (dev/test defaults; all overridable).
+export PROTOCOL_PARAMSET=${PROTOCOL_PARAMSET:=regtest}
+export HOST=${HOST:=127.0.0.1}
+export WINTERNITZ_SECRET_KEY=${WINTERNITZ_SECRET_KEY:=2222222222222222222222222222222222222222222222222222222222222222}
+export VERIFIERS_PUBLIC_KEYS=${VERIFIERS_PUBLIC_KEYS:="034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa,02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27,023c72addb4fdf09af94f0c94d7fe92a386a7e70cf8a1d85916386bb2535c7b1b1,032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991"}
+export OPERATOR_XONLY_PKS=${OPERATOR_XONLY_PKS:="4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa,466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27"}
+export NUM_OPERATORS=${NUM_OPERATORS:=2}
+export OPERATOR_WITHDRAWAL_FEE_SATS=${OPERATOR_WITHDRAWAL_FEE_SATS:=100000}
+
+# External services: Citrea, actor endpoints, Bitcoin RPC, Postgres.
+export CITREA_CHAIN_ID=${CITREA_CHAIN_ID:=5115}
+export CITREA_RPC_URL=${CITREA_RPC_URL:="http://127.0.0.1:12345"}
+export CITREA_LIGHT_CLIENT_PROVER_URL=${CITREA_LIGHT_CLIENT_PROVER_URL:="http://127.0.0.1:12346"}
+export BRIDGE_CONTRACT_ADDRESS=${BRIDGE_CONTRACT_ADDRESS:="3100000000000000000000000000000000000002"}
+export VERIFIER_ENDPOINTS=${VERIFIER_ENDPOINTS:="https://127.0.0.1:17001,https://127.0.0.1:17002,https://127.0.0.1:17003,https://127.0.0.1:17004"}
+export OPERATOR_ENDPOINTS=${OPERATOR_ENDPOINTS:="https://127.0.0.1:17005,https://127.0.0.1:17006"}
+export BITCOIN_RPC_URL=${BITCOIN_RPC_URL:="http://127.0.0.1:18443"}
+export BITCOIN_RPC_USER=${BITCOIN_RPC_USER:=admin}
+export BITCOIN_RPC_PASSWORD=${BITCOIN_RPC_PASSWORD:=admin}
+export DB_HOST=${DB_HOST:=127.0.0.1}
+export DB_PORT=${DB_PORT:=5432}
+export DB_USER=${DB_USER:=clementine}
+export DB_PASSWORD=${DB_PASSWORD:=clementine}
+export DB_NAME=${DB_NAME:=clementine}
+export PROTOCOL_CONFIG_PATH=${PROTOCOL_CONFIG_PATH:="core/src/config/protocol_paramset.toml"}
+export DBG_PACKAGE_HEX=${DBG_PACKAGE_HEX:=1}
+export RUST_MIN_STACK=${RUST_MIN_STACK:=33554432}
+export RISC0_SKIP_BUILD=${RISC0_SKIP_BUILD:=1}
+export LOG_FORMAT=json
+export RUST_LOG=info
+# TLS
+export CA_CERT_PATH=${CA_CERT_PATH:="core/certs/ca/ca.pem"}
+export SERVER_CERT_PATH=${SERVER_CERT_PATH:="core/certs/server/server.pem"}
+export SERVER_KEY_PATH=${SERVER_KEY_PATH:="core/certs/server/server.key"}
+# NOTE(review): client/aggregator certs reuse the server keypair here —
+# dev-only convenience; confirm this is intended outside local runs.
+export CLIENT_CERT_PATH=${CLIENT_CERT_PATH:="core/certs/server/server.pem"}
+export CLIENT_KEY_PATH=${CLIENT_KEY_PATH:="core/certs/server/server.key"}
+export AGGREGATOR_CERT_PATH=${AGGREGATOR_CERT_PATH:="core/certs/server/server.pem"}
+export CLIENT_VERIFICATION=${CLIENT_VERIFICATION:=1}
+export DISABLE_NOFN_CHECK=${DISABLE_NOFN_CHECK:=1}
+# NOTE(review): OPERATOR_WITHDRAWAL_FEE_SATS is exported twice (same default).
+export OPERATOR_WITHDRAWAL_FEE_SATS=${OPERATOR_WITHDRAWAL_FEE_SATS:=100000}
+
+export SECURITY_COUNCIL=${SECURITY_COUNCIL:="1:50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0"}
+export HEADER_CHAIN_PROOF_BATCH_SIZE=100
+
+# System parameters
+export NETWORK=${NETWORK:=regtest}
+export NUM_ROUND_TXS=${NUM_ROUND_TXS:=200}
+export NUM_KICKOFFS_PER_ROUND=${NUM_KICKOFFS_PER_ROUND:=100}
+export NUM_SIGNED_KICKOFFS=${NUM_SIGNED_KICKOFFS:=5}
+export BRIDGE_AMOUNT=${BRIDGE_AMOUNT:=1000000000}
+export KICKOFF_AMOUNT=${KICKOFF_AMOUNT:=0}
+export OPERATOR_CHALLENGE_AMOUNT=${OPERATOR_CHALLENGE_AMOUNT:=200000000}
+export COLLATERAL_FUNDING_AMOUNT=${COLLATERAL_FUNDING_AMOUNT:=200000000}
+export KICKOFF_BLOCKHASH_COMMIT_LENGTH=${KICKOFF_BLOCKHASH_COMMIT_LENGTH:=40}
+export WATCHTOWER_CHALLENGE_BYTES=${WATCHTOWER_CHALLENGE_BYTES:=144}
+export WINTERNITZ_LOG_D=${WINTERNITZ_LOG_D:=4}
+export USER_TAKES_AFTER=${USER_TAKES_AFTER:=200}
+export OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK=${OPERATOR_CHALLENGE_TIMEOUT_TIMELOCK:=144}
+export OPERATOR_CHALLENGE_NACK_TIMELOCK=${OPERATOR_CHALLENGE_NACK_TIMELOCK:=432}
+export DISPROVE_TIMEOUT_TIMELOCK=${DISPROVE_TIMEOUT_TIMELOCK:=720}
+export ASSERT_TIMEOUT_TIMELOCK=${ASSERT_TIMEOUT_TIMELOCK:=576}
+export OPERATOR_REIMBURSE_TIMELOCK=${OPERATOR_REIMBURSE_TIMELOCK:=12}
+export WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK=${WATCHTOWER_CHALLENGE_TIMEOUT_TIMELOCK:=288}
+export TIME_TO_SEND_WATCHTOWER_CHALLENGE=${TIME_TO_SEND_WATCHTOWER_CHALLENGE:=216}
+export LATEST_BLOCKHASH_TIMEOUT_TIMELOCK=${LATEST_BLOCKHASH_TIMEOUT_TIMELOCK:=360}
+export FINALITY_DEPTH=${FINALITY_DEPTH:=100}
+export START_HEIGHT=${START_HEIGHT:=190}
+export GENESIS_HEIGHT=${GENESIS_HEIGHT:=0}
+export GENESIS_CHAIN_STATE_HASH=${GENESIS_CHAIN_STATE_HASH:=5f7302ad16c8bd9ef2f3be00c8199a86f9e0ba861484abb4af5f7e457f8c2216}
+export BRIDGE_NONSTANDARD=${BRIDGE_NONSTANDARD:=false}
+export TELEMETRY_HOST=0.0.0.0
+export TELEMETRY_PORT=8081
+export RUST_MIN_STACK=33554432
+export RISC0_DEV_MODE=1
+
+# Define databases to drop and recreate (one per verifier index)
+databases=("clementine0" "clementine1" "clementine2" "clementine3")
+
+# Clear logs folder. Create it first: on a fresh checkout logs/ does not
+# exist and the per-actor log redirections below would fail.
+mkdir -p logs
+rm -rf logs/*
+
+# Postgres connection defaults used by dropdb/createdb below.
+export PGUSER=${PGUSER:=clementine}
+export PGPASSWORD=${PGPASSWORD:=clementine}
+export PGHOST=${PGHOST:=127.0.0.1}
+export PGPORT=${PGPORT:=5432}
+
+# Drop and recreate databases (drop errors are expected on first run)
+for db in "${databases[@]}"; do
+    echo "Dropping database: $db"
+    dropdb "$db" 2>/dev/null
+    echo "Creating database: $db"
+    createdb -O "$DB_USER" "$db"
+done
+
+# Build the project once so each actor starts from the same binary
+echo "Building clementine-core..."
+cargo build --package clementine-core --all-features --bin clementine-core
+if [ $? -ne 0 ]; then
+    echo "Build failed, exiting..."
+    exit 1
+fi
+BIN_PATH="./target/debug/clementine-core"
+
+# Corresponding roles (index-aligned with role_indexes below)
+roles=(
+    "verifier"
+    "verifier"
+    "verifier"
+    "verifier"
+    "operator"
+    "operator"
+    "aggregator"
+)
+role_indexes=(
+    0
+    1
+    2
+    3
+    0
+    1
+    0
+)
+
+# Store PIDs of the background actor processes
+pids=()
+
+# Function to kill all processes on exit
+cleanup() {
+    echo "Stopping all processes..."
+    for pid in "${pids[@]}"; do
+        kill "$pid" 2>/dev/null
+    done
+    exit 1
+}
+
+# Trap Ctrl+C and call cleanup
+trap cleanup SIGINT
+
+# Run processes in the background, one per entry in roles/role_indexes
+for i in "${!roles[@]}"; do
+    role="${roles[$i]}"
+    index="${role_indexes[$i]}"
+    filename=$(basename -- "$role$index")
+    log_file="logs/${filename%.toml}.jsonl"
+
+    # Set dynamic config vars for each actor: secret key is the actor's
+    # 1-based index repeated over 64 hex digits; ports start at 17001.
+    secret_key_digit=$((index + 1))
+    export SECRET_KEY=$(printf "%064d" | tr '0' "$secret_key_digit")
+    export PORT=$((17000 + i + 1))
+    export DB_NAME="${databases[$index]}"
+    # NOTE(review): operators reuse the verifier databases of the same index —
+    # confirm this sharing is intentional.
+
+    # Aggregator overwrites: fixed port 17000 and its own telemetry port.
+    # Use [[ ]] with a quoted var: `==` inside [ is non-POSIX and the
+    # unquoted $role would break if it were empty or contained spaces.
+    if [[ "$role" == "aggregator" ]]; then
+        export TELEMETRY_PORT=8082
+        export PORT=$((17000))
+        export SECRET_KEY=$(printf "%064d" | tr '0' "1")
+    fi
+
+    echo "Starting process with role $role, logging to $log_file"
+    echo "Secret key is $SECRET_KEY"
+    echo "Port is $PORT"
+
+    "$BIN_PATH" "$role" > "$log_file" 2> "logs/${filename%.toml}_error.log" &
+    pids+=("$!")
+
+    # Small delay between starts
+    sleep 1
+done
+
+# Wait for all processes
+wait