diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ab74042..87b22c0f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,19 +17,15 @@ jobs: lean-test: runs-on: warp-ubuntu-latest-x64-16x steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 + - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: leanprover/lean-action@v1 with: build-args: "--wfail -v" - test: true - - name: Build all targets - run: lake run build-all --wfail - name: Test Ix CLI run: lake test -- cli - name: Aiur tests run: lake test -- --ignored aiur aiur-hashes ixvm - - name: Check lean.h.hash - run: lake run check-lean-h-hash - name: Check Lean versions match for Ix and compiler bench run: diff lean-toolchain Benchmarks/Compile/lean-toolchain @@ -37,32 +33,21 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - with: - repository: argumentcomputer/ci-workflows - - uses: ./.github/actions/ci-env - - uses: actions/checkout@v6 - - uses: dtolnay/rust-toolchain@stable + - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: taiki-e/install-action@nextest - - uses: Swatinem/rust-cache@v2 - - name: Tests - run: cargo nextest run --release --profile ci --workspace --run-ignored all - - rust-lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v6 + # Install Lean for rust-bindgen step + - uses: leanprover/lean-action@v1 with: - repository: argumentcomputer/ci-workflows - - uses: ./.github/actions/ci-env - - uses: actions/checkout@v6 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + auto-config: false + use-github-cache: false - name: Check Rustfmt code style - run: cargo fmt --all --check + uses: actions-rust-lang/rustfmt@v1 + - name: Check clippy warnings + run: cargo xclippy - name: Check *everything* compiles run: cargo check --all-targets --all-features --workspace - - name: Check clippy warnings - run: cargo xclippy -D warnings + - name: Tests + run: cargo nextest run --release 
--profile ci --workspace --run-ignored all - name: Get Rust version run: | echo "RUST_VERSION=$(awk -F '"' '/^channel/ {print $2}' rust-toolchain.toml)" | tee -a $GITHUB_ENV diff --git a/.github/workflows/compile.yml b/.github/workflows/compile.yml index f7e0fb17..d1c94f48 100644 --- a/.github/workflows/compile.yml +++ b/.github/workflows/compile.yml @@ -20,8 +20,9 @@ jobs: - uses: actions/checkout@v6 - uses: leanprover/lean-action@v1 with: + auto-config: false + build: true build-args: "ix --wfail -v" - test: false - run: | mkdir -p ~/.local/bin echo | lake run install @@ -56,7 +57,7 @@ jobs: - uses: leanprover/lean-action@v1 with: lake-package-directory: ${{ env.COMPILE_DIR }} - build: false + auto-config: false use-github-cache: false # FLT and FC take a few minutes to rebuild, so we cache the build artifacts - if: matrix.cache_pkg diff --git a/.github/workflows/ignored.yml b/.github/workflows/ignored.yml new file mode 100644 index 00000000..cb0af803 --- /dev/null +++ b/.github/workflows/ignored.yml @@ -0,0 +1,47 @@ +name: Extended CI tests + +on: + push: + branches: main + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + ignored-test: + runs-on: warp-ubuntu-latest-x64-32x # Needs 128 GB RAM for Lean compilation + steps: + - uses: actions/checkout@v6 + - uses: actions-rust-lang/setup-rust-toolchain@v1 + - uses: leanprover/lean-action@v1 + with: + auto-config: false + test: true + test-args: "-- --ignored" + + valgrind: + runs-on: warp-ubuntu-latest-x64-16x + steps: + - uses: actions/checkout@v6 + - uses: actions-rust-lang/setup-rust-toolchain@v1 + - uses: leanprover/lean-action@v1 + with: + auto-config: false + build: true + build-args: "IxTests" + - name: Install valgrind + run: sudo apt-get update && sudo apt-get install -y valgrind + - name: Run tests under valgrind + run: | + valgrind \ + --leak-check=full \ + 
--show-leak-kinds=definite,possible \ + --errors-for-leak-kinds=definite \ + --track-origins=yes \ + --error-exitcode=1 \ + .lake/build/bin/IxTests -- --include-ignored aiur aiur-hashes ixvm diff --git a/Cargo.lock b/Cargo.lock index 793be9fc..6fef6f8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -211,6 +211,26 @@ dependencies = [ "virtue", ] +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn", +] + [[package]] name = "bitflags" version = "2.10.0" @@ -280,6 +300,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.4" @@ -326,6 +355,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "cobs" version = "0.3.0" @@ -1705,6 +1745,15 @@ dependencies = [ "z32", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -1732,7 +1781,8 @@ dependencies = [ "indexmap", "iroh", "iroh-base", - "itertools", + "itertools 0.14.0", + "lean-ffi", "multi-stark", "n0-snafu", "n0-watcher", @@ -1766,12 +1816,31 @@ version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "lean-ffi" +version = "0.1.0" +dependencies = [ + "bindgen", + "cc", + "num-bigint", +] + [[package]] name = "libc" version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.1", +] + [[package]] name = "litemap" version = "0.8.1" @@ -1848,6 +1917,12 @@ version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -2102,6 +2177,16 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "ntimestamp" version = "1.0.0" @@ -2234,7 +2319,7 @@ name = "p3-commit" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-challenger", "p3-dft", "p3-field", @@ -2248,7 +2333,7 @@ name = "p3-dft" version = 
"0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-field", "p3-matrix", "p3-maybe-rayon", @@ -2262,7 +2347,7 @@ name = "p3-field" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "num-bigint", "p3-maybe-rayon", "p3-util", @@ -2277,7 +2362,7 @@ name = "p3-fri" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-challenger", "p3-commit", "p3-dft", @@ -2337,7 +2422,7 @@ name = "p3-matrix" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-field", "p3-maybe-rayon", "p3-util", @@ -2372,7 +2457,7 @@ name = "p3-merkle-tree" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-commit", "p3-field", "p3-matrix", @@ -2390,7 +2475,7 @@ name = "p3-monty-31" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "num-bigint", "p3-dft", "p3-field", @@ -2425,7 +2510,7 @@ name = "p3-symmetric" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-field", "serde", ] @@ -2787,6 +2872,16 @@ dependencies = 
[ "ucd-parse", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro-crate" version = "3.4.0" diff --git a/Cargo.toml b/Cargo.toml index 917e4ecf..1f86fa1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,6 @@ +[workspace] +members = ["lean-ffi"] + [package] name = "ix_rs" version = "0.1.0" @@ -11,6 +14,7 @@ anyhow = "1" blake3 = "1.8.2" itertools = "0.14.0" indexmap = { version = "2", features = ["rayon"] } +lean-ffi = { path = "lean-ffi" } multi-stark = { git = "https://github.com/argumentcomputer/multi-stark.git", rev = "14b70601317e4500c7246c32a13ad08b3f560f2e" } num-bigint = "0.4.6" rayon = "1" @@ -31,7 +35,6 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = tr bincode = { version = "2.0.1", optional = true } serde = { version = "1.0.219", features = ["derive"], optional = true } - [dev-dependencies] quickcheck = "1.0.3" rand = "0.8.5" diff --git a/Ix/Aiur/Protocol.lean b/Ix/Aiur/Protocol.lean index aee0b44d..fb6fa701 100644 --- a/Ix/Aiur/Protocol.lean +++ b/Ix/Aiur/Protocol.lean @@ -12,10 +12,10 @@ instance : Nonempty Proof := PoofNonempty.property namespace Proof -@[extern "c_rs_aiur_proof_to_bytes"] +@[extern "rs_aiur_proof_to_bytes"] opaque toBytes : @& Proof → ByteArray -@[extern "c_rs_aiur_proof_of_bytes"] +@[extern "rs_aiur_proof_of_bytes"] opaque ofBytes : @& ByteArray → Proof end Proof @@ -58,10 +58,10 @@ instance : BEq IOBuffer where namespace AiurSystem -@[extern "c_rs_aiur_system_build"] +@[extern "rs_aiur_system_build"] opaque build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem -@[extern "c_rs_aiur_system_prove"] +@[extern "rs_aiur_system_prove"] private opaque prove' : @& AiurSystem → @& FriParameters → @& Bytecode.FunIdx → @& Array G → (ioData : @& Array G) → (ioMap : @& 
Array (Array G × IOKeyInfo)) → @@ -77,7 +77,7 @@ def prove (system : @& AiurSystem) (friParameters : @& FriParameters) let ioMap := ioMap.foldl (fun acc (k, v) => acc.insert k v) ∅ (claim, proof, ⟨ioData, ioMap⟩) -@[extern "c_rs_aiur_system_verify"] +@[extern "rs_aiur_system_verify"] opaque verify : @& AiurSystem → @& FriParameters → @& Array G → @& Proof → Except String Unit diff --git a/Ix/Iroh/Connect.lean b/Ix/Iroh/Connect.lean index ec770855..8de05f9a 100644 --- a/Ix/Iroh/Connect.lean +++ b/Ix/Iroh/Connect.lean @@ -15,7 +15,7 @@ structure GetResponse where bytes: ByteArray deriving Inhabited -@[never_extract, extern "c_rs_iroh_put"] +@[never_extract, extern "rs_iroh_put"] private opaque putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse def putBytes (nodeId : @& String) (addrs : @& Array String) (relayUrl : @& String) (input : @& String) : IO Unit := do @@ -23,7 +23,7 @@ def putBytes (nodeId : @& String) (addrs : @& Array String) (relayUrl : @& Strin | .ok response => IO.println s!"Pinned hash {response.hash}" | .error e => throw (IO.userError e) -@[never_extract, extern "c_rs_iroh_get"] +@[never_extract, extern "rs_iroh_get"] private opaque getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse def getBytes (nodeId : @& String) (addrs : @& Array String) (relayUrl : @& String) (hash : @& String) (writeToDisk : Bool): IO Unit := do diff --git a/Ix/Iroh/Serve.lean b/Ix/Iroh/Serve.lean index 6a098a63..2fe46ef7 100644 --- a/Ix/Iroh/Serve.lean +++ b/Ix/Iroh/Serve.lean @@ -4,7 +4,7 @@ public section namespace Iroh.Serve -@[never_extract, extern "c_rs_iroh_serve"] +@[never_extract, extern "rs_iroh_serve"] private opaque serve' : Unit → Except String Unit def serve : IO Unit := diff --git a/Ix/Keccak.lean b/Ix/Keccak.lean index f6107fea..abde63da 100644 --- a/Ix/Keccak.lean +++ b/Ix/Keccak.lean @@ -12,13 +12,13 @@ instance : Nonempty Hasher := GenericNonempty.property namespace Hasher 
-@[extern "c_rs_keccak256_hasher_init"] +@[extern "rs_keccak256_hasher_init"] opaque init : Unit → Hasher -@[extern "c_rs_keccak256_hasher_update"] +@[extern "rs_keccak256_hasher_update"] opaque update : (hasher: Hasher) → (input: @& ByteArray) → Hasher -@[extern "c_rs_keccak256_hasher_finalize"] +@[extern "rs_keccak256_hasher_finalize"] opaque finalize : (hasher: Hasher) → ByteArray end Hasher diff --git a/Ix/Meta.lean b/Ix/Meta.lean index b0a83169..0c7ee565 100644 --- a/Ix/Meta.lean +++ b/Ix/Meta.lean @@ -23,7 +23,9 @@ def getFileEnv (path : FilePath) : IO Environment := do let source ← IO.FS.readFile path let inputCtx := Parser.mkInputContext source path.toString let (header, parserState, messages) ← Parser.parseHeader inputCtx - let (env, messages) ← processHeader header default messages inputCtx 0 + let (env, messages) ← processHeaderCore + (HeaderSyntax.startPos header) (HeaderSyntax.imports header) + (isModule := false) default messages inputCtx 0 if messages.hasErrors then throw $ IO.userError $ "\n\n".intercalate $ (← messages.toList.mapM (·.toString)).map (String.trimAscii · |>.toString) diff --git a/README.md b/README.md index ee26cf51..0f7af7e6 100644 --- a/README.md +++ b/README.md @@ -176,7 +176,13 @@ Ix consists of the following core components: Compiler performance benchmarks are tracked at https://bencher.dev/console/projects/ix/plots -## Build & Install +## Usage + +### Prerequisites + +- Install Clang to enable Bindgen, then set `LIBCLANG_PATH` per https://rust-lang.github.io/rust-bindgen/requirements.html + +### Build - Build and test the Ix library with `lake build` and `lake test` - Install the `ix` binary with `lake run install`, or run with `lake exe ix` @@ -185,15 +191,18 @@ Compiler performance benchmarks are tracked at https://bencher.dev/console/proje **Lean tests:** `lake test` -- `lake test -- ` runs a specific test suite. 
Primary suites: `ffi`, `byte-array`, `ixon`, `claim`, `commit`, `canon`, `keccak`, `sharing`, `graph-unit`, `condense-unit` -- `lake test -- --ignored` runs expensive test suites: `shard-map`, `rust-canon-roundtrip`, `serial-canon-roundtrip`, `parallel-canon-roundtrip`, `graph-cross`, `condense-cross`, `compile`, `decompile`, `rust-serialize`, `rust-decompile`, `commit-io`, `aiur`, `aiur-hashes`, `ixvm` - - Any `canon` or `compile` test will require significant RAM, beware of OOM +- `lake test -- ` runs one or multiple primary test suites. Primary suites: `ffi`, `byte-array`, `ixon`, `claim`, `commit`, `canon`, `keccak`, `sharing`, `graph-unit`, `condense-unit` +- `lake test -- --ignored` runs only the expensive test suites: `shard-map`, `rust-canon-roundtrip`, `serial-canon-roundtrip`, `parallel-canon-roundtrip`, `graph-cross`, `condense-cross`, `compile`, `decompile`, `rust-serialize`, `rust-decompile`, `commit-io`, `aiur`, `aiur-hashes`, `ixvm` + - Most tests require at least 32 GB RAM + - The `compile` and `decompile` tests require 128 GB RAM - `aiur` and `aiur-hashes` generate ZK proofs and use significant CPU -- `lake test -- --ignored ` runs a specific expensive suite by name +- `lake test -- --ignored ` runs one or multiple expensive suites by name +- `lake test -- --include-ignored` runs both primary and expensive test suites +- `lake test -- --include-ignored ` runs all primary suites plus one or multiple expensive suites - `lake test -- cli` runs CLI integration tests - `lake test -- rust-compile` runs the Rust cross-compilation diagnostic -**Rust tests:** `cargo test` +**Rust tests:** `cargo test` or `cargo nextest run` ### Nix diff --git a/Tests/Ix/Ixon.lean b/Tests/Ix/Ixon.lean index caf6764d..9355efc7 100644 --- a/Tests/Ix/Ixon.lean +++ b/Tests/Ix/Ixon.lean @@ -1,8 +1,3 @@ -/- - Pure Lean serialization tests for Ixon types. - Generators have been moved to Tests/Gen/Ixon.lean. 
--/ - module public import Ix.Ixon public import Ix.Sharing diff --git a/Tests/Main.lean b/Tests/Main.lean index e80952e5..e57732a2 100644 --- a/Tests/Main.lean +++ b/Tests/Main.lean @@ -86,17 +86,20 @@ def main (args : List String) : IO UInt32 := do return ← Tests.Cli.suite let runIgnored := args.contains "--ignored" - let filterArgs := args.filter (· != "--ignored") + let includeIgnored := args.contains "--include-ignored" + let filterArgs := args.filter fun a => a != "--ignored" && a != "--include-ignored" - -- Run primary tests - let primaryResult ← LSpec.lspecIO primarySuites filterArgs - if primaryResult != 0 then return primaryResult + -- Run primary tests unless --ignored (without --include-ignored) is specified + if !runIgnored || includeIgnored then + let primaryArgs := if runIgnored || includeIgnored then [] else filterArgs + let primaryResult ← LSpec.lspecIO primarySuites primaryArgs + if primaryResult != 0 then return primaryResult - -- Run ignored tests only when --ignored is specified - if runIgnored then + -- Run ignored tests when --ignored or --include-ignored is specified + if runIgnored || includeIgnored then let mut result ← LSpec.lspecIO ignoredSuites filterArgs let filtered := if filterArgs.isEmpty then ignoredRunners - else ignoredRunners.filter fun (key, _) => filterArgs.any fun arg => key == arg + else filterArgs.filterMap fun arg => ignoredRunners.find? 
fun (key, _) => key == arg for (_, action) in filtered do let r ← action if r != 0 then result := r diff --git a/c/aiur.c b/c/aiur.c deleted file mode 100644 index 075bfd7c..00000000 --- a/c/aiur.c +++ /dev/null @@ -1,158 +0,0 @@ -#include "lean/lean.h" -#include "common.h" -#include "rust.h" - -static lean_external_class *g_aiur_proof_class = NULL; - -static lean_external_class *get_aiur_proof_class() { - if (g_aiur_proof_class == NULL) { - g_aiur_proof_class = lean_register_external_class( - &rs_aiur_proof_free, - &noop_foreach - ); - } - return g_aiur_proof_class; -} - -extern lean_obj_res c_rs_aiur_proof_to_bytes(b_lean_obj_arg proof) { - bytes_data *proof_bytes = rs_aiur_proof_to_bytes(lean_get_external_data(proof)); - size_t proof_size = proof_bytes->size; - lean_object *byte_array = lean_alloc_sarray(1, proof_size, proof_size); - rs_move_bytes(proof_bytes, byte_array); - return byte_array; -} - -extern lean_obj_res c_rs_aiur_proof_of_bytes(b_lean_obj_arg bytes) { - void *proof = rs_aiur_proof_of_bytes(bytes); - return lean_alloc_external(get_aiur_proof_class(), proof); -} - -static lean_external_class *g_aiur_system_class = NULL; - -static lean_external_class *get_aiur_system_class() { - if (g_aiur_system_class == NULL) { - g_aiur_system_class = lean_register_external_class( - &rs_aiur_system_free, - &noop_foreach - ); - } - return g_aiur_system_class; -} - -extern lean_obj_res c_rs_aiur_system_build(b_lean_obj_arg toplevel) { - void *aiur_system = rs_aiur_system_build(toplevel); - return lean_alloc_external(get_aiur_system_class(), aiur_system); -} - -extern lean_obj_res c_rs_aiur_system_prove( - b_lean_obj_arg aiur_system, - b_lean_obj_arg fri_parameters, - b_lean_obj_arg fun_idx, - b_lean_obj_arg args, - b_lean_obj_arg input_io_data, - b_lean_obj_arg input_io_map -) { - assert(lean_is_scalar(fun_idx)); - prove_data *pd = rs_aiur_system_prove( - lean_get_external_data(aiur_system), - fri_parameters, - fun_idx, - args, - input_io_data, - input_io_map - ); - 
- // Build the claim object - size_t claim_size = pd->claim_size; - lean_object *claim = lean_alloc_array(claim_size, claim_size); - lean_object **claim_values = lean_array_cptr(claim); - for (size_t i = 0; i < claim_size; i++) { - claim_values[i] = lean_box_uint64(0); - } - rs_set_array_g_values(claim, pd->claim); - rs_aiur_claim_free(pd->claim); - - // Build the io_data - size_t io_data_size = pd->io_data_size; - lean_object *io_data = lean_alloc_array(io_data_size, io_data_size); - lean_object **io_data_values = lean_array_cptr(io_data); - for (size_t i = 0; i < io_data_size; i++) { - io_data_values[i] = lean_box_uint64(0); - } - rs_set_aiur_io_data_values(io_data, pd->io_buffer); - - // Build io_map - size_t io_map_size = pd->io_map_size; - lean_object *io_map = lean_alloc_array(io_map_size, io_map_size); - lean_object **io_map_values = lean_array_cptr(io_map); - for (size_t i = 0; i < io_map_size; i++) { - // Array G - size_t key_size = pd->io_keys_sizes[i]; - lean_object *key = lean_alloc_array(key_size, key_size); - lean_object **key_values = lean_array_cptr(key); - for (size_t j = 0; j < key_size; j++) { - key_values[j] = lean_box_uint64(0); - } - - // IOKeyInfo - lean_object *key_info = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(key_info, 0, lean_box(0)); - lean_ctor_set(key_info, 1, lean_box(0)); - - // Array G × IOKeyInfo - lean_object *map_elt = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(map_elt, 0, key); - lean_ctor_set(map_elt, 1, key_info); - io_map_values[i] = map_elt; - } - rs_set_aiur_io_map_values(io_map, pd->io_buffer); - - // Free data regarding the io buffer - rs_aiur_prove_data_io_buffer_free(pd); - - // Array G × Array (Array G × IOKeyInfo) - lean_object *io_tuple = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(io_tuple, 0, io_data); - lean_ctor_set(io_tuple, 1, io_map); - - // Proof × Array G × Array (Array G × IOKeyInfo) - lean_object *proof_io_tuple = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(proof_io_tuple, 0, 
lean_alloc_external(get_aiur_proof_class(), pd->proof)); - lean_ctor_set(proof_io_tuple, 1, io_tuple); - - // Array G × Proof × Array G × Array (Array G × IOKeyInfo) - lean_object *claim_proof_io_tuple = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(claim_proof_io_tuple, 0, claim); - lean_ctor_set(claim_proof_io_tuple, 1, proof_io_tuple); - - // Free the outer ProveData struct (note: the proof object still lives!) - rs_aiur_prove_data_free(pd); - - return claim_proof_io_tuple; -} - -extern lean_obj_res c_rs_aiur_system_verify( - b_lean_obj_arg aiur_system, - b_lean_obj_arg fri_parameters, - b_lean_obj_arg claim, - b_lean_obj_arg proof -) { - c_result *result = rs_aiur_system_verify( - lean_get_external_data(aiur_system), - fri_parameters, - claim, - lean_get_external_data(proof) - ); - - lean_object *except; - if (result->is_ok) { - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, lean_box(0)); - } else { - except = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - rs__c_result_unit_string_free(result); - - return except; -} diff --git a/c/iroh.c b/c/iroh.c deleted file mode 100644 index a2bcbc72..00000000 --- a/c/iroh.c +++ /dev/null @@ -1,91 +0,0 @@ -#include "lean/lean.h" -#include "rust.h" - -extern lean_obj_res c_rs_iroh_serve() { - c_result *result = rs_iroh_serve(); - - lean_object *except; - if (result->is_ok) { - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, lean_box(0)); - } else { - except = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - - rs__c_result_unit_string_free(result); - return except; -} - -typedef struct { - char *message; - char *hash; -} put_response_ffi; - -extern lean_obj_res c_rs_iroh_put(b_lean_obj_arg node_id, b_lean_obj_arg addrs, b_lean_obj_arg relay_url, b_lean_obj_arg input) { - char const *node_id_str = lean_string_cstr(node_id); - char const *relay_url_str = lean_string_cstr(relay_url); - char const *input_str = 
lean_string_cstr(input); - - c_result *result = rs_iroh_put(node_id_str, addrs, relay_url_str, input_str); - - lean_object *except; - if (result->is_ok) { - put_response_ffi *put_response = result->data; - lean_object *message = lean_mk_string(put_response->message); - lean_object *hash = lean_mk_string(put_response->hash); - - lean_object *put_response_ctor = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(put_response_ctor, 0, message); - lean_ctor_set(put_response_ctor, 1, hash); - - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, put_response_ctor); - } else { - except = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - - rs__c_result_iroh_put_response_string_free(result); - return except; -} - -typedef struct { - char *message; - char *hash; - bytes_data *bytes; -} get_response_ffi; - -extern lean_obj_res c_rs_iroh_get(b_lean_obj_arg node_id, b_lean_obj_arg addrs, b_lean_obj_arg relay_url, b_lean_obj_arg hash) { - char const *node_id_str = lean_string_cstr(node_id); - char const *relay_url_str = lean_string_cstr(relay_url); - char const *hash_str = lean_string_cstr(hash); - - c_result *result = rs_iroh_get(node_id_str, addrs, relay_url_str, hash_str); - - lean_object *except; - if (result->is_ok) { - get_response_ffi *get_response = result->data; - lean_object *message = lean_mk_string(get_response->message); - lean_object *hash = lean_mk_string(get_response->hash); - - bytes_data *rs_bytes = get_response->bytes; - size_t bytes_size = rs_bytes->size; - lean_object *byte_array = lean_alloc_sarray(1, bytes_size, bytes_size); - rs_move_bytes(rs_bytes, byte_array); - - lean_object *get_response_ctor = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(get_response_ctor, 0, message); - lean_ctor_set(get_response_ctor, 1, hash); - lean_ctor_set(get_response_ctor, 2, byte_array); - - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, get_response_ctor); - } else { - except = lean_alloc_ctor(0, 1, 0); - 
lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - - rs__c_result_iroh_get_response_string_free(result); - return except; -} diff --git a/c/ixon_ffi.c b/c/ixon_ffi.c deleted file mode 100644 index 4823b87a..00000000 --- a/c/ixon_ffi.c +++ /dev/null @@ -1,243 +0,0 @@ -#include "lean/lean.h" -#include - -// Lean's internal mpz allocation - takes ownership of the mpz_t value -// (declared in Lean's runtime but not exposed in public headers) -extern lean_object * lean_alloc_mpz(mpz_t v); -#include "common.h" -#include "rust.h" - -// External class for RustCompiledEnv -static lean_external_class *g_rust_compiled_env_class = NULL; - -static lean_external_class *get_rust_compiled_env_class() { - if (g_rust_compiled_env_class == NULL) { - g_rust_compiled_env_class = lean_register_external_class( - &rs_free_rust_env, - &noop_foreach - ); - } - return g_rust_compiled_env_class; -} - -// FFI wrapper: Test round-trip (just pass through, returns scalar) -extern uint64_t c_rs_test_ffi_roundtrip(b_lean_obj_arg name) { - return rs_test_ffi_roundtrip(name); -} - -// FFI wrapper: Compile environment with Rust -// Returns: IO RustCompiledEnv (external object) -extern lean_obj_res c_rs_compile_env_rust_first(b_lean_obj_arg env_consts, lean_obj_arg world) { - void *rust_env = rs_compile_env_rust_first(env_consts); - if (rust_env == NULL) { - // Return IO error - lean_object *err = lean_mk_string("Rust compilation failed"); - lean_object *io_err = lean_io_result_mk_error(lean_mk_io_user_error(err)); - return io_err; - } - lean_object *external = lean_alloc_external(get_rust_compiled_env_class(), rust_env); - return lean_io_result_mk_ok(external); -} - -// FFI wrapper: Free RustCompiledEnv -// Returns: IO Unit -extern lean_obj_res c_rs_free_rust_env(lean_obj_arg rust_env_obj, lean_obj_arg world) { - // The external object will be freed by Lean's GC when it's no longer referenced - // We don't need to do anything here since we registered a finalizer - lean_dec(rust_env_obj); - 
return lean_io_result_mk_ok(lean_box(0)); -} - -// FFI wrapper: Get block count -extern uint64_t c_rs_get_rust_env_block_count(b_lean_obj_arg rust_env_obj) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_rust_env_block_count(rust_env); -} - -// FFI wrapper: Compare a single block -extern uint64_t c_rs_compare_block( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - b_lean_obj_arg lean_bytes -) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_compare_block(rust_env, name, lean_bytes); -} - -// FFI wrapper: Get Rust block bytes as ByteArray -// Returns: IO ByteArray -extern lean_obj_res c_rs_get_block_bytes( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - lean_obj_arg world -) { - void *rust_env = lean_get_external_data(rust_env_obj); - - // Get the length first - uint64_t len = rs_get_block_bytes_len(rust_env, name); - - // Allocate ByteArray - lean_object *byte_array = lean_alloc_sarray(1, len, len); - - // Copy bytes into it - if (len > 0) { - rs_copy_block_bytes(rust_env, name, byte_array); - } - - return lean_io_result_mk_ok(byte_array); -} - -// FFI wrapper: Get Rust sharing vector length -extern uint64_t c_rs_get_block_sharing_len( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name -) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_block_sharing_len(rust_env, name); -} - -// FFI wrapper: Get pre-sharing expressions buffer length -extern uint64_t c_rs_get_pre_sharing_exprs_len( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name -) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_pre_sharing_exprs_len(rust_env, name); -} - -// FFI wrapper: Get pre-sharing expressions -// Returns: IO UInt64 (number of expressions) -extern lean_obj_res c_rs_get_pre_sharing_exprs( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - lean_obj_arg out_buf, - lean_obj_arg world -) { - void *rust_env = lean_get_external_data(rust_env_obj); - uint64_t n_exprs = 
rs_get_pre_sharing_exprs(rust_env, name, out_buf); - return lean_io_result_mk_ok(lean_box_uint64(n_exprs)); -} - -// FFI wrapper: Look up a constant's compiled address -// Returns: IO Bool (true if found) -extern lean_obj_res c_rs_lookup_const_addr( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - lean_obj_arg out_addr, - lean_obj_arg world -) { - void *rust_env = lean_get_external_data(rust_env_obj); - uint64_t found = rs_lookup_const_addr(rust_env, name, out_addr); - return lean_io_result_mk_ok(lean_box(found != 0)); -} - -// FFI wrapper: Get compiled constant count -extern uint64_t c_rs_get_compiled_const_count(b_lean_obj_arg rust_env_obj) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_compiled_const_count(rust_env); -} - -// ============================================================================= -// Lean C API wrappers for Rust to call -// These wrap Lean's allocation functions so they can be linked from Rust -// ============================================================================= - -lean_object *c_lean_alloc_ctor(unsigned tag, unsigned num_objs, unsigned scalar_sz) { - return lean_alloc_ctor(tag, num_objs, scalar_sz); -} - -void c_lean_ctor_set(lean_object *o, unsigned i, lean_object *v) { - lean_ctor_set(o, i, v); -} - -lean_object *c_lean_ctor_get(lean_object *o, unsigned i) { - return lean_ctor_get(o, i); -} - -unsigned c_lean_obj_tag(lean_object *o) { - return lean_obj_tag(o); -} - -void c_lean_ctor_set_uint8(lean_object *o, unsigned offset, uint8_t v) { - lean_ctor_set_uint8(o, offset, v); -} - -void c_lean_ctor_set_uint64(lean_object *o, unsigned offset, uint64_t v) { - lean_ctor_set_uint64(o, offset, v); -} - -lean_object *c_lean_mk_string(char const *s) { - return lean_mk_string(s); -} - -lean_object *c_lean_alloc_sarray(unsigned elem_size, size_t size, size_t capacity) { - return lean_alloc_sarray(elem_size, size, capacity); -} - -uint8_t *c_lean_sarray_cptr(lean_object *o) { - return 
lean_sarray_cptr(o); -} - -lean_object *c_lean_alloc_array(size_t size, size_t capacity) { - return lean_alloc_array(size, capacity); -} - -void c_lean_array_set_core(lean_object *o, size_t i, lean_object *v) { - lean_array_set_core(o, i, v); -} - -lean_object *c_lean_array_get_core(lean_object *o, size_t i) { - return lean_array_get_core(o, i); -} - -void c_lean_inc(lean_object *o) { - lean_inc(o); -} - -void c_lean_inc_n(lean_object *o, size_t n) { - lean_inc_n(o, n); -} - -lean_object *c_lean_io_result_mk_ok(lean_object *v) { - return lean_io_result_mk_ok(v); -} - -lean_object *c_lean_io_result_mk_error(lean_object *err) { - return lean_io_result_mk_error(err); -} - -lean_object *c_lean_mk_io_user_error(lean_object *msg) { - return lean_mk_io_user_error(msg); -} - -lean_object *c_lean_uint64_to_nat(uint64_t n) { - return lean_uint64_to_nat(n); -} - -// Create a big Nat from limbs (little-endian u64 array) -// This uses GMP's mpz_import and Lean's lean_alloc_mpz -lean_object *c_lean_nat_from_limbs(size_t num_limbs, uint64_t const *limbs) { - if (num_limbs == 0) { - return lean_box(0); - } - if (num_limbs == 1 && limbs[0] <= LEAN_MAX_SMALL_NAT) { - return lean_box(limbs[0]); - } - if (num_limbs == 1) { - return lean_uint64_to_nat(limbs[0]); - } - - // For multi-limb values, use GMP - mpz_t value; - mpz_init(value); - // Import limbs: little-endian order, native endian within limbs - // order = -1 (least significant limb first) - // size = 8 bytes per limb - // endian = 0 (native) - // nails = 0 (full limbs) - mpz_import(value, num_limbs, -1, sizeof(uint64_t), 0, 0, limbs); - - lean_object *result = lean_alloc_mpz(value); - // lean_alloc_mpz takes ownership, so we don't clear - return result; -} diff --git a/c/keccak.c b/c/keccak.c deleted file mode 100644 index 02d47895..00000000 --- a/c/keccak.c +++ /dev/null @@ -1,35 +0,0 @@ -#include "lean/lean.h" -#include "common.h" -#include "linear.h" -#include "rust.h" - -static lean_external_class 
*g_keccak256_hasher_class = NULL; - -static lean_external_class *get_keccak256_hasher_class() { - if (g_keccak256_hasher_class == NULL) { - g_keccak256_hasher_class = lean_register_external_class( - &rs_keccak256_hasher_free, - &noop_foreach - ); - } - return g_keccak256_hasher_class; -} - -extern lean_obj_res c_rs_keccak256_hasher_init() { - void *hasher = rs_keccak256_hasher_init(); - return lean_alloc_external(get_keccak256_hasher_class(), hasher); -} - -extern lean_obj_res c_rs_keccak256_hasher_update( - lean_obj_arg hasher, - b_lean_obj_arg input -) { - void* hasher_cloned = rs_keccak256_hasher_update(lean_get_external_data(hasher), input); - return lean_alloc_external(get_keccak256_hasher_class(), hasher_cloned); -} - -extern lean_obj_res c_rs_keccak256_hasher_finalize(lean_obj_arg hasher) { - lean_object *buffer = lean_alloc_sarray(1, 0, 32); - rs_keccak256_hasher_finalize(lean_get_external_data(hasher), buffer); - return buffer; -} diff --git a/c/linear.h b/c/linear.h index 56160cb1..9febff87 100644 --- a/c/linear.h +++ b/c/linear.h @@ -1,3 +1,175 @@ +/* + +NOTE: This file and the linear API in general are currently unused, as we have decided to not pass mutable objects to and from Rust in order to keep the FFI boundary simple. + +However, we may revisit the `linear.h` API in the future, at which point this file would be ported to Rust in the `lean_sys` crate. + +For now, the `linear.h` documenation is provided below as a docstring. + +*/ + +/* + +## Dealing with mutable objects + +As a functional language, Lean primarily uses purely functional data structures, +whereas Rust functions often mutate objects. This fundamental difference in +computational paradigms requires special care; otherwise, we risk introducing +Lean code with unintended or incorrect behavior. + +Let's consider a type `T` and a Rust function `f(&mut T)`. In Lean, we would +like to have the corresponding `f : T → T`, which returns a modified `T` but +leaves the input `T` intact. 
How can we use Rust's `f` as the implementation of +Lean's `f`? + +One approach is to use a Rust function `g(&T) -> T`, implemented as follows: + +```rust +fn g(t: &T) -> T { + let mut clone = t.clone(); + f(&mut clone); + clone +} +``` + +Already we can see two problems. First, `g` requires `T` to implement `Clone`. +Second, even when `T: Clone`, cloning might be expensive. The fact is that the +implementation provided, Rust's `f`, was designed to mutate `T` and we shouldn't +be fighting against that. + +So Ix goes with the flow and mutates `T` with Rust's `f`. Consequently, Lean's +`f : T → T` will, in fact, mutate the input, which will be returned as the +output. A direct sin against the purity of the functional world. + +At this point, the best we can do is to create guardrails to protect us against +ourselves and force us to use terms of `T` linearly when `f` is involved. That +is, after applying `f (t : T)`, reusing `t` should be prohibitive. + +### The birth of `linear.h` + +We've explored the motivation for the API provided by `linear.h`, in which a +`linear_object` wraps the reference to the raw Rust object and has a +`bool outdated` attribute telling whether the linear object can be used or not. +Then, instead of `lean_external_object` pointing directly to the Rust object, it +points to a `linear_object`. When we ought to use the Rust object, we must +always "assert linearity", which panics if `outdated` is `true`. + +To illustrate it, let's use "E" for `lean_external_object`, "L" for +`linear_object` and "R" for potentially mutating Rust objects. Right after +initialization, we have: + +``` +E0 ──> L0 (outdated = false) ──> R +``` + +Now suppose we need to mutate `R`. We do it and then we perform a "linear bump", +which copies `L` and sets it as outdated. Then we wrap it as another external +object: + +``` +E1 ──> L1 (outdated = false) ─┐ +E0 ──> L0 (outdated = true) ──┴> R +``` + +And after `N` linear bumps: + +``` +EN ──> LN (outdated = false) ─┐ +... 
┆ +E2 ──> L2 (outdated = true) ──┤ +E1 ──> L1 (outdated = true) ──┤ +E0 ──> L0 (outdated = true) ──┴> R +``` + +Great. Now imagine Lean wants to free these external objects. The function that +frees a linear object should only free the Rust object when `outdated == false`. +Following up with the image above, let's free `E1`. + +``` +EN ──> LN (outdated = false) ─┐ +... ┆ +E2 ──> L2 (outdated = true) ──┤ + │ +E0 ──> L0 (outdated = true) ──┴> R +``` + +When freeing `EN`, the Rust object will be deallocated: + +``` +... ┆ +E2 ──> L2 (outdated = true) ──┤ + │ +E0 ──> L0 (outdated = true) ──┴> X +``` + +All remaining external objects are outdated so their respective linear objects +won't try to free the (already dropped) Rust object. + +## What if a Rust function takes ownership of the object? + +When ownership is required, we mutate the Rust object by "taking" or "replacing" +it with a dummy object. Concretely, `std::mem::take` or `std::mem::replace` are +used, returning the actual `T` from a `&mut T`. And with `T` at hand, the target +function can be called. + +The latest linear object is marked as outdated and the chain of linear objects +is broken. But then, how will the residual Rust object be dropped once Lean +wants to drop all external objects? + +It turns out we also need a `bool finalize_even_if_outdated` attribute on the +`linear_object` struct, which becomes `true` in these scenarios. By doing this, +we're "ditching" the linear object. And the logic to free linear objects needs +one small adjustment: the Rust object must be dropped when either the linear +object is not outdated or when `finalize_even_if_outdated` is set to `true`. + +The invariant that needs to be maintained is that *only one* linear object can +free the shared Rust object. + +## Preventing unintentional Lean optimizations + +We've done our lower level homework and now we have an `f : T → T` in Lean that +should panic at runtime when its input is reused. So we do: + +```lean4 + ... 
+ let a := f t + let b := f t -- reuses `t`! + ... +``` + +We run the code and it executes smoothly. Why!? + +The Lean compilation process detects that both `a` and `b` are equal to `f t` so +instead of calling `f` a second time it just sets `b` with the value of `a`. It +appears to be harmless but in fact we want discourage this kind of source code +at all costs. + +Lean provides the tag `never_extract` precisely for this. It's used internally +when some function performs side-effects and should never be optimized away. + +And to conclude, there are cases in which this optimization is truly harmful. +Consider an initialization function `T.init : Unit → T` in the following code: + +```lean4 + ... + let t1 := T.init () + let t2 := T.init () + let a := f t1 + let b := f t2 + ... +``` + +If `T.init` is not tagged with `never_extract`, `t2` and `t1` will point to the +same object, the first call to `f` will mark it as outdated and thus the second +call will panic! + +So the `never_extract` tag must be applied to functions that: + +* Mutate their input or +* Return objects that work on the basis of mutation + +*/ + #pragma once #include "lean/lean.h" #include "common.h" diff --git a/c/rust.h b/c/rust.h deleted file mode 100644 index 03060475..00000000 --- a/c/rust.h +++ /dev/null @@ -1,191 +0,0 @@ -#pragma once - -#include "lean/lean.h" - -typedef struct { - bool is_ok; - void *data; -} c_result; - -/* --- Aiur -- */ - -typedef struct { - size_t size; - void *bytes_vec; -} bytes_data; - -void rs_move_bytes(bytes_data*, lean_obj_arg); - -bytes_data *rs_aiur_proof_to_bytes(void*); -void *rs_aiur_proof_of_bytes(b_lean_obj_arg); - -void rs_aiur_system_free(void*); -void *rs_aiur_system_build(b_lean_obj_arg); - -typedef struct { - size_t claim_size; - void *claim; - void *proof; - void *io_buffer; - size_t io_data_size; - size_t io_map_size; - size_t *io_keys_sizes; -} prove_data; - -void rs_aiur_claim_free(void*); -void rs_aiur_proof_free(void*); -void 
rs_aiur_prove_data_io_buffer_free(void*); -void rs_aiur_prove_data_free(prove_data*); - -prove_data *rs_aiur_system_prove( - void*, b_lean_obj_arg, b_lean_obj_arg, b_lean_obj_arg, b_lean_obj_arg, b_lean_obj_arg -); -void rs_set_array_g_values(lean_obj_arg, void*); -void rs_set_aiur_io_data_values(lean_obj_arg, void*); -void rs_set_aiur_io_map_values(lean_obj_arg, void*); - -c_result *rs_aiur_system_verify(void*, b_lean_obj_arg, b_lean_obj_arg, void*); - -void rs__c_result_unit_string_free(c_result *); - -/* --- Iroh --- */ - -c_result *rs_iroh_serve(void); -c_result *rs_iroh_put(char const *, b_lean_obj_arg, char const *, char const *); -c_result *rs_iroh_get(char const *, b_lean_obj_arg, char const *, char const *); - -void rs__c_result_iroh_put_response_string_free(c_result *); -void rs__c_result_iroh_get_response_string_free(c_result *); - -/* --- Keccak Hasher --- */ - -void *rs_keccak256_hasher_init(void); -void rs_keccak256_hasher_free(void*); -void *rs_keccak256_hasher_update(void*, void*); -void *rs_keccak256_hasher_finalize(void*, void*); - -/* --- Ixon FFI (incremental block comparison) --- */ - -// Test FFI round-trip -uint64_t rs_test_ffi_roundtrip(b_lean_obj_arg name); - -// Compile environment with Rust, returns opaque RustCompiledEnv* -void *rs_compile_env_rust_first(b_lean_obj_arg env_consts); - -// Free a RustCompiledEnv -void rs_free_rust_env(void *rust_env); - -// Get block count from RustCompiledEnv -uint64_t rs_get_rust_env_block_count(void const *rust_env); - -// Compare a single block, returns packed result -uint64_t rs_compare_block(void const *rust_env, b_lean_obj_arg name, b_lean_obj_arg lean_bytes); - -// Get the length of Rust's compiled bytes for a block -uint64_t rs_get_block_bytes_len(void const *rust_env, b_lean_obj_arg name); - -// Copy Rust's compiled bytes into a pre-allocated ByteArray -void rs_copy_block_bytes(void const *rust_env, b_lean_obj_arg name, lean_obj_arg dest); - -// Get Rust's sharing vector length for a block 
-uint64_t rs_get_block_sharing_len(void const *rust_env, b_lean_obj_arg name); - -// Compare block with typed result (returns BlockCompareDetail) -lean_obj_res rs_compare_block_v2(void const *rust_env, b_lean_obj_arg name, b_lean_obj_arg lean_bytes, uint64_t lean_sharing_len); - -// Get the buffer length needed for pre-sharing expressions -uint64_t rs_get_pre_sharing_exprs_len(void const *rust_env, b_lean_obj_arg name); - -// Get pre-sharing root expressions for a constant -uint64_t rs_get_pre_sharing_exprs(void const *rust_env, b_lean_obj_arg name, lean_obj_arg out_buf); - -// Look up a constant's compiled address (32-byte blake3 hash) -// Returns 1 on success, 0 if name not found -uint64_t rs_lookup_const_addr(void const *rust_env, b_lean_obj_arg name, lean_obj_arg out_addr); - -// Get the total number of compiled constants -uint64_t rs_get_compiled_const_count(void const *rust_env); - -/* --- Utility FFI --- */ - -// Read first 8 bytes of ByteArray as little-endian UInt64 (for Address.Hashable) -uint64_t rs_bytearray_to_u64_le(b_lean_obj_arg ba); - -/* --- Ix Canonicalization FFI --- */ - -// Canonicalize environment and return Ix.Environment -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO Ix.Environment -lean_obj_res rs_canonicalize_env_to_ix(b_lean_obj_arg env_consts); - -/* --- Round-trip FFI for testing Lean object construction --- */ - -// Round-trip basic types: Lean -> Rust -> Lean -lean_object *rs_roundtrip_nat(b_lean_obj_arg nat); -lean_object *rs_roundtrip_string(b_lean_obj_arg str); -lean_object *rs_roundtrip_list_nat(b_lean_obj_arg list); -lean_object *rs_roundtrip_array_nat(b_lean_obj_arg arr); -lean_object *rs_roundtrip_bytearray(b_lean_obj_arg ba); - -// Round-trip Ix types: Lean -> Rust -> Lean -lean_object *rs_roundtrip_ix_address(b_lean_obj_arg addr); -lean_object *rs_roundtrip_ix_name(b_lean_obj_arg name); -lean_object *rs_roundtrip_ix_level(b_lean_obj_arg level); -lean_object *rs_roundtrip_ix_expr(b_lean_obj_arg expr); 
-lean_object *rs_roundtrip_ix_int(b_lean_obj_arg int_val); -lean_object *rs_roundtrip_ix_substring(b_lean_obj_arg sub); -lean_object *rs_roundtrip_ix_source_info(b_lean_obj_arg si); -lean_object *rs_roundtrip_ix_syntax_preresolved(b_lean_obj_arg sp); -lean_object *rs_roundtrip_ix_syntax(b_lean_obj_arg syn); -lean_object *rs_roundtrip_ix_data_value(b_lean_obj_arg dv); -lean_object *rs_roundtrip_bool(b_lean_obj_arg b); -lean_object *rs_roundtrip_ix_constant_info(b_lean_obj_arg info); -lean_object *rs_roundtrip_ix_environment(b_lean_obj_arg env); -lean_object *rs_roundtrip_ix_raw_environment(b_lean_obj_arg raw_env); - -// Round-trip BlockCompareResult and BlockCompareDetail -lean_object *rs_roundtrip_block_compare_result(b_lean_obj_arg ptr); -lean_object *rs_roundtrip_block_compare_detail(b_lean_obj_arg ptr); - -/* --- RawCompiledEnv FFI --- */ - -// Compile environment and return RawCompiledEnv -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RawCompiledEnv -lean_obj_res rs_compile_env_to_raw(b_lean_obj_arg env_consts); - -// Complete compilation pipeline - returns RustCompilationResult -// (rawEnv, condensed, compiled) -lean_obj_res rs_compile_env_full(b_lean_obj_arg env_consts); - -// Compile environment to Ixon RawEnv (structured Lean objects) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RawEnv -lean_obj_res rs_compile_env_to_ixon(b_lean_obj_arg env_consts); - -// Round-trip RawEnv for FFI testing -lean_object *rs_roundtrip_raw_env(b_lean_obj_arg raw_env); - -// Round-trip RustCondensedBlocks for FFI testing -lean_object *rs_roundtrip_rust_condensed_blocks(b_lean_obj_arg condensed); - -// Round-trip RustCompilePhases for FFI testing -lean_object *rs_roundtrip_rust_compile_phases(b_lean_obj_arg phases); - -// Combined compilation phases - returns RustCompilePhases -// (rawEnv, condensed, compileEnv) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RustCompilePhases -lean_obj_res rs_compile_phases(b_lean_obj_arg 
env_consts); - -/* --- Graph/SCC FFI --- */ - -// Build reference graph in Rust (returns Ix.Name-based graph) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO (Array (Ix.Name × Array Ix.Name)) -lean_obj_res rs_build_ref_graph(b_lean_obj_arg env_consts); - -// Compute SCCs in Rust (returns Ix.Name-based CondensedBlocks) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RustCondensedBlocks -lean_obj_res rs_compute_sccs(b_lean_obj_arg env_consts); diff --git a/c/unsigned.c b/c/unsigned.c deleted file mode 100644 index e1493bd7..00000000 --- a/c/unsigned.c +++ /dev/null @@ -1,26 +0,0 @@ -#include "lean/lean.h" -#include "rust.h" - -#define memcpy __builtin_memcpy // Avoids including `string.h` - -static inline lean_obj_res mk_byte_array(size_t size, uint8_t *bytes) { - lean_object *o = lean_alloc_sarray(1, size, size); - memcpy(lean_sarray_cptr(o), bytes, size); - return o; -} - -extern lean_obj_res c_u16_to_le_bytes(uint16_t u16) { - return mk_byte_array(sizeof(uint16_t), (uint8_t*)&u16); -} - -extern lean_obj_res c_u32_to_le_bytes(uint32_t u32) { - return mk_byte_array(sizeof(uint32_t), (uint8_t*)&u32); -} - -extern lean_obj_res c_u64_to_le_bytes(uint64_t u64) { - return mk_byte_array(sizeof(uint64_t), (uint8_t*)&u64); -} - -extern lean_obj_res c_usize_to_le_bytes(size_t usize) { - return mk_byte_array(sizeof(size_t), (uint8_t*)&usize); -} diff --git a/docs/ffi.md b/docs/ffi.md index eb8f9e02..a86a88d6 100644 --- a/docs/ffi.md +++ b/docs/ffi.md @@ -1,224 +1,119 @@ # Ix FFI framework Ix extensively utilizes Lean's FFI capabilities to interface with Rust -implementations while minimizing overhead. This document consolidates the -principles for doing so responsibly. 
- -We follow a strict dependency order: - -* Lean can interface with C -* C can interface with Rust -* Lean can interface with Rust - -Hence we use the following naming conventions: - -* Names of external C functions start with "c_" -* Names of external Rust functions start with "rs_" -* Names of external C functions that depend on Rust functions start with "c_rs_" +implementations for performance benefits while minimizing overhead. This document +describes the approach used in Ix and best practices for writing Lean->Rust FFI. Interfacing with C is a well-established and well-supported case in Lean. After all, Lean's runtime is implemented in C and the API for reading, allocating and populating Lean objects is rich enough to support this interaction. Interfacing -with Rust, however, introduces a new set of challenges. - -## Reading data from Lean - -Making sense of data that's produced by Lean already poses an initial challenge. -One possible approach is as follows: - -1. Serialize the data in Lean as a `ByteArray` and provide it to a C function -2. Get the reference to the slice of bytes and pass it to the Rust function -3. Deserialize the data and use it as needed - -While that's possible (and plausible!) it adds a recurring serde cost overhead. -So the approach taken in Ix is different. - -The Ix's Rust static lib mimics the memory layout of Lean runtime objects and -uses `unsafe` code to turn `*const c_void` pointers into appropriate `&T` -references. Though, when possible, raw data extraction of Lean objects is -preferably done in C with the API provided by the Lean toolchain (via `lean.h`). - -For example, when targeting a Rust function that consumes a string, we don't -need to pass a reference to the whole `lean_string_object`. Instead, we make use -of the fact that Lean strings are `\0`-terminated and only pass a `char const *` -from C to Rust, which receives it as a `*const c_char` and then (unsafely) turns -it into a `&str`. 
- -Extra care must be taken when dealing with -[inductive types](https://github.com/leanprover/lean4/blob/master/doc/dev/ffi.md#inductive-types), -as the order of arguments in the Lean objects may not match the same order from -the higher level type definition in Lean. - -## Producing data for Lean - -Since we can mimic the memory layout of Lean objects in Rust, we should allocate -and populate them in Rust, right? Well, the answer is "no". - -Lean employs different allocation methods depending on compilation flags, making -it impractical to track them in Rust. Instead, we allocate the inner data on the -heap and return a raw pointer to C, which then wraps it using the appropriate -API. +with Rust, however, is not trivial because of Rust's distinct +ownership-based memory management system. + +## Bindgen Rust bindings to `lean.h` + +In order to avoid this complexity and keep Lean in control of memory +management for objects created via FFI to Rust, we use +[rust-bindgen](https://github.com/rust-lang/rust-bindgen) to automatically +generate Rust bindings to +[`lean.h`](https://github.com/leanprover/lean4/blob/master/src/include/lean/lean.h). +This allows us to create and manage Lean objects in Rust without taking +control of the underlying memory, needing to implement `Drop`, or having to +know about the state of Lean's reference counting mechanism. Bindgen runs in +`build.rs` and generates unsafe Rust functions that link to the `lean.h` +library. This external module can then be found at +`target/release/lean-ffi-/out/lean.rs`. + +## `LeanObject` API + +To facilitate working with Lean objects in Rust, we also designed an +ergonomic API in the `lean-ffi` crate to wrap raw C pointers in Rust types, +with methods to abstract the low-level binding function calls from `lean.h`. +The fundamental building block is `LeanObject`, a wrapper around an opaque +Lean value represented in Rust as `*const c_void`. 
This value is either a +pointer to a heap-allocated object or a tagged scalar (a raw value that fits +into one pointer's width, e.g. a `Bool` or small `Nat`). `LeanObject` is +then itself wrapped into Lean types such as `LeanCtor` for inductives, +`LeanArray` for arrays, etc. + +A `lean_domain_type!` macro is also defined to allow for easy construction +of arbitrary Lean object types, which can then be used directly in FFI +functions to disambiguate between other `LeanObject`s. In Ix these are +defined in `src/lean.rs`. To construct custom data in Rust, the user can +define their own constructor methods using `LeanCtor` (e.g. +[`LeanPutResponse`](src/ffi/iroh.rs)). It is possible to use `LeanObject` +or `*const c_void` directly in an `extern "C" fn`, but this is generally +not recommended as internal Rust functions may pass in the wrong object +more easily, and any low-level constructors would not be hidden behind the +API boundary. To enforce this, the `From for LeanObject` trait is +implemented to get the underlying `LeanObject`, but creating a wrapper type +from a `LeanObject` requires an explicit constructor for clarity. A key concept in this design is that ownership of the data is transferred to Lean, making it responsible for deallocation. If the data type is intended to be -used as a black box by Lean, `lean_external_object` is an useful abstraction. It -requires a function pointer for deallocation, meaning the Rust code must provide -a function that properly frees the object's memory by dropping it. - -## Dealing with mutable objects - -As a functional language, Lean primarily uses purely functional data structures, -whereas Rust functions often mutate objects. This fundamental difference in -computational paradigms requires special care; otherwise, we risk introducing -Lean code with unintended or incorrect behavior. - -Let's consider a type `T` and a Rust function `f(&mut T)`. 
In Lean, we would -like to have the corresponding `f : T → T`, which returns a modified `T` but -leaves the input `T` intact. How can we use Rust's `f` as the implementation of -Lean's `f`? - -One approach is to use a Rust function `g(&T) -> T`, implemented as follows: - -```rust -fn g(t: &T) -> T { - let mut clone = t.clone(); - f(&mut clone); - clone -} -``` - -Already we can see two problems. First, `g` requires `T` to implement `Clone`. -Second, even when `T: Clone`, cloning might be expensive. The fact is that the -implementation provided, Rust's `f`, was designed to mutate `T` and we shouldn't -be fighting against that. - -So Ix goes with the flow and mutates `T` with Rust's `f`. Consequently, Lean's -`f : T → T` will, in fact, mutate the input, which will be returned as the -output. A direct sin against the purity of the functional world. - -At this point, the best we can do is to create guardrails to protect us against -ourselves and force us to use terms of `T` linearly when `f` is involved. That -is, after applying `f (t : T)`, reusing `t` should be prohibitive. - -### The birth of `linear.h` - -We've explored the motivation for the API provided by `linear.h`, in which a -`linear_object` wraps the reference to the raw Rust object and has a -`bool outdated` attribute telling whether the linear object can be used or not. -Then, instead of `lean_external_object` pointing directly to the Rust object, it -points to a `linear_object`. When we ought to use the Rust object, we must -always "assert linearity", which panics if `outdated` is `true`. - -To illustrate it, let's use "E" for `lean_external_object`, "L" for -`linear_object` and "R" for potentially mutating Rust objects. Right after -initialization, we have: +used as a black box by Lean, `ExternalClass` is a useful abstraction. It +requires a function pointer for deallocation, meaning the Rust code must +provide a function that properly frees the object's memory by dropping it. 
+See [`KECCAK_CLASS`](src/ffi/keccak.rs) for an example. -``` -E0 ──> L0 (outdated = false) ──> R -``` +## Notes -Now suppose we need to mutate `R`. We do it and then we perform a "linear bump", -which copies `L` and sets it as outdated. Then we wrap it as another external -object: +By convention, names of external Rust functions start with `rs_`. -``` -E1 ──> L1 (outdated = false) ─┐ -E0 ──> L0 (outdated = true) ──┴> R -``` +### Inductive Types -And after `N` linear bumps: +Extra care must be taken when dealing with [inductive +types](https://lean-lang.org/doc/reference/latest/The-Type-System/Inductive-Types/#run-time-inductives) +as the runtime memory layout of constructor fields may not match the +declaration order in Lean. Fields are reordered into three groups: -``` -EN ──> LN (outdated = false) ─┐ -... ┆ -E2 ──> L2 (outdated = true) ──┤ -E1 ──> L1 (outdated = true) ──┤ -E0 ──> L0 (outdated = true) ──┴> R -``` +1. Non-scalar fields (lean_object *), in declaration order +2. `USize` fields, in declaration order +3. Other scalar fields, in decreasing order by size, then declaration order within each size -Great. Now imagine Lean wants to free these external objects. The function that -frees a linear object should only free the Rust object when `outdated == false`. -Following up with the image above, let's free `E1`. +This means a structure like -``` -EN ──> LN (outdated = false) ─┐ -... ┆ -E2 ──> L2 (outdated = true) ──┤ - │ -E0 ──> L0 (outdated = true) ──┴> R -``` - -When freeing `EN`, the Rust object will be deallocated: - -``` -... ┆ -E2 ──> L2 (outdated = true) ──┤ - │ -E0 ──> L0 (outdated = true) ──┴> X +```lean +structure Reorder where + flag : Bool + obj : Array Nat + size : UInt64 ``` -All remaining external objects are outdated so their respective linear objects -won't try to free the (already dropped) Rust object. +would be laid out as [obj, size, flag] at runtime — the `UInt64` is placed +before the `Bool`. Trivial wrapper types (e.g. 
`Char` wraps `UInt32`) count as +their underlying scalar type. -## What if a Rust function takes ownership of the object? +To avoid issues, define Lean structures with fields already in runtime order +(objects first, then scalars in decreasing size), so that declaration order +matches the reordered layout. -When ownership is required, we mutate the Rust object by "taking" or "replacing" -it with a dummy object. Concretely, `std::mem::take` or `std::mem::replace` are -used, returning the actual `T` from a `&mut T`. And with `T` at hand, the target -function can be called. +### Enum FFI convention -The latest linear object is marked as outdated and the chain of linear objects -is broken. But then, how will the residual Rust object be dropped once Lean -wants to drop all external objects? +Lean passes simple enums (inductives where all constructors have zero fields, +e.g. `DefKind`, `QuotKind`) as **raw unboxed tag values** (`0`, `1`, `2`, ...) +across the FFI boundary, not as `lean_box(tag)`. To decode, use +`obj.as_ptr() as usize`; to build, use `LeanObject::from_raw(tag as *const c_void)`. +Do **not** use `box_usize`/`unbox_usize` for these — doing so will silently +corrupt the value. -It turns out we also need a `bool finalize_even_if_outdated` attribute on the -`linear_object` struct, which becomes `true` in these scenarios. By doing this, -we're "ditching" the linear object. And the logic to free linear objects needs -one small adjustment: the Rust object must be dropped when either the linear -object is not outdated or when `finalize_even_if_outdated` is set to `true`. +### Reference counting for reused objects -The invariant that needs to be maintained is that *only one* linear object can -free the shared Rust object. - -## Preventing unintentional Lean optimizations - -We've done our lower level homework and now we have an `f : T → T` in Lean that -should panic at runtime when its input is reused. So we do: - -```lean4 - ... 
- let a := f t - let b := f t -- reuses `t`! - ... -``` - -We run the code and it executes smoothly. Why!? - -The Lean compilation process detects that both `a` and `b` are equal to `f t` so -instead of calling `f` a second time it just sets `b` with the value of `a`. It -appears to be harmless but in fact we want discourage this kind of source code -at all costs. - -Lean provides the tag `never_extract` precisely for this. It's used internally -when some function performs side-effects and should never be optimized away. - -And to conclude, there are cases in which this optimization is truly harmful. -Consider an initialization function `T.init : Unit → T` in the following code: - -```lean4 - ... - let t1 := T.init () - let t2 := T.init () - let a := f t1 - let b := f t2 - ... -``` +When building a new Lean object, if you construct all fields from scratch (e.g. +`LeanString::new(...)`, `LeanByteArray::from_bytes(...)`), ownership is +straightforward — the freshly allocated objects start with rc=1 and Lean manages +them from there. -If `T.init` is not tagged with `never_extract`, `t2` and `t1` will point to the -same object, the first call to `f` will mark it as outdated and thus the second -call will panic! +However, if you take a Lean object received as a **borrowed** argument (`@&` in +Lean, `b_lean_obj_arg` in C) and store it directly into a new object via +`.set()`, you must call `.inc_ref()` on it first. Otherwise Lean will free the +original while the new object still references it. If you only read/decode the +argument into Rust types and then build fresh Lean objects, this does not apply. -So the `never_extract` tag must be applied to functions that: +### `lean_string_size` vs `lean_string_byte_size` -* Mutate their input or -* Return objects that work on the basis of mutation +`lean_string_byte_size` returns the **total object memory size** +(`sizeof(lean_string_object) + m_size`), not the string data length. 
+Use `lean_string_size` instead, which returns `m_size` — the number of data +bytes including the NUL terminator. The `LeanString::byte_len()` wrapper handles +this correctly by returning `lean_string_size(obj) - 1`. diff --git a/flake.nix b/flake.nix index d4e6a802..62258918 100644 --- a/flake.nix +++ b/flake.nix @@ -73,6 +73,11 @@ inherit src; strictDeps = true; + # build.rs uses LEAN_SYSROOT to locate lean/lean.h for bindgen + LEAN_SYSROOT = "${pkgs.lean.lean-all}"; + # bindgen needs libclang to parse C headers + LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib"; + buildInputs = [] ++ pkgs.lib.optionals pkgs.stdenv.isDarwin [ @@ -138,6 +143,10 @@ # Provide a unified dev shell with Lean + Rust devShells.default = pkgs.mkShell { + # Disable fortify hardening as it causes warnings with cargo debug builds + hardeningDisable = ["fortify"]; + # Add libclang for FFI with rust-bindgen + LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib"; packages = with pkgs; [ pkg-config openssl @@ -147,6 +156,7 @@ lean.lean-all # Includes Lean compiler, lake, stdlib, etc. 
gmp cargo-deny + valgrind ]; }; diff --git a/lake-manifest.json b/lake-manifest.json index fcd9c3a4..7af6c508 100644 --- a/lake-manifest.json +++ b/lake-manifest.json @@ -35,10 +35,10 @@ "type": "git", "subDir": null, "scope": "", - "rev": "41c8a9b2f08679212e075ff89fa33694a2536d64", + "rev": "928f27c7de8318455ba0be7461dbdf7096f4075a", "name": "LSpec", "manifestFile": "lake-manifest.json", - "inputRev": "41c8a9b2f08679212e075ff89fa33694a2536d64", + "inputRev": "928f27c7de8318455ba0be7461dbdf7096f4075a", "inherited": false, "configFile": "lakefile.toml"}], "name": "ix", diff --git a/lakefile.lean b/lakefile.lean index 87cb7a6a..16d37449 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -12,7 +12,7 @@ lean_exe ix where supportInterpreter := true require LSpec from git - "https://github.com/argumentcomputer/LSpec" @ "41c8a9b2f08679212e075ff89fa33694a2536d64" + "https://github.com/argumentcomputer/LSpec" @ "928f27c7de8318455ba0be7461dbdf7096f4075a" require Blake3 from git "https://github.com/argumentcomputer/Blake3.lean" @ "564e0ab364ebaa3b1153defe2f49c9fe58a2d77c" @@ -66,37 +66,6 @@ end IxApplications section FFI -/-- Build the static lib for the C files -/ -extern_lib ix_c pkg := do - let compiler := "gcc" - let cDir := pkg.dir / "c" - let buildCDir := pkg.buildDir / "c" - let weakArgs := #["-fPIC", "-I", (← getLeanIncludeDir).toString, "-I", cDir.toString] - - let cDirEntries ← cDir.readDir - - -- Include every C header file in the trace mix - let extraDepTrace := cDirEntries.foldl (init := getLeanTrace) fun acc dirEntry => - let filePath := dirEntry.path - if filePath.extension == some "h" then do - let x ← acc - let y ← computeTrace $ TextFilePath.mk filePath - pure $ x.mix y - else acc - - -- Collect a build job for every C file in `cDir` - let mut buildJobs := #[] - for dirEntry in cDirEntries do - let filePath := dirEntry.path - if filePath.extension == some "c" then - let oFile := buildCDir / dirEntry.fileName |>.withExtension "o" - let srcJob ← inputTextFile 
filePath - let buildJob ← buildO oFile srcJob weakArgs #[] compiler extraDepTrace - buildJobs := buildJobs.push buildJob - - let libName := nameToStaticLib "ix_c" - buildStaticLib (pkg.staticLibDir / libName) buildJobs - /-- Build the static lib for the Rust crate -/ extern_lib ix_rs pkg := do -- Defaults to `--features parallel`, configured via env var @@ -149,23 +118,6 @@ script install := do setAccessRights tgtPath fileRight return 0 -script "check-lean-h-hash" := do - let cachedLeanHHash := 14792798158057885278 - - let leanIncludeDir ← getLeanIncludeDir - let includedLeanHPath := leanIncludeDir / "lean" / "lean.h" - let includedLeanHBytes ← IO.FS.readBinFile includedLeanHPath - let includedLeanHHash := includedLeanHBytes.hash - - if cachedLeanHHash ≠ includedLeanHHash then - IO.eprintln "Mismatching lean/lean.h hash" - IO.eprintln " 1. Double-check changes made to lean/lean.h" - IO.eprintln s!" 2. Cache {includedLeanHHash} instead" - return 1 - else - IO.println "lean/lean.h hash matches ✓" - return 0 - script "get-exe-targets" := do let pkg ← getRootPackage let exeTargets := pkg.configTargets LeanExe.configKind @@ -173,6 +125,7 @@ script "get-exe-targets" := do IO.println <| tgt.name.toString |>.dropPrefix "«" |>.dropSuffix "»" |>.toString return 0 +@[lint_driver] script "build-all" (args) := do let pkg ← getRootPackage let libNames := pkg.configTargets LeanLib.configKind |>.map (·.name.toString) diff --git a/lean-ffi/Cargo.toml b/lean-ffi/Cargo.toml new file mode 100644 index 00000000..fc2c4939 --- /dev/null +++ b/lean-ffi/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "lean-ffi" +version = "0.1.0" +edition = "2024" + +[dependencies] +num-bigint = "0.4.6" + +[build-dependencies] +bindgen = "0.71" +cc = "1" diff --git a/lean-ffi/LICENSE-APACHE b/lean-ffi/LICENSE-APACHE new file mode 100644 index 00000000..4252ff0c --- /dev/null +++ b/lean-ffi/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + 
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Argument Computer Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lean-ffi/LICENSE-MIT b/lean-ffi/LICENSE-MIT new file mode 100644 index 00000000..829c2986 --- /dev/null +++ b/lean-ffi/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Argument Computer Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/lean-ffi/build.rs b/lean-ffi/build.rs new file mode 100644 index 00000000..e8b98e57 --- /dev/null +++ b/lean-ffi/build.rs @@ -0,0 +1,60 @@ +use std::{env, path::PathBuf, process::Command}; + +fn find_lean_include_dir() -> PathBuf { + // 1. Try LEAN_SYSROOT env var + if let Ok(sysroot) = env::var("LEAN_SYSROOT") { + let inc = PathBuf::from(sysroot).join("include"); + if inc.exists() { + return inc; + } + } + // 2. Try `lean --print-prefix` + if let Ok(output) = Command::new("lean").arg("--print-prefix").output() + && output.status.success() + { + let prefix = String::from_utf8_lossy(&output.stdout).trim().to_string(); + let inc = PathBuf::from(prefix).join("include"); + if inc.exists() { + return inc; + } + } + panic!( + "Cannot find Lean include directory. \ + Set LEAN_SYSROOT or ensure `lean` is on PATH." + ); +} + +fn main() { + let lean_include = find_lean_include_dir(); + let lean_h = lean_include.join("lean").join("lean.h"); + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + let wrapper_c = out_dir.join("lean_static_fns.c"); + + // Generate C wrappers for lean.h's static inline functions and + // Rust bindings for all types and functions. 
+ bindgen::Builder::default() + .header(lean_h.to_str().unwrap()) + .clang_arg(format!("-I{}", lean_include.display())) + .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) + .wrap_static_fns(true) + .wrap_static_fns_path(&wrapper_c) + // lean_get_rc_mt_addr returns `_Atomic(int)*` which bindgen + // cannot wrap. Types using `_Atomic` are made opaque. + .blocklist_function("lean_get_rc_mt_addr") + .opaque_type("lean_thunk_object") + .opaque_type("lean_task_object") + .generate() + .expect("bindgen failed to process lean.h") + .write_to_file(out_dir.join("lean.rs")) + .expect("Couldn't write bindings"); + + // Compile the generated C wrappers into a static library. + cc::Build::new() + .file(&wrapper_c) + .include(&lean_include) + .compile("lean_static_fns"); + + println!("cargo:rerun-if-env-changed=LEAN_SYSROOT"); + println!("cargo:rerun-if-changed={}", lean_h.display()); + println!("cargo:rerun-if-changed=build.rs"); +} diff --git a/lean-ffi/src/lib.rs b/lean-ffi/src/lib.rs new file mode 100644 index 00000000..3926e554 --- /dev/null +++ b/lean-ffi/src/lib.rs @@ -0,0 +1,75 @@ +//! Low-level Lean FFI bindings and type-safe wrappers. +//! +//! The `include` submodule contains auto-generated bindings from `lean.h` via +//! bindgen. Higher-level helpers are in `object` and `nat`. + +#[allow( + non_upper_case_globals, + non_camel_case_types, + non_snake_case, + dead_code, + unsafe_op_in_unsafe_fn, + unused_qualifications, + clippy::all, + clippy::ptr_as_ptr, + clippy::cast_possible_wrap, + clippy::cast_possible_truncation, + clippy::derive_partial_eq_without_eq +)] +pub mod include { + include!(concat!(env!("OUT_DIR"), "/lean.rs")); +} + +pub mod nat; +pub mod object; + +use std::ffi::{CString, c_void}; + +/// Create a CString from a str, stripping any interior null bytes. +/// Lean strings are length-prefixed and can contain null bytes, but the +/// `lean_mk_string` FFI requires a null-terminated C string. 
This function
+/// ensures conversion always succeeds by filtering out interior nulls.
+pub fn safe_cstring(s: &str) -> CString {
+    CString::new(s).unwrap_or_else(|_| {
+        let bytes: Vec<u8> = s.bytes().filter(|&b| b != 0).collect();
+        CString::new(bytes).expect("filtered string should have no nulls")
+    })
+}
+
+/// No-op foreach callback for external classes that hold no Lean references.
+///
+/// # Safety
+/// Must only be used as a `lean_external_foreach_fn` callback.
+pub unsafe extern "C" fn noop_foreach(
+    _: *mut c_void,
+    _: *mut include::lean_object,
+) {
+}
+
+/// Generate a `#[repr(transparent)]` newtype over `LeanObject` for a specific
+/// Lean type, with `Deref`, `From`, and a `new` constructor.
+#[macro_export]
+macro_rules! lean_domain_type {
+    ($($(#[$meta:meta])* $name:ident;)*) => {$(
+        $(#[$meta])*
+        #[derive(Clone, Copy)]
+        #[repr(transparent)]
+        pub struct $name($crate::object::LeanObject);
+
+        impl std::ops::Deref for $name {
+            type Target = $crate::object::LeanObject;
+            #[inline]
+            fn deref(&self) -> &$crate::object::LeanObject { &self.0 }
+        }
+
+        impl From<$name> for $crate::object::LeanObject {
+            #[inline]
+            fn from(x: $name) -> Self { x.0 }
+        }
+
+        impl $name {
+            #[inline]
+            pub fn new(obj: $crate::object::LeanObject) -> Self { Self(obj) }
+        }
+    )*};
+}
diff --git a/lean-ffi/src/nat.rs b/lean-ffi/src/nat.rs
new file mode 100644
index 00000000..a099ccc4
--- /dev/null
+++ b/lean-ffi/src/nat.rs
@@ -0,0 +1,161 @@
+//! Lean `Nat` (arbitrary-precision natural number) representation.
+//!
+//! Lean stores small naturals as tagged scalars and large ones as GMP
+//! `mpz_object`s on the heap. This module handles both representations.
+
+use std::ffi::c_int;
+use std::fmt;
+use std::mem::MaybeUninit;
+
+use num_bigint::BigUint;
+
+use crate::object::LeanObject;
+
+/// Arbitrary-precision natural number, wrapping `BigUint`.
+#[derive(Hash, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)]
+pub struct Nat(pub BigUint);
+
+impl fmt::Display for Nat {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl From<u64> for Nat {
+    fn from(x: u64) -> Self {
+        Nat(BigUint::from(x))
+    }
+}
+
+impl Nat {
+    pub const ZERO: Self = Self(BigUint::ZERO);
+
+    /// Try to convert to u64, returning None if the value is too large.
+    #[inline]
+    pub fn to_u64(&self) -> Option<u64> {
+        u64::try_from(&self.0).ok()
+    }
+
+    /// Decode a `Nat` from a `LeanObject`. Handles both scalar (unboxed)
+    /// and heap-allocated (GMP `mpz_object`) representations.
+    pub fn from_obj(obj: LeanObject) -> Nat {
+        if obj.is_scalar() {
+            let u = obj.unbox_usize();
+            Nat(BigUint::from_bytes_le(&u.to_le_bytes()))
+        } else {
+            // Heap-allocated big integer (mpz_object)
+            let mpz: &MpzObject = unsafe { &*obj.as_ptr().cast() };
+            Nat(mpz.m_value.to_biguint())
+        }
+    }
+
+    #[inline]
+    pub fn from_le_bytes(bytes: &[u8]) -> Nat {
+        Nat(BigUint::from_bytes_le(bytes))
+    }
+
+    #[inline]
+    pub fn to_le_bytes(&self) -> Vec<u8> {
+        self.0.to_bytes_le()
+    }
+}
+
+/// From https://github.com/leanprover/lean4/blob/master/src/runtime/object.h:
+/// ```cpp
+/// struct mpz_object {
+///     lean_object m_header;
+///     mpz m_value;
+///     mpz_object() {}
+///     explicit mpz_object(mpz const & m):m_value(m) {}
+/// };
+/// ```
+#[repr(C)]
+struct MpzObject {
+    _header: [u8; 8],
+    m_value: Mpz,
+}
+
+#[repr(C)]
+struct Mpz {
+    alloc: i32,
+    size: i32,
+    d: *const u64,
+}
+
+impl Mpz {
+    fn to_biguint(&self) -> BigUint {
+        let nlimbs = self.size.unsigned_abs() as usize;
+        let limbs = unsafe { std::slice::from_raw_parts(self.d, nlimbs) };
+
+        // Convert limbs (little-endian by limb)
+        let bytes: Vec<_> =
+            limbs.iter().flat_map(|&limb| limb.to_le_bytes()).collect();
+
+        BigUint::from_bytes_le(&bytes)
+    }
+}
+
+// =============================================================================
+// GMP interop for building Lean Nat objects from limbs
+// =============================================================================
+
+use crate::include::lean_uint64_to_nat;
+
+/// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1
+const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64;
+
+unsafe extern "C" {
+    #[link_name = "__gmpz_init"]
+    fn mpz_init(x: *mut Mpz);
+
+    #[link_name = "__gmpz_import"]
+    fn mpz_import(
+        rop: *mut Mpz,
+        count: usize,
+        order: c_int,
+        size: usize,
+        endian: c_int,
+        nails: usize,
+        op: *const u64,
+    );
+
+    #[link_name = "__gmpz_clear"]
+    fn mpz_clear(x: *mut Mpz);
+
+    /// Lean's internal mpz allocation — deep-copies the mpz value.
+    /// Caller must still call mpz_clear on the original.
+    fn lean_alloc_mpz(v: *mut Mpz) -> *mut std::ffi::c_void;
+}
+
+/// Create a Lean `Nat` from a little-endian array of u64 limbs.
+/// Replaces the C function `c_lean_nat_from_limbs` from `ixon_ffi.c`.
+/// # Safety
+/// `limbs` must be valid for reading `num_limbs` elements.
+pub unsafe fn lean_nat_from_limbs(
+    num_limbs: usize,
+    limbs: *const u64,
+) -> LeanObject {
+    if num_limbs == 0 {
+        return LeanObject::box_usize(0);
+    }
+    let first = unsafe { *limbs };
+    if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT {
+        #[allow(clippy::cast_possible_truncation)] // only targets 64-bit
+        return LeanObject::box_usize(first as usize);
+    }
+    if num_limbs == 1 {
+        return unsafe { LeanObject::from_lean_ptr(lean_uint64_to_nat(first)) };
+    }
+    // Multi-limb: use GMP
+    unsafe {
+        let mut value = MaybeUninit::<Mpz>::uninit();
+        mpz_init(value.as_mut_ptr());
+        // order = -1 (least significant limb first)
+        // size = 8 bytes per limb, endian = 0 (native), nails = 0
+        mpz_import(value.as_mut_ptr(), num_limbs, -1, 8, 0, 0, limbs);
+        // lean_alloc_mpz deep-copies; we must free the original
+        let result = lean_alloc_mpz(value.as_mut_ptr());
+        mpz_clear(value.as_mut_ptr());
+        LeanObject::from_raw(result)
+    }
+}
diff --git a/lean-ffi/src/object.rs b/lean-ffi/src/object.rs
new file mode 100644
index 00000000..6e73647e
--- /dev/null +++ b/lean-ffi/src/object.rs @@ -0,0 +1,1055 @@ +//! Type-safe wrappers for Lean FFI object pointers. +//! +//! Each wrapper is a `#[repr(transparent)]` `Copy` newtype over `*const c_void` +//! that asserts the correct Lean tag on construction and provides safe accessor +//! methods. Reference counting is left to Lean (no `Drop` impl). + +use std::ffi::c_void; +use std::marker::PhantomData; +use std::ops::Deref; + +use crate::include; +use crate::safe_cstring; + +// Tag constants from lean.h +const LEAN_MAX_CTOR_TAG: u8 = 243; +const LEAN_TAG_ARRAY: u8 = 246; +const LEAN_TAG_SCALAR_ARRAY: u8 = 248; +const LEAN_TAG_STRING: u8 = 249; +const LEAN_TAG_EXTERNAL: u8 = 254; + +/// Constructor tag for `IO.Error.userError`. +const IO_ERROR_USER_ERROR_TAG: u8 = 7; + +// ============================================================================= +// LeanObject — Untyped base wrapper +// ============================================================================= + +/// Untyped wrapper around a raw Lean object pointer. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanObject(*const c_void); + +impl LeanObject { + /// Wrap a raw pointer without any tag check. + /// + /// # Safety + /// The pointer must be a valid Lean object (or tagged scalar). + #[inline] + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + Self(ptr) + } + + /// Wrap a `*mut lean_object` returned from a `lean_ffi` function. + /// + /// # Safety + /// The pointer must be a valid Lean object (or tagged scalar). + #[inline] + pub unsafe fn from_lean_ptr(ptr: *mut include::lean_object) -> Self { + Self(ptr.cast()) + } + + /// Create a Lean `Nat` from a `u64` value. + /// + /// Small values are stored as tagged scalars; larger ones are heap-allocated + /// via the Lean runtime. 
+ #[inline] + pub fn from_nat_u64(n: u64) -> Self { + unsafe { Self::from_lean_ptr(include::lean_uint64_to_nat(n)) } + } + + #[inline] + pub fn as_ptr(self) -> *const c_void { + self.0 + } + + #[inline] + pub fn as_mut_ptr(self) -> *mut c_void { + self.0 as *mut c_void + } + + /// True if this is a tagged scalar (bit 0 set). + #[inline] + pub fn is_scalar(self) -> bool { + self.0 as usize & 1 == 1 + } + + /// Return the object tag. Panics if the object is a scalar. + #[inline] + pub fn tag(self) -> u8 { + assert!(!self.is_scalar(), "tag() called on scalar"); + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_obj_tag(self.0 as *mut _) as u8 + } + } + + #[inline] + pub fn inc_ref(self) { + if !self.is_scalar() { + unsafe { include::lean_inc_ref(self.0 as *mut _) } + } + } + + #[inline] + pub fn dec_ref(self) { + if !self.is_scalar() { + unsafe { include::lean_dec_ref(self.0 as *mut _) } + } + } + + /// Create a `LeanObject` from a raw tag value for zero-field enum constructors. + /// Lean passes simple enums (all constructors have zero fields) as unboxed + /// tag values (0, 1, 2, ...) across FFI, not as `lean_box(tag)`. + #[inline] + pub fn from_enum_tag(tag: usize) -> Self { + Self(tag as *const c_void) + } + + /// Extract the raw tag value from a zero-field enum constructor. + /// Inverse of `from_enum_tag`. + #[inline] + pub fn as_enum_tag(self) -> usize { + self.0 as usize + } + + /// Box a `usize` into a tagged scalar pointer. + #[inline] + pub fn box_usize(n: usize) -> Self { + Self(((n << 1) | 1) as *const c_void) + } + + /// Unbox a tagged scalar pointer into a `usize`. + #[inline] + pub fn unbox_usize(self) -> usize { + self.0 as usize >> 1 + } + + #[inline] + pub fn box_u64(n: u64) -> Self { + Self(unsafe { include::lean_box_uint64(n) }.cast()) + } + + #[inline] + pub fn unbox_u64(self) -> u64 { + unsafe { include::lean_unbox_uint64(self.0 as *mut _) } + } + + /// Interpret as a constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). 
+ /// + /// Debug-asserts the tag is in range. + #[inline] + pub fn as_ctor(self) -> LeanCtor { + debug_assert!(!self.is_scalar() && self.tag() <= LEAN_MAX_CTOR_TAG); + LeanCtor(self) + } + + /// Interpret as a `String` object (tag `LEAN_TAG_STRING`). + /// + /// Debug-asserts the tag is correct. + #[inline] + pub fn as_string(self) -> LeanString { + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_STRING); + LeanString(self) + } + + /// Interpret as an `Array` object (tag `LEAN_TAG_ARRAY`). + /// + /// Debug-asserts the tag is correct. + #[inline] + pub fn as_array(self) -> LeanArray { + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_ARRAY); + LeanArray(self) + } + + /// Interpret as a `List` (nil = scalar, cons = tag 1). + /// + /// Debug-asserts the tag is valid for a list. + #[inline] + pub fn as_list(self) -> LeanList { + debug_assert!(self.is_scalar() || self.tag() == 1); + LeanList(self) + } + + /// Interpret as a `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`). + #[inline] + pub fn as_byte_array(self) -> LeanByteArray { + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_SCALAR_ARRAY); + LeanByteArray(self) + } + + #[inline] + pub fn box_u32(n: u32) -> Self { + Self(unsafe { include::lean_box_uint32(n) }.cast()) + } + + #[inline] + pub fn unbox_u32(self) -> u32 { + unsafe { include::lean_unbox_uint32(self.0 as *mut _) } + } +} + +// ============================================================================= +// LeanNat — Nat (scalar or heap mpz) +// ============================================================================= + +/// Typed wrapper for a Lean `Nat` (small = tagged scalar, big = heap `mpz_object`). 
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct LeanNat(LeanObject);
+
+impl Deref for LeanNat {
+    type Target = LeanObject;
+    #[inline]
+    fn deref(&self) -> &LeanObject {
+        &self.0
+    }
+}
+
+impl From<LeanNat> for LeanObject {
+    #[inline]
+    fn from(x: LeanNat) -> Self {
+        x.0
+    }
+}
+
+impl LeanNat {
+    /// Wrap a raw `LeanObject` as a `LeanNat`.
+    #[inline]
+    pub fn new(obj: LeanObject) -> Self {
+        Self(obj)
+    }
+}
+
+// =============================================================================
+// LeanBool — Bool (unboxed scalar: false = 0, true = 1)
+// =============================================================================
+
+/// Typed wrapper for a Lean `Bool` (always an unboxed scalar: false = 0, true = 1).
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct LeanBool(LeanObject);
+
+impl Deref for LeanBool {
+    type Target = LeanObject;
+    #[inline]
+    fn deref(&self) -> &LeanObject {
+        &self.0
+    }
+}
+
+impl From<LeanBool> for LeanObject {
+    #[inline]
+    fn from(x: LeanBool) -> Self {
+        x.0
+    }
+}
+
+impl LeanBool {
+    /// Wrap a raw `LeanObject` as a `LeanBool`.
+    #[inline]
+    pub fn new(obj: LeanObject) -> Self {
+        Self(obj)
+    }
+}
+
+impl LeanBool {
+    /// Decode to a Rust `bool`.
+    #[inline]
+    pub fn to_bool(self) -> bool {
+        self.0.as_enum_tag() != 0
+    }
+}
+
+// =============================================================================
+// LeanArray — Array α (tag LEAN_TAG_ARRAY)
+// =============================================================================
+
+/// Typed wrapper for a Lean `Array α` object (tag `LEAN_TAG_ARRAY`).
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct LeanArray(LeanObject);
+
+impl Deref for LeanArray {
+    type Target = LeanObject;
+    #[inline]
+    fn deref(&self) -> &LeanObject {
+        &self.0
+    }
+}
+
+impl LeanArray {
+    /// Wrap a raw pointer, asserting it is an `Array` (tag `LEAN_TAG_ARRAY`).
+    ///
+    /// # Safety
+    /// The pointer must be a valid Lean `Array` object.
+    pub unsafe fn from_raw(ptr: *const c_void) -> Self {
+        let obj = LeanObject(ptr);
+        debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_ARRAY);
+        Self(obj)
+    }
+
+    /// Allocate a new array with `size` elements (capacity = size).
+    pub fn alloc(size: usize) -> Self {
+        let obj = unsafe { include::lean_alloc_array(size, size) };
+        Self(LeanObject(obj.cast()))
+    }
+
+    pub fn len(&self) -> usize {
+        unsafe { include::lean_array_size(self.0.as_ptr() as *mut _) }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    pub fn get(&self, i: usize) -> LeanObject {
+        LeanObject(
+            unsafe { include::lean_array_get_core(self.0.as_ptr() as *mut _, i) }
+                .cast(),
+        )
+    }
+
+    pub fn set(&self, i: usize, val: impl Into<LeanObject>) {
+        let val: LeanObject = val.into();
+        unsafe {
+            include::lean_array_set_core(
+                self.0.as_ptr() as *mut _,
+                i,
+                val.as_ptr() as *mut _,
+            );
+        }
+    }
+
+    /// Return a slice over the array elements.
+    pub fn data(&self) -> &[LeanObject] {
+        unsafe {
+            let cptr = include::lean_array_cptr(self.0.as_ptr() as *mut _);
+            // Safety: LeanObject is repr(transparent) over *const c_void, and
+            // lean_array_cptr returns *mut *mut lean_object which has the same layout.
+            std::slice::from_raw_parts(cptr.cast(), self.len())
+        }
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = LeanObject> + '_ {
+        self.data().iter().copied()
+    }
+
+    pub fn map<T>(&self, f: impl Fn(LeanObject) -> T) -> Vec<T> {
+        self.iter().map(f).collect()
+    }
+}
+
+// =============================================================================
+// LeanByteArray — ByteArray (tag LEAN_TAG_SCALAR_ARRAY)
+// =============================================================================
+
+/// Typed wrapper for a Lean `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`).
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct LeanByteArray(LeanObject);

impl Deref for LeanByteArray {
    type Target = LeanObject;
    #[inline]
    fn deref(&self) -> &LeanObject {
        &self.0
    }
}

impl LeanByteArray {
    /// Wrap a raw pointer, asserting it is a `ByteArray` (tag `LEAN_TAG_SCALAR_ARRAY`).
    ///
    /// # Safety
    /// The pointer must be a valid Lean `ByteArray` object.
    pub unsafe fn from_raw(ptr: *const c_void) -> Self {
        let obj = LeanObject(ptr);
        debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_SCALAR_ARRAY);
        Self(obj)
    }

    /// Allocate a new byte array with `size` bytes (capacity = size).
    pub fn alloc(size: usize) -> Self {
        // Element size 1: a ByteArray is a scalar array of u8.
        let obj = unsafe { include::lean_alloc_sarray(1, size, size) };
        Self(LeanObject(obj.cast()))
    }

    /// Allocate a new byte array and copy `data` into it.
    pub fn from_bytes(data: &[u8]) -> Self {
        let arr = Self::alloc(data.len());
        unsafe {
            let cptr = include::lean_sarray_cptr(arr.0.as_ptr() as *mut _);
            std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len());
        }
        arr
    }

    /// Number of bytes stored.
    pub fn len(&self) -> usize {
        unsafe { include::lean_sarray_size(self.0.as_ptr() as *mut _) }
    }

    /// True when the byte array is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the byte contents as a slice.
    pub fn as_bytes(&self) -> &[u8] {
        unsafe {
            let cptr = include::lean_sarray_cptr(self.0.as_ptr() as *mut _);
            std::slice::from_raw_parts(cptr, self.len())
        }
    }

    /// Copy `data` into the byte array and update its size.
    ///
    /// # Safety
    /// The caller must ensure the array has sufficient capacity for `data`.
    pub unsafe fn set_data(&self, data: &[u8]) {
        unsafe {
            let obj = self.0.as_mut_ptr();
            let cptr = include::lean_sarray_cptr(obj.cast());
            std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len());
            // Update m_size: at offset 8 (after lean_object header)
            *obj.cast::<u8>().add(8).cast::<usize>() = data.len();
        }
    }
}

// =============================================================================
// LeanString — String (tag LEAN_TAG_STRING)
// =============================================================================

/// Typed wrapper for a Lean `String` object (tag `LEAN_TAG_STRING`).
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct LeanString(LeanObject);

impl Deref for LeanString {
    type Target = LeanObject;
    #[inline]
    fn deref(&self) -> &LeanObject {
        &self.0
    }
}

impl LeanString {
    /// Wrap a raw pointer, asserting it is a `String` (tag `LEAN_TAG_STRING`).
    ///
    /// # Safety
    /// The pointer must be a valid Lean `String` object.
    pub unsafe fn from_raw(ptr: *const c_void) -> Self {
        let obj = LeanObject(ptr);
        debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_STRING);
        Self(obj)
    }

    /// Create a Lean string from a Rust `&str`.
    pub fn new(s: &str) -> Self {
        let c = safe_cstring(s);
        let obj = unsafe { include::lean_mk_string(c.as_ptr()) };
        Self(LeanObject(obj.cast()))
    }

    /// Number of data bytes (excluding the trailing NUL).
+ pub fn byte_len(&self) -> usize { + unsafe { include::lean_string_size(self.0.as_ptr() as *mut _) - 1 } + } +} + +impl std::fmt::Display for LeanString { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + unsafe { + let obj = self.0.as_ptr() as *mut _; + let len = include::lean_string_size(obj) - 1; // m_size includes NUL + let data = include::lean_string_cstr(obj); + let bytes = std::slice::from_raw_parts(data.cast::(), len); + let s = std::str::from_utf8_unchecked(bytes); + f.write_str(s) + } + } +} + +// ============================================================================= +// LeanCtor — Constructor objects (tag 0–LEAN_MAX_CTOR_TAG) +// ============================================================================= + +/// Typed wrapper for a Lean constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanCtor(LeanObject); + +impl Deref for LeanCtor { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanCtor { + /// Wrap a raw pointer, asserting it is a constructor (tag <= `LEAN_MAX_CTOR_TAG`). + /// + /// # Safety + /// The pointer must be a valid Lean constructor object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() <= LEAN_MAX_CTOR_TAG); + Self(obj) + } + + /// Allocate a new constructor object. + pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { + #[allow(clippy::cast_possible_truncation)] + let obj = unsafe { + include::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) + }; + Self(LeanObject(obj.cast())) + } + + pub fn tag(&self) -> u8 { + self.0.tag() + } + + /// Get the `i`-th object field via `lean_ctor_get`. 
+ pub fn get(&self, i: usize) -> LeanObject { + #[allow(clippy::cast_possible_truncation)] + LeanObject( + unsafe { include::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } + .cast(), + ) + } + + /// Set the `i`-th object field via `lean_ctor_set`. + pub fn set(&self, i: usize, val: impl Into) { + let val: LeanObject = val.into(); + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set( + self.0.as_ptr() as *mut _, + i as u32, + val.as_ptr() as *mut _, + ); + } + } + + /// Set a `u8` scalar field at the given byte offset (past all object fields). + pub fn set_u8(&self, offset: usize, val: u8) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set_uint8( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); + } + } + + /// Set a `u32` scalar field at the given byte offset (past all object fields). + pub fn set_u32(&self, offset: usize, val: u32) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set_uint32( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); + } + } + + /// Set a `u64` scalar field at the given byte offset (past all object fields). + pub fn set_u64(&self, offset: usize, val: u64) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set_uint64( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); + } + } + + /// Read `N` object-field pointers using raw pointer math. + /// + /// This bypasses `lean_ctor_get`'s bounds check, which is necessary when + /// reading past the declared object fields into the scalar area (e.g. for + /// `Expr.Data`). + pub fn objs(&self) -> [LeanObject; N] { + let base = unsafe { self.0.as_ptr().cast::<*const c_void>().add(1) }; + std::array::from_fn(|i| LeanObject(unsafe { *base.add(i) })) + } + + /// Read a `u64` scalar at `offset` bytes past `num_objs` object fields. 
+ pub fn scalar_u64(&self, num_objs: usize, offset: usize) -> u64 { + unsafe { + std::ptr::read_unaligned( + self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), + ) + } + } + + /// Read a `u32` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_u32(&self, num_objs: usize, offset: usize) -> u32 { + unsafe { + std::ptr::read_unaligned( + self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), + ) + } + } + + /// Read a `u8` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_u8(&self, num_objs: usize, offset: usize) -> u8 { + unsafe { *self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset) } + } + + /// Read a `bool` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_bool(&self, num_objs: usize, offset: usize) -> bool { + self.scalar_u8(num_objs, offset) != 0 + } +} + +// ============================================================================= +// LeanExternal — External objects (tag LEAN_TAG_EXTERNAL) +// ============================================================================= + +/// Typed wrapper for a Lean external object (tag `LEAN_TAG_EXTERNAL`) holding a `T`. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanExternal(LeanObject, PhantomData); + +impl Deref for LeanExternal { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanExternal { + /// Wrap a raw pointer, asserting it is an external object (tag `LEAN_TAG_EXTERNAL`). + /// + /// # Safety + /// The pointer must be a valid Lean external object whose data pointer + /// points to a valid `T`. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_EXTERNAL); + Self(obj, PhantomData) + } + + /// Allocate a new external object holding `data`. 
+ pub fn alloc(class: &ExternalClass, data: T) -> Self { + let data_ptr = Box::into_raw(Box::new(data)); + let obj = + unsafe { include::lean_alloc_external(class.0.cast(), data_ptr.cast()) }; + Self(LeanObject(obj.cast()), PhantomData) + } + + /// Get a reference to the wrapped data. + pub fn get(&self) -> &T { + unsafe { + &*include::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() + } + } +} + +// ============================================================================= +// ExternalClass — Registered external class +// ============================================================================= + +/// A registered Lean external class (wraps `lean_external_class*`). +pub struct ExternalClass(*mut c_void); + +// Safety: the class pointer is initialized once and read-only thereafter. +unsafe impl Send for ExternalClass {} +unsafe impl Sync for ExternalClass {} + +impl ExternalClass { + /// Register a new external class with explicit finalizer and foreach callbacks. + /// + /// # Safety + /// The `finalizer` callback must correctly free the external data, and + /// `foreach` must correctly visit any Lean object references held by the data. + pub unsafe fn register( + finalizer: include::lean_external_finalize_proc, + foreach: include::lean_external_foreach_proc, + ) -> Self { + Self( + unsafe { include::lean_register_external_class(finalizer, foreach) } + .cast(), + ) + } + + /// Register a new external class that uses `Drop` to finalize `T` + /// and has no Lean object references to visit. 
+ pub fn register_with_drop() -> Self { + unsafe extern "C" fn drop_finalizer(ptr: *mut c_void) { + if !ptr.is_null() { + drop(unsafe { Box::from_raw(ptr.cast::()) }); + } + } + unsafe { + Self::register(Some(drop_finalizer::), Some(crate::noop_foreach)) + } + } +} + +// ============================================================================= +// LeanList — List α +// ============================================================================= + +/// Typed wrapper for a Lean `List α` (nil = scalar `lean_box(0)`, cons = ctor tag 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanList(LeanObject); + +impl Deref for LeanList { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanList { + /// Wrap a raw pointer, asserting it is a valid `List` (scalar nil or ctor tag 1). + /// + /// # Safety + /// The pointer must be a valid Lean `List` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(obj.is_scalar() || obj.tag() == 1); + Self(obj) + } + + /// The empty list. + pub fn nil() -> Self { + Self(LeanObject::box_usize(0)) + } + + /// Prepend `head` to `tail`. + pub fn cons(head: impl Into, tail: LeanList) -> Self { + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, head); + ctor.set(1, tail); + Self(ctor.0) + } + + pub fn is_nil(&self) -> bool { + self.0.is_scalar() + } + + pub fn iter(&self) -> LeanListIter { + LeanListIter(self.0) + } + + pub fn collect(&self, f: impl Fn(LeanObject) -> T) -> Vec { + self.iter().map(f).collect() + } +} + +impl> FromIterator for LeanList { + fn from_iter>(iter: I) -> Self { + let items: Vec = iter.into_iter().map(Into::into).collect(); + let mut list = Self::nil(); + for item in items.into_iter().rev() { + list = Self::cons(item, list); + } + list + } +} + +/// Iterator over the elements of a `LeanList`. 
+pub struct LeanListIter(LeanObject); + +impl Iterator for LeanListIter { + type Item = LeanObject; + fn next(&mut self) -> Option { + if self.0.is_scalar() { + return None; + } + let ctor = self.0.as_ctor(); + let [head, tail] = ctor.objs::<2>(); + self.0 = tail; + Some(head) + } +} + +// ============================================================================= +// LeanOption — Option α +// ============================================================================= + +/// Typed wrapper for a Lean `Option α` (none = scalar, some = ctor tag 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanOption(LeanObject); + +impl Deref for LeanOption { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanOption { + /// Wrap a raw pointer, asserting it is a valid `Option`. + /// + /// # Safety + /// The pointer must be a valid Lean `Option` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(obj.is_scalar() || obj.tag() == 1); + Self(obj) + } + + pub fn none() -> Self { + Self(LeanObject::box_usize(0)) + } + + pub fn some(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, val); + Self(ctor.0) + } + + pub fn is_none(&self) -> bool { + self.0.is_scalar() + } + + pub fn is_some(&self) -> bool { + !self.is_none() + } + + pub fn to_option(&self) -> Option { + if self.is_none() { + None + } else { + let ctor = self.0.as_ctor(); + Some(ctor.get(0)) + } + } +} + +// ============================================================================= +// LeanExcept — Except ε α +// ============================================================================= + +/// Typed wrapper for a Lean `Except ε α` (error = ctor tag 0, ok = ctor tag 1). 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanExcept(LeanObject); + +impl Deref for LeanExcept { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanExcept { + /// Wrap a raw pointer, asserting it is a valid `Except`. + /// + /// # Safety + /// The pointer must be a valid Lean `Except` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && (obj.tag() == 0 || obj.tag() == 1)); + Self(obj) + } + + /// Build `Except.ok val`. + pub fn ok(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, val); + Self(ctor.0) + } + + /// Build `Except.error msg`. + pub fn error(msg: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, msg); + Self(ctor.0) + } + + /// Build `Except.error (String.mk msg)` from a Rust string. + pub fn error_string(msg: &str) -> Self { + Self::error(LeanString::new(msg)) + } + + pub fn is_ok(&self) -> bool { + self.0.tag() == 1 + } + + pub fn is_error(&self) -> bool { + self.0.tag() == 0 + } + + pub fn into_result(self) -> Result { + let ctor = self.0.as_ctor(); + if self.is_ok() { Ok(ctor.get(0)) } else { Err(ctor.get(0)) } + } +} + +// ============================================================================= +// LeanIOResult — EStateM.Result (BaseIO.Result) +// ============================================================================= + +/// Typed wrapper for a Lean `BaseIO.Result α` (`EStateM.Result`). +/// ok = ctor tag 0 (value, world), error = ctor tag 1 (error, world). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanIOResult(LeanObject); + +impl Deref for LeanIOResult { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanIOResult { + /// Build a successful IO result (tag 0, fields: [val, box(0)]). 
+ pub fn ok(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, val); + ctor.set(1, LeanObject::box_usize(0)); // world token + Self(ctor.0) + } + + /// Build an IO error result (tag 1, fields: [err, box(0)]). + pub fn error(err: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, err); + ctor.set(1, LeanObject::box_usize(0)); // world token + Self(ctor.0) + } + + /// Build an IO error from a Rust string via `IO.Error.userError` (tag 7, 1 field). + pub fn error_string(msg: &str) -> Self { + let user_error = LeanCtor::alloc(IO_ERROR_USER_ERROR_TAG, 1, 0); + user_error.set(0, LeanString::new(msg)); + Self::error(*user_error) + } +} + +// ============================================================================= +// LeanProd — Prod α β (pair) +// ============================================================================= + +/// Typed wrapper for a Lean `Prod α β` (ctor tag 0, 2 object fields). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanProd(LeanObject); + +impl Deref for LeanProd { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanProd) -> Self { + x.0 + } +} + +impl LeanProd { + /// Build a pair `(fst, snd)`. + pub fn new(fst: impl Into, snd: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, fst); + ctor.set(1, snd); + Self(*ctor) + } + + /// Get the first element. + pub fn fst(&self) -> LeanObject { + let ctor = self.0.as_ctor(); + ctor.get(0) + } + + /// Get the second element. + pub fn snd(&self) -> LeanObject { + let ctor = self.0.as_ctor(); + ctor.get(1) + } +} + +// ============================================================================= +// From for LeanObject — allow wrapper types to be passed to set() etc. 
+// ============================================================================= + +impl From for LeanObject { + #[inline] + fn from(x: LeanArray) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanByteArray) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanString) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanCtor) -> Self { + x.0 + } +} + +impl From> for LeanObject { + #[inline] + fn from(x: LeanExternal) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanList) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanOption) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanExcept) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanIOResult) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: u32) -> Self { + Self::box_u32(x) + } +} diff --git a/src/ffi.rs b/src/ffi.rs new file mode 100644 index 00000000..8af44040 --- /dev/null +++ b/src/ffi.rs @@ -0,0 +1,62 @@ +// Lean and C don't support feature flags, so the _iroh module is exposed as a fallback for when the `net` feature is disabled and/or on the `aarch64-darwin` target. +// This fallback module contains dummy functions that can still be called via Lean->Rust FFI, but will return an error message that Lean then prints before exiting. +#[cfg(any( + not(feature = "net"), + all(target_os = "macos", target_arch = "aarch64") +))] +pub mod _iroh; +pub mod aiur; +pub mod byte_array; +#[cfg(all( + feature = "net", + not(all(target_os = "macos", target_arch = "aarch64")) +))] +pub mod iroh; +pub mod keccak; +pub mod lean_env; +pub mod unsigned; + +// Modular FFI structure +pub mod builder; // IxEnvBuilder struct +pub mod compile; // Compilation: rs_compile_env_full, rs_compile_phases, etc. 
+pub mod graph; // Graph/SCC: rs_build_ref_graph, rs_compute_sccs +pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment +pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata +pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. + +use lean_ffi::object::{LeanArray, LeanByteArray, LeanIOResult}; + +/// Guard an FFI function that returns a Lean IO result against panics. +/// On panic, returns a Lean IO error with the panic message instead of +/// unwinding across the `extern "C"` boundary (which is undefined behavior). +pub(crate) fn ffi_io_guard(f: F) -> LeanIOResult +where + F: FnOnce() -> LeanIOResult + std::panic::UnwindSafe, +{ + match std::panic::catch_unwind(f) { + Ok(result) => result, + Err(panic_info) => { + let msg = if let Some(s) = panic_info.downcast_ref::<&str>() { + format!("FFI panic: {s}") + } else if let Some(s) = panic_info.downcast_ref::() { + format!("FFI panic: {s}") + } else { + "FFI panic: unknown".to_string() + }; + LeanIOResult::error_string(&msg) + }, + } +} + +#[unsafe(no_mangle)] +extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( + u32s: LeanArray, + bytes: LeanByteArray, +) -> bool { + let u32s_flat: Vec = u32s + .map(|elem| elem.unbox_u32()) + .into_iter() + .flat_map(u32::to_le_bytes) + .collect(); + u32s_flat == bytes.as_bytes() +} diff --git a/src/ffi/_iroh.rs b/src/ffi/_iroh.rs new file mode 100644 index 00000000..be0d5c4d --- /dev/null +++ b/src/ffi/_iroh.rs @@ -0,0 +1,35 @@ +use lean_ffi::object::{LeanArray, LeanExcept, LeanString}; + +const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ + or on MacOS aarch64-darwin"; + +/// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_put( + _node_id: LeanString, + _addrs: LeanArray, + _relay_url: LeanString, + _input: LeanString, +) -> LeanExcept { + LeanExcept::error_string(ERR_MSG) +} + +/// 
`Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_get( + _node_id: LeanString, + _addrs: LeanArray, + _relay_url: LeanString, + _hash: LeanString, +) -> LeanExcept { + LeanExcept::error_string(ERR_MSG) +} + +/// `Iroh.Serve.serve' : Unit → Except String Unit` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_serve() -> LeanExcept { + LeanExcept::error_string( + "Iroh functions not supported when the Rust `net` feature is disabled \ + or on MacOS aarch64-darwin", + ) +} diff --git a/src/ffi/aiur.rs b/src/ffi/aiur.rs new file mode 100644 index 00000000..ed31c634 --- /dev/null +++ b/src/ffi/aiur.rs @@ -0,0 +1,19 @@ +use multi_stark::p3_field::integers::QuotientMap; + +pub mod protocol; +pub mod toplevel; + +use crate::aiur::G; +use lean_ffi::object::LeanObject; + +#[inline] +pub(super) fn lean_unbox_nat_as_usize(obj: LeanObject) -> usize { + assert!(obj.is_scalar()); + obj.unbox_usize() +} + +#[inline] +pub(super) fn lean_unbox_g(obj: LeanObject) -> G { + let u64 = obj.unbox_u64(); + unsafe { G::from_canonical_unchecked(u64) } +} diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs new file mode 100644 index 00000000..f646bf3b --- /dev/null +++ b/src/ffi/aiur/protocol.rs @@ -0,0 +1,197 @@ +use multi_stark::{ + p3_field::PrimeField64, + prover::Proof, + types::{CommitmentParameters, FriParameters}, +}; +use rustc_hash::{FxBuildHasher, FxHashMap}; +use std::sync::OnceLock; + +use lean_ffi::object::{ + ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanExternal, + LeanNat, LeanObject, +}; + +use crate::{ + aiur::{ + G, + execute::{IOBuffer, IOKeyInfo}, + synthesis::AiurSystem, + }, + ffi::aiur::{ + lean_unbox_g, lean_unbox_nat_as_usize, toplevel::decode_toplevel, + }, + lean::{LeanAiurFriParameters, LeanAiurToplevel}, +}; + +// ============================================================================= +// External class registration (OnceLock 
pattern) +// ============================================================================= + +static AIUR_PROOF_CLASS: OnceLock = OnceLock::new(); +static AIUR_SYSTEM_CLASS: OnceLock = OnceLock::new(); + +fn proof_class() -> &'static ExternalClass { + AIUR_PROOF_CLASS.get_or_init(ExternalClass::register_with_drop::) +} + +fn system_class() -> &'static ExternalClass { + AIUR_SYSTEM_CLASS.get_or_init(ExternalClass::register_with_drop::) +} + +// ============================================================================= +// Lean FFI functions +// ============================================================================= + +/// `Aiur.Proof.toBytes : @& Proof → ByteArray` +#[unsafe(no_mangle)] +extern "C" fn rs_aiur_proof_to_bytes( + proof_obj: LeanExternal, +) -> LeanByteArray { + let bytes = proof_obj.get().to_bytes().expect("Serialization error"); + LeanByteArray::from_bytes(&bytes) +} + +/// `Aiur.Proof.ofBytes : @& ByteArray → Proof` +#[unsafe(no_mangle)] +extern "C" fn rs_aiur_proof_of_bytes( + byte_array: LeanByteArray, +) -> LeanExternal { + let proof = + Proof::from_bytes(byte_array.as_bytes()).expect("Deserialization error"); + LeanExternal::alloc(proof_class(), proof) +} + +/// `AiurSystem.build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem` +#[unsafe(no_mangle)] +extern "C" fn rs_aiur_system_build( + toplevel: LeanAiurToplevel, + commitment_parameters: LeanNat, +) -> LeanExternal { + let system = AiurSystem::build( + decode_toplevel(toplevel), + decode_commitment_parameters(commitment_parameters), + ); + LeanExternal::alloc(system_class(), system) +} + +/// `AiurSystem.verify : @& AiurSystem → @& FriParameters → @& Array G → @& Proof → Except String Unit` +#[unsafe(no_mangle)] +extern "C" fn rs_aiur_system_verify( + aiur_system_obj: LeanExternal, + fri_parameters: LeanAiurFriParameters, + claim: LeanArray, + proof_obj: LeanExternal, +) -> LeanExcept { + let fri_parameters = decode_fri_parameters(fri_parameters); + let claim = 
claim.map(lean_unbox_g); + match aiur_system_obj.get().verify(fri_parameters, &claim, proof_obj.get()) { + Ok(()) => LeanExcept::ok(LeanObject::box_usize(0)), + Err(err) => LeanExcept::error_string(&format!("{err:?}")), + } +} + +/// `AiurSystem.prove`: runs the prover and returns +/// `Array G × Proof × Array G × Array (Array G × IOKeyInfo)` +#[unsafe(no_mangle)] +extern "C" fn rs_aiur_system_prove( + aiur_system_obj: LeanExternal, + fri_parameters: LeanAiurFriParameters, + fun_idx: LeanNat, + args: LeanArray, + io_data_arr: LeanArray, + io_map_arr: LeanArray, +) -> LeanObject { + let fri_parameters = decode_fri_parameters(fri_parameters); + let fun_idx = lean_unbox_nat_as_usize(*fun_idx); + let args = args.map(lean_unbox_g); + let io_data = io_data_arr.map(lean_unbox_g); + let io_map = decode_io_buffer_map(io_map_arr); + let mut io_buffer = IOBuffer { data: io_data, map: io_map }; + + let (claim, proof) = + aiur_system_obj.get().prove(fri_parameters, fun_idx, &args, &mut io_buffer); + + // claim: Array G + let lean_claim = build_g_array(&claim); + + // proof: Proof (external object) + let lean_proof = *LeanExternal::alloc(proof_class(), proof); + + // io_data: Array G + let lean_io_data = build_g_array(&io_buffer.data); + + // io_map: Array (Array G × IOKeyInfo) + let lean_io_map = { + let arr = LeanArray::alloc(io_buffer.map.len()); + for (i, (key, info)) in io_buffer.map.iter().enumerate() { + let key_arr = build_g_array(key); + // IOKeyInfo ctor (tag 0, 2 object fields) + let key_info = LeanCtor::alloc(0, 2, 0); + key_info.set(0, LeanObject::box_usize(info.idx)); + key_info.set(1, LeanObject::box_usize(info.len)); + // (Array G × IOKeyInfo) tuple + let map_elt = LeanCtor::alloc(0, 2, 0); + map_elt.set(0, key_arr); + map_elt.set(1, *key_info); + arr.set(i, *map_elt); + } + *arr + }; + + // Build nested tuple: + // Array G × Array (Array G × IOKeyInfo) + let io_tuple = LeanCtor::alloc(0, 2, 0); + io_tuple.set(0, lean_io_data); + io_tuple.set(1, lean_io_map); + 
// Proof × Array G × Array (Array G × IOKeyInfo) + let proof_io_tuple = LeanCtor::alloc(0, 2, 0); + proof_io_tuple.set(0, lean_proof); + proof_io_tuple.set(1, *io_tuple); + // Array G × Proof × Array G × Array (Array G × IOKeyInfo) + let result = LeanCtor::alloc(0, 2, 0); + result.set(0, lean_claim); + result.set(1, *proof_io_tuple); + *result +} + +// ============================================================================= +// Helpers +// ============================================================================= + +/// Build a Lean `Array G` from a slice of field elements. +fn build_g_array(values: &[G]) -> LeanArray { + let arr = LeanArray::alloc(values.len()); + for (i, g) in values.iter().enumerate() { + arr.set(i, LeanObject::box_u64(g.as_canonical_u64())); + } + arr +} + +fn decode_commitment_parameters(obj: LeanNat) -> CommitmentParameters { + CommitmentParameters { log_blowup: lean_unbox_nat_as_usize(*obj) } +} + +fn decode_fri_parameters(obj: LeanAiurFriParameters) -> FriParameters { + let ctor = obj.as_ctor(); + FriParameters { + log_final_poly_len: lean_unbox_nat_as_usize(ctor.get(0)), + num_queries: lean_unbox_nat_as_usize(ctor.get(1)), + commit_proof_of_work_bits: lean_unbox_nat_as_usize(ctor.get(2)), + query_proof_of_work_bits: lean_unbox_nat_as_usize(ctor.get(3)), + } +} + +fn decode_io_buffer_map(arr: LeanArray) -> FxHashMap, IOKeyInfo> { + let mut map = FxHashMap::with_capacity_and_hasher(arr.len(), FxBuildHasher); + for elt in arr.iter() { + let pair = elt.as_ctor(); + let key = pair.get(0).as_array().map(lean_unbox_g); + let info_ctor = pair.get(1).as_ctor(); + let info = IOKeyInfo { + idx: lean_unbox_nat_as_usize(info_ctor.get(0)), + len: lean_unbox_nat_as_usize(info_ctor.get(1)), + }; + map.insert(key, info); + } + map +} diff --git a/src/ffi/aiur/toplevel.rs b/src/ffi/aiur/toplevel.rs new file mode 100644 index 00000000..88c2d548 --- /dev/null +++ b/src/ffi/aiur/toplevel.rs @@ -0,0 +1,200 @@ +use 
multi_stark::p3_field::PrimeCharacteristicRing; + +use lean_ffi::object::{LeanCtor, LeanObject}; + +use crate::{ + FxIndexMap, + aiur::{ + G, + bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, + }, + lean::LeanAiurToplevel, +}; + +use crate::ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}; + +fn decode_vec_val_idx(obj: LeanObject) -> Vec { + obj.as_array().map(lean_unbox_nat_as_usize) +} + +fn decode_op(ctor: LeanCtor) -> Op { + match ctor.tag() { + 0 => { + let [const_val] = ctor.objs::<1>(); + Op::Const(G::from_u64(const_val.as_enum_tag() as u64)) + }, + 1 => { + let [a, b] = ctor.objs::<2>(); + Op::Add(lean_unbox_nat_as_usize(a), lean_unbox_nat_as_usize(b)) + }, + 2 => { + let [a, b] = ctor.objs::<2>(); + Op::Sub(lean_unbox_nat_as_usize(a), lean_unbox_nat_as_usize(b)) + }, + 3 => { + let [a, b] = ctor.objs::<2>(); + Op::Mul(lean_unbox_nat_as_usize(a), lean_unbox_nat_as_usize(b)) + }, + 4 => { + let [a] = ctor.objs::<1>(); + Op::EqZero(lean_unbox_nat_as_usize(a)) + }, + 5 => { + let [fun_idx, val_idxs, output_size] = ctor.objs::<3>(); + let fun_idx = lean_unbox_nat_as_usize(fun_idx); + let val_idxs = decode_vec_val_idx(val_idxs); + let output_size = lean_unbox_nat_as_usize(output_size); + Op::Call(fun_idx, val_idxs, output_size) + }, + 6 => { + let [val_idxs] = ctor.objs::<1>(); + Op::Store(decode_vec_val_idx(val_idxs)) + }, + 7 => { + let [width, val_idx] = ctor.objs::<2>(); + Op::Load(lean_unbox_nat_as_usize(width), lean_unbox_nat_as_usize(val_idx)) + }, + 8 => { + let [a, b] = ctor.objs::<2>(); + Op::AssertEq(decode_vec_val_idx(a), decode_vec_val_idx(b)) + }, + 9 => { + let [key] = ctor.objs::<1>(); + Op::IOGetInfo(decode_vec_val_idx(key)) + }, + 10 => { + let [key, idx, len] = ctor.objs::<3>(); + Op::IOSetInfo( + decode_vec_val_idx(key), + lean_unbox_nat_as_usize(idx), + lean_unbox_nat_as_usize(len), + ) + }, + 11 => { + let [idx, len] = ctor.objs::<2>(); + Op::IORead(lean_unbox_nat_as_usize(idx), lean_unbox_nat_as_usize(len)) + }, 
+ 12 => { + let [data] = ctor.objs::<1>(); + Op::IOWrite(decode_vec_val_idx(data)) + }, + 13 => { + let [byte] = ctor.objs::<1>(); + Op::U8BitDecomposition(lean_unbox_nat_as_usize(byte)) + }, + 14 => { + let [byte] = ctor.objs::<1>(); + Op::U8ShiftLeft(lean_unbox_nat_as_usize(byte)) + }, + 15 => { + let [byte] = ctor.objs::<1>(); + Op::U8ShiftRight(lean_unbox_nat_as_usize(byte)) + }, + 16 => { + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); + Op::U8Xor(i, j) + }, + 17 => { + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); + Op::U8Add(i, j) + }, + 18 => { + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); + Op::U8Sub(i, j) + }, + 19 => { + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); + Op::U8And(i, j) + }, + 20 => { + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); + Op::U8Or(i, j) + }, + 21 => { + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); + Op::U8LessThan(i, j) + }, + 22 => { + let [label_obj, idxs_obj] = ctor.objs::<2>(); + let label = label_obj.as_string().to_string(); + let idxs = if idxs_obj.is_scalar() { + None + } else { + let inner_ctor = idxs_obj.as_ctor(); + Some(inner_ctor.get(0).as_array().map(lean_unbox_nat_as_usize)) + }; + Op::Debug(label, idxs) + }, + _ => unreachable!(), + } +} + +fn decode_g_block_pair(ctor: LeanCtor) -> (G, Block) { + let [g_obj, block_obj] = ctor.objs::<2>(); + let g = lean_unbox_g(g_obj); + let block = decode_block(block_obj.as_ctor()); + (g, block) +} + +fn decode_ctrl(ctor: LeanCtor) -> Ctrl { + match ctor.tag() { + 0 => { + let [val_idx_obj, cases_obj, default_obj] = ctor.objs::<3>(); + let val_idx = lean_unbox_nat_as_usize(val_idx_obj); + let vec_cases = + cases_obj.as_array().map(|o| decode_g_block_pair(o.as_ctor())); + let cases = FxIndexMap::from_iter(vec_cases); + let default = if default_obj.is_scalar() { + None + } else { + let inner_ctor = default_obj.as_ctor(); + let block = decode_block(inner_ctor.get(0).as_ctor()); + 
Some(Box::new(block)) + }; + Ctrl::Match(val_idx, cases, default) + }, + 1 => { + let [sel_idx_obj, val_idxs_obj] = ctor.objs::<2>(); + let sel_idx = lean_unbox_nat_as_usize(sel_idx_obj); + let val_idxs = decode_vec_val_idx(val_idxs_obj); + Ctrl::Return(sel_idx, val_idxs) + }, + _ => unreachable!(), + } +} + +fn decode_block(ctor: LeanCtor) -> Block { + let [ops_obj, ctrl_obj, min_sel_obj, max_sel_obj] = ctor.objs::<4>(); + let ops = ops_obj.as_array().map(|o| decode_op(o.as_ctor())); + let ctrl = decode_ctrl(ctrl_obj.as_ctor()); + let min_sel_included = lean_unbox_nat_as_usize(min_sel_obj); + let max_sel_excluded = lean_unbox_nat_as_usize(max_sel_obj); + Block { ops, ctrl, min_sel_included, max_sel_excluded } +} + +fn decode_function_layout(ctor: LeanCtor) -> FunctionLayout { + let [input_size, selectors, auxiliaries, lookups] = ctor.objs::<4>(); + FunctionLayout { + input_size: lean_unbox_nat_as_usize(input_size), + selectors: lean_unbox_nat_as_usize(selectors), + auxiliaries: lean_unbox_nat_as_usize(auxiliaries), + lookups: lean_unbox_nat_as_usize(lookups), + } +} + +fn decode_function(ctor: LeanCtor) -> Function { + let [body_obj, layout_obj, unconstrained_obj] = ctor.objs::<3>(); + let body = decode_block(body_obj.as_ctor()); + let layout = decode_function_layout(layout_obj.as_ctor()); + let unconstrained = unconstrained_obj.as_enum_tag() != 0; + Function { body, layout, unconstrained } +} + +pub(crate) fn decode_toplevel(obj: LeanAiurToplevel) -> Toplevel { + let ctor = obj.as_ctor(); + let [functions_obj, memory_sizes_obj] = ctor.objs::<2>(); + let functions = + functions_obj.as_array().map(|o| decode_function(o.as_ctor())); + let memory_sizes = memory_sizes_obj.as_array().map(lean_unbox_nat_as_usize); + Toplevel { functions, memory_sizes } +} diff --git a/src/lean/ffi/builder.rs b/src/ffi/builder.rs similarity index 81% rename from src/lean/ffi/builder.rs rename to src/ffi/builder.rs index fe0d80af..e4fe8655 100644 --- a/src/lean/ffi/builder.rs +++ 
b/src/ffi/builder.rs @@ -1,18 +1,18 @@ //! LeanBuildCache struct for constructing Lean Ix types with caching. -use std::ffi::c_void; - use blake3::Hash; use rustc_hash::FxHashMap; +use crate::lean::{LeanIxExpr, LeanIxLevel, LeanIxName}; + /// Cache for constructing Lean Ix types with deduplication. /// /// This struct maintains caches for names, levels, and expressions to avoid /// rebuilding the same Lean objects multiple times during environment construction. pub struct LeanBuildCache { - pub(crate) names: FxHashMap, - pub(crate) levels: FxHashMap, - pub(crate) exprs: FxHashMap, + pub(crate) names: FxHashMap, + pub(crate) levels: FxHashMap, + pub(crate) exprs: FxHashMap, } impl LeanBuildCache { diff --git a/src/ffi/byte_array.rs b/src/ffi/byte_array.rs new file mode 100644 index 00000000..2831380e --- /dev/null +++ b/src/ffi/byte_array.rs @@ -0,0 +1,8 @@ +use lean_ffi::object::LeanByteArray; + +/// `@& ByteArray → @& ByteArray → Bool` +/// Efficient implementation for `BEq ByteArray` +#[unsafe(no_mangle)] +extern "C" fn rs_byte_array_beq(a: LeanByteArray, b: LeanByteArray) -> bool { + a.as_bytes() == b.as_bytes() +} diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs new file mode 100644 index 00000000..b83b30ad --- /dev/null +++ b/src/ffi/compile.rs @@ -0,0 +1,1437 @@ +//! FFI bridge between Lean and Rust for the Ixon compilation/decompilation pipeline. +//! +//! Provides `extern "C"` functions callable from Lean via `@[extern]`: +//! - `rs_compile_env_full` / `rs_compile_env`: compile a Lean environment to Ixon +//! - `rs_compile_phases`: run individual pipeline phases (canon, condense, graph, compile) +//! - `rs_decompile_env`: decompile Ixon back to Lean environment +//! - `rs_roundtrip_*`: roundtrip FFI tests for Lean↔Rust type conversions +//! 
- `build_*` / `decode_*`: convert between Lean constructor layouts and Rust types + +use std::collections::HashMap; +use std::sync::Arc; + +use crate::ffi::ffi_io_guard; +use crate::ix::address::Address; +use crate::ix::compile::{CompileState, compile_env}; +use crate::ix::condense::compute_sccs; +use crate::ix::decompile::decompile_env; +use crate::ix::env::Name; +use crate::ix::graph::build_ref_graph; +use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; +use crate::ix::ixon::expr::Expr as IxonExpr; +use crate::ix::ixon::serialize::put_expr; +use crate::ix::ixon::{Comm, ConstantMeta}; +use crate::lean::{ + LeanIxBlockCompareDetail, LeanIxBlockCompareResult, LeanIxCompileError, + LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxConstantInfo, + LeanIxDecompileError, LeanIxName, LeanIxRawEnvironment, LeanIxSerializeError, + LeanIxonRawBlob, LeanIxonRawComm, LeanIxonRawConst, LeanIxonRawEnv, + LeanIxonRawNameEntry, LeanIxonRawNamed, +}; +use lean_ffi::nat::Nat; +use lean_ffi::object::LeanIOResult; +use lean_ffi::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanList, LeanObject, + LeanString, +}; + +use dashmap::DashMap; +use dashmap::DashSet; + +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ixon::env::decoded_to_ixon_env; +use crate::ffi::lean_env::{GlobalCache, decode_env, decode_name}; +use crate::lean::LeanIxAddress; + +// ============================================================================= +// Helper builders +// ============================================================================= + +/// Build a Lean String from a Rust &str. +fn build_lean_string(s: &str) -> LeanString { + LeanString::new(s) +} + +/// Build a Lean Nat from a usize. 
+fn build_lean_nat_usize(n: usize) -> LeanObject { + LeanObject::from_nat_u64(n as u64) +} + +// ============================================================================= +// Raw* Builder Functions for Compile FFI +// ============================================================================= + +/// Build RawConst using type method. +fn build_raw_const( + addr: &Address, + constant: &IxonConstant, +) -> LeanIxonRawConst { + LeanIxonRawConst::build_from_parts(addr, constant) +} + +/// Build RawNamed using type method. +fn build_raw_named( + cache: &mut LeanBuildCache, + name: &Name, + addr: &Address, + meta: &ConstantMeta, +) -> LeanIxonRawNamed { + LeanIxonRawNamed::build_from_parts(cache, name, addr, meta) +} + +/// Build RawBlob using type method. +fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanIxonRawBlob { + LeanIxonRawBlob::build_from_parts(addr, bytes) +} + +/// Build RawComm using type method. +fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanIxonRawComm { + LeanIxonRawComm::build_from_parts(addr, comm) +} + +// ============================================================================= +// RustCondensedBlocks roundtrip FFI +// ============================================================================= + +/// Round-trip a RustCondensedBlocks structure. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_rust_condensed_blocks( + obj: LeanIxCondensedBlocks, +) -> LeanIxCondensedBlocks { + let ctor = obj.as_ctor(); + let low_links = ctor.get(0); + let blocks = ctor.get(1); + let block_refs = ctor.get(2); + + low_links.inc_ref(); + blocks.inc_ref(); + block_refs.inc_ref(); + + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, low_links); + result.set(1, blocks); + result.set(2, block_refs); + LeanIxCondensedBlocks::new(*result) +} + +/// Round-trip a RustCompilePhases structure. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_rust_compile_phases( + obj: LeanIxCompilePhases, +) -> LeanIxCompilePhases { + let ctor = obj.as_ctor(); + let raw_env = ctor.get(0); + let condensed = ctor.get(1); + let compile_env = ctor.get(2); + + raw_env.inc_ref(); + condensed.inc_ref(); + compile_env.inc_ref(); + + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, raw_env); + result.set(1, condensed); + result.set(2, compile_env); + LeanIxCompilePhases::new(*result) +} + +// ============================================================================= +// BlockCompareResult and BlockCompareDetail roundtrip FFI +// ============================================================================= + +/// Round-trip a BlockCompareResult. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_block_compare_result( + obj: LeanIxBlockCompareResult, +) -> LeanIxBlockCompareResult { + // Tags 0 (match) and 2 (notFound) have 0 fields → Lean represents as scalars + if obj.is_scalar() { + return obj; + } + let ctor = obj.as_ctor(); + match ctor.tag() { + 1 => { + // mismatch: 0 obj, 24 scalar bytes (3 × u64) + let lean_size = ctor.scalar_u64(0, 0); + let rust_size = ctor.scalar_u64(0, 8); + let first_diff = ctor.scalar_u64(0, 16); + + let out = LeanCtor::alloc(1, 0, 24); + out.set_u64(0, lean_size); + out.set_u64(8, rust_size); + out.set_u64(16, first_diff); + LeanIxBlockCompareResult::new(*out) + }, + _ => unreachable!("Invalid BlockCompareResult tag: {}", ctor.tag()), + } +} + +/// Round-trip a BlockCompareDetail. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_block_compare_detail( + obj: LeanIxBlockCompareDetail, +) -> LeanIxBlockCompareDetail { + let ctor = obj.as_ctor(); + let result_ptr = ctor.get(0); + let lean_sharing_len = ctor.scalar_u64(1, 0); + let rust_sharing_len = ctor.scalar_u64(1, 8); + + let result_obj = rs_roundtrip_block_compare_result( + LeanIxBlockCompareResult::new(result_ptr), + ); + + let out = LeanCtor::alloc(0, 1, 16); + out.set(0, result_obj); + out.set_u64(8, lean_sharing_len); + out.set_u64(16, rust_sharing_len); + LeanIxBlockCompareDetail::new(*out) +} + +// ============================================================================= +// Full Compilation FFI +// ============================================================================= + +/// FFI function to run the complete compilation pipeline and return all data. +#[unsafe(no_mangle)] +pub extern "C" fn rs_compile_env_full( + env_consts_ptr: LeanList, +) -> LeanIOResult { + ffi_io_guard(std::panic::AssertUnwindSafe(|| { + // Phase 1: Decode Lean environment + let rust_env = decode_env(env_consts_ptr); + let env_len = rust_env.len(); + let rust_env = Arc::new(rust_env); + + // Phase 2: Build ref graph and compute SCCs + let ref_graph = build_ref_graph(&rust_env); + let condensed = compute_sccs(&ref_graph.out_refs); + + // Phase 3: Compile + let compile_stt = match compile_env(&rust_env) { + Ok(stt) => stt, + Err(e) => { + let msg = + format!("rs_compile_env_full: Rust compilation failed: {:?}", e); + return LeanIOResult::error_string(&msg); + }, + }; + + // Phase 4: Build Lean structures + let mut cache = LeanBuildCache::with_capacity(env_len); + + let raw_env = LeanIxRawEnvironment::build(&mut cache, &rust_env); + let condensed_obj = LeanIxCondensedBlocks::build(&mut cache, &condensed); + + // Collect blocks + let mut blocks_data: Vec<(Name, Vec, usize)> = Vec::new(); + let mut seen_addrs: std::collections::HashSet
= + std::collections::HashSet::new(); + + for entry in compile_stt.name_to_addr.iter() { + let name = entry.key().clone(); + let addr = entry.value().clone(); + + if seen_addrs.contains(&addr) { + continue; + } + seen_addrs.insert(addr.clone()); + + if let Some(constant) = compile_stt.env.get_const(&addr) { + let mut bytes = Vec::new(); + constant.put(&mut bytes); + let sharing_len = constant.sharing.len(); + blocks_data.push((name, bytes, sharing_len)); + } + } + + // Build blocks array + let blocks_arr = LeanArray::alloc(blocks_data.len()); + for (i, (name, bytes, sharing_len)) in blocks_data.iter().enumerate() { + let name_obj = LeanIxName::build(&mut cache, name); + let ba = LeanByteArray::from_bytes(bytes); + + // Block: { name: Ix.Name, bytes: ByteArray, sharingLen: UInt64 } + let block = LeanCtor::alloc(0, 2, 8); + block.set(0, name_obj); + block.set(1, ba); + block.set_u64(2 * 8, *sharing_len as u64); + + blocks_arr.set(i, *block); + } + + // Build nameToAddr array + let name_to_addr_len = compile_stt.name_to_addr.len(); + let name_to_addr_arr = LeanArray::alloc(name_to_addr_len); + for (i, entry) in compile_stt.name_to_addr.iter().enumerate() { + let name = entry.key(); + let addr = entry.value(); + + let name_obj = LeanIxName::build(&mut cache, name); + let addr_ba = LeanByteArray::from_bytes(addr.as_bytes()); + + let entry_obj = LeanCtor::alloc(0, 2, 0); + entry_obj.set(0, name_obj); + entry_obj.set(1, addr_ba); + + name_to_addr_arr.set(i, *entry_obj); + } + + // Build RawCompiledEnv + let compiled_obj = LeanCtor::alloc(0, 2, 0); + compiled_obj.set(0, *blocks_arr); + compiled_obj.set(1, *name_to_addr_arr); + + // Build RustCompilationResult + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, raw_env); + result.set(1, condensed_obj); + result.set(2, *compiled_obj); + + LeanIOResult::ok(*result) + })) +} + +/// FFI function to compile a Lean environment to serialized Ixon.Env bytes. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_compile_env(env_consts_ptr: LeanList) -> LeanIOResult { + ffi_io_guard(std::panic::AssertUnwindSafe(|| { + let rust_env = decode_env(env_consts_ptr); + let rust_env = Arc::new(rust_env); + + let compile_stt = match compile_env(&rust_env) { + Ok(stt) => stt, + Err(e) => { + let msg = format!("rs_compile_env: Rust compilation failed: {:?}", e); + return LeanIOResult::error_string(&msg); + }, + }; + + // Serialize the compiled Env to bytes + let mut buf = Vec::new(); + if let Err(e) = compile_stt.env.put(&mut buf) { + let msg = format!("rs_compile_env: Env serialization failed: {}", e); + return LeanIOResult::error_string(&msg); + } + + // Build Lean ByteArray + let ba = LeanByteArray::from_bytes(&buf); + LeanIOResult::ok(ba) + })) +} + +/// Round-trip a RawEnv: decode from Lean, re-encode via builder. +/// This performs a full decode/build cycle to verify FFI correctness. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_raw_env( + raw_env_obj: LeanIxonRawEnv, +) -> LeanIxonRawEnv { + let env = raw_env_obj.decode(); + LeanIxonRawEnv::build(&env) +} + +/// FFI function to run all compilation phases and return combined results. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanList) -> LeanIOResult { + ffi_io_guard(std::panic::AssertUnwindSafe(|| { + let rust_env = decode_env(env_consts_ptr); + let env_len = rust_env.len(); + let rust_env = Arc::new(rust_env); + + let mut cache = LeanBuildCache::with_capacity(env_len); + let raw_env = LeanIxRawEnvironment::build(&mut cache, &rust_env); + + let ref_graph = build_ref_graph(&rust_env); + + let condensed = compute_sccs(&ref_graph.out_refs); + + let condensed_obj = LeanIxCondensedBlocks::build(&mut cache, &condensed); + + let compile_stt = match compile_env(&rust_env) { + Ok(stt) => stt, + Err(e) => { + let msg = format!("rs_compile_phases: compilation failed: {:?}", e); + return LeanIOResult::error_string(&msg); + }, + }; + + // Build Lean objects from compile results + let consts: Vec<_> = compile_stt + .env + .consts + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (addr, constant)) in consts.iter().enumerate() { + consts_arr.set(i, build_raw_const(addr, constant)); + } + + let named: Vec<_> = compile_stt + .env + .named + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let named_arr = LeanArray::alloc(named.len()); + for (i, (name, n)) in named.iter().enumerate() { + named_arr.set(i, build_raw_named(&mut cache, name, &n.addr, &n.meta)); + } + + let blobs: Vec<_> = compile_stt + .env + .blobs + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let blobs_arr = LeanArray::alloc(blobs.len()); + for (i, (addr, bytes)) in blobs.iter().enumerate() { + blobs_arr.set(i, build_raw_blob(addr, bytes)); + } + + let comms: Vec<_> = compile_stt + .env + .comms + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let comms_arr = LeanArray::alloc(comms.len()); + for (i, (addr, comm)) in comms.iter().enumerate() { + comms_arr.set(i, build_raw_comm(addr, comm)); + } + + // 
Build names array (Address → Ix.Name) + let names: Vec<_> = compile_stt + .env + .names + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let names_arr = LeanArray::alloc(names.len()); + for (i, (addr, name)) in names.iter().enumerate() { + names_arr.set(i, LeanIxonRawNameEntry::build(&mut cache, addr, name)); + } + + let raw_ixon_env = LeanCtor::alloc(0, 5, 0); + raw_ixon_env.set(0, *consts_arr); + raw_ixon_env.set(1, *named_arr); + raw_ixon_env.set(2, *blobs_arr); + raw_ixon_env.set(3, *comms_arr); + raw_ixon_env.set(4, *names_arr); + + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, raw_env); + result.set(1, condensed_obj); + result.set(2, *raw_ixon_env); + + LeanIOResult::ok(*result) + })) +} + +/// FFI function to compile a Lean environment to a RawEnv. +#[unsafe(no_mangle)] +pub extern "C" fn rs_compile_env_to_ixon( + env_consts_ptr: LeanList, +) -> LeanIOResult { + ffi_io_guard(std::panic::AssertUnwindSafe(|| { + let rust_env = decode_env(env_consts_ptr); + let rust_env = Arc::new(rust_env); + + let compile_stt = match compile_env(&rust_env) { + Ok(stt) => stt, + Err(e) => { + let msg = + format!("rs_compile_env_to_ixon: compilation failed: {:?}", e); + return LeanIOResult::error_string(&msg); + }, + }; + + let mut cache = LeanBuildCache::with_capacity(rust_env.len()); + + let consts: Vec<_> = compile_stt + .env + .consts + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (addr, constant)) in consts.iter().enumerate() { + consts_arr.set(i, build_raw_const(addr, constant)); + } + + let named: Vec<_> = compile_stt + .env + .named + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let named_arr = LeanArray::alloc(named.len()); + for (i, (name, n)) in named.iter().enumerate() { + named_arr.set(i, build_raw_named(&mut cache, name, &n.addr, &n.meta)); + } + + let blobs: Vec<_> = compile_stt + .env + .blobs + .iter() + .map(|e| 
(e.key().clone(), e.value().clone())) + .collect(); + let blobs_arr = LeanArray::alloc(blobs.len()); + for (i, (addr, bytes)) in blobs.iter().enumerate() { + blobs_arr.set(i, build_raw_blob(addr, bytes)); + } + + let comms: Vec<_> = compile_stt + .env + .comms + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let comms_arr = LeanArray::alloc(comms.len()); + for (i, (addr, comm)) in comms.iter().enumerate() { + comms_arr.set(i, build_raw_comm(addr, comm)); + } + + // Build names array (Address → Ix.Name) + let names: Vec<_> = compile_stt + .env + .names + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let names_arr = LeanArray::alloc(names.len()); + for (i, (addr, name)) in names.iter().enumerate() { + names_arr.set(i, LeanIxonRawNameEntry::build(&mut cache, addr, name)); + } + + let result = LeanCtor::alloc(0, 5, 0); + result.set(0, *consts_arr); + result.set(1, *named_arr); + result.set(2, *blobs_arr); + result.set(3, *comms_arr); + result.set(4, *names_arr); + LeanIOResult::ok(*result) + })) +} + +/// FFI function to canonicalize environment to Ix.RawEnvironment. +#[unsafe(no_mangle)] +pub extern "C" fn rs_canonicalize_env_to_ix( + env_consts_ptr: LeanList, +) -> LeanIOResult { + ffi_io_guard(std::panic::AssertUnwindSafe(|| { + let rust_env = decode_env(env_consts_ptr); + let mut cache = LeanBuildCache::with_capacity(rust_env.len()); + let raw_env = LeanIxRawEnvironment::build(&mut cache, &rust_env); + LeanIOResult::ok(raw_env) + })) +} + +// ============================================================================= +// RustCompiledEnv - Holds Rust compilation results for comparison +// ============================================================================= + +/// Rust-compiled environment holding blocks indexed by low-link name. +/// Each block is stored as serialized bytes for comparison with Lean output. 
+pub struct RustCompiledEnv { + /// Map from low-link name to (serialized constant bytes, sharing vector length) + blocks: HashMap, usize)>, + /// The full compile state for accessing pre-sharing expressions + compile_state: CompileState, +} + +// ============================================================================= +// Block-by-block comparison FFI +// ============================================================================= + +/// FFI: Simple test to verify FFI round-trip works. +/// Takes a Lean.Name and returns a magic number to verify the call succeeded. +#[unsafe(no_mangle)] +extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObject) -> u64 { + let global_cache = GlobalCache::default(); + let name = decode_name(name_ptr, &global_cache); + + // Return a magic number plus the hash of the name to verify it worked + let hash = name.get_hash(); + let hash_bytes = hash.as_bytes(); + let hash_prefix = + u64::from_le_bytes(hash_bytes[0..8].try_into().unwrap_or([0u8; 8])); + + // Magic number 0xDEADBEEF plus hash prefix + 0xDEAD_BEEF_0000_0000 | (hash_prefix & 0x0000_0000_FFFF_FFFF) +} + +/// FFI: Compile entire environment with Rust, returning a handle to RustCompiledEnv. 
+#[unsafe(no_mangle)] +extern "C" fn rs_compile_env_rust_first( + env_consts_ptr: LeanList, +) -> *mut RustCompiledEnv { + // Decode Lean environment + let lean_env = decode_env(env_consts_ptr); + let lean_env = Arc::new(lean_env); + + // Compile with Rust + let rust_stt = match compile_env(&lean_env) { + Ok(stt) => stt, + Err(_e) => { + return std::ptr::null_mut(); + }, + }; + + // Build block map: lowlink name -> (serialized bytes, sharing len) + let mut blocks: HashMap, usize)> = HashMap::new(); + + // Iterate over all names and their addresses + for entry in rust_stt.name_to_addr.iter() { + let name = entry.key().clone(); + let addr = entry.value().clone(); + + // Skip if we already have this block (multiple names map to same block) + if blocks.contains_key(&name) { + continue; + } + + // Get the compiled constant + if let Some(constant) = rust_stt.env.get_const(&addr) { + let mut bytes = Vec::new(); + constant.put(&mut bytes); + let sharing_len = constant.sharing.len(); + blocks.insert(name, (bytes, sharing_len)); + } + } + + // Return boxed RustCompiledEnv with full compile state for pre-sharing access + Box::into_raw(Box::new(RustCompiledEnv { blocks, compile_state: rust_stt })) +} + +/// FFI: Compare a single block and return packed result. 
+/// Returns a packed u64: high 32 bits = matches (1) or error code (0 = mismatch, 2 = not found) +/// low 32 bits = first diff offset (if mismatch) +#[unsafe(no_mangle)] +extern "C" fn rs_compare_block( + rust_env: *const RustCompiledEnv, + lowlink_name: LeanObject, + lean_bytes: LeanByteArray, +) -> u64 { + if rust_env.is_null() { + return 2u64 << 32; // not found + } + let global_cache = GlobalCache::default(); + let name = decode_name(lowlink_name, &global_cache); + + let rust_env = unsafe { &*rust_env }; + let lean_data = lean_bytes.as_bytes(); + + // Look up Rust's compiled block + let rust_bytes = match rust_env.blocks.get(&name) { + Some((bytes, _)) => bytes, + None => { + // Block not found in Rust compilation: code 2 + return 2u64 << 32; + }, + }; + + // Compare bytes + if rust_bytes == lean_data { + // Match: code 1 + return 1u64 << 32; + } + + // Mismatch: find first differing byte + rust_bytes.iter().zip(lean_data.iter()).position(|(a, b)| a != b).map_or_else( + || { + // One is a prefix of the other + rust_bytes.len().min(lean_data.len()) as u64 + }, + |i| i as u64, + ) +} + +/// FFI: Free a RustCompiledEnv. +#[unsafe(no_mangle)] +extern "C" fn rs_free_rust_env(rust_env: *mut RustCompiledEnv) { + if !rust_env.is_null() { + unsafe { + drop(Box::from_raw(rust_env)); + } + } +} + +/// FFI: Get the number of blocks in a RustCompiledEnv. +#[unsafe(no_mangle)] +extern "C" fn rs_get_rust_env_block_count( + rust_env: *const RustCompiledEnv, +) -> u64 { + if rust_env.is_null() { + return 0; + } + let rust_env = unsafe { &*rust_env }; + rust_env.blocks.len() as u64 +} + +/// FFI: Get Rust's compiled bytes length for a block. 
+#[unsafe(no_mangle)] +extern "C" fn rs_get_block_bytes_len( + rust_env: *const RustCompiledEnv, + lowlink_name: LeanObject, +) -> u64 { + if rust_env.is_null() { + return 0; + } + let global_cache = GlobalCache::default(); + let name = decode_name(lowlink_name, &global_cache); + + let rust_env = unsafe { &*rust_env }; + + match rust_env.blocks.get(&name) { + Some((bytes, _)) => bytes.len() as u64, + None => 0, + } +} + +/// FFI: Copy Rust's compiled bytes into a pre-allocated Lean ByteArray. +#[unsafe(no_mangle)] +extern "C" fn rs_copy_block_bytes( + rust_env: *const RustCompiledEnv, + lowlink_name: LeanObject, + dest: LeanByteArray, +) { + if rust_env.is_null() { + return; + } + let global_cache = GlobalCache::default(); + let name = decode_name(lowlink_name, &global_cache); + + let rust_env = unsafe { &*rust_env }; + + let bytes = match rust_env.blocks.get(&name) { + Some((bytes, _)) => bytes, + None => return, + }; + + // Copy into the Lean ByteArray + unsafe { dest.set_data(bytes) }; +} + +/// FFI: Get Rust's sharing vector length for a block. +#[unsafe(no_mangle)] +extern "C" fn rs_get_block_sharing_len( + rust_env: *const RustCompiledEnv, + lowlink_name: LeanObject, +) -> u64 { + if rust_env.is_null() { + return 0; + } + let global_cache = GlobalCache::default(); + let name = decode_name(lowlink_name, &global_cache); + + let rust_env = unsafe { &*rust_env }; + + match rust_env.blocks.get(&name) { + Some((_, sharing_len)) => *sharing_len as u64, + None => 0, + } +} + +// ============================================================================= +// Pre-sharing expression extraction FFI +// ============================================================================= + +/// Frame for iterative unshare traversal. +enum UnshareFrame<'a> { + Visit(&'a Arc), + BuildApp, + BuildLam, + BuildAll, + BuildLet(bool), + BuildPrj(u64, u64), +} + +/// Expand Share(idx) references in an expression using the sharing vector. 
+/// This reconstructs the "pre-sharing" expression from the post-sharing +/// representation. Uses iterative traversal to avoid stack overflow on deep +/// expressions. +#[allow(clippy::cast_possible_truncation)] +fn unshare_expr( + expr: &Arc, + sharing: &[Arc], +) -> Arc { + let mut stack: Vec> = vec![UnshareFrame::Visit(expr)]; + let mut results: Vec> = Vec::new(); + + while let Some(frame) = stack.pop() { + match frame { + UnshareFrame::Visit(e) => match e.as_ref() { + IxonExpr::Share(idx) => { + if (*idx as usize) < sharing.len() { + stack.push(UnshareFrame::Visit(&sharing[*idx as usize])); + } else { + results.push(e.clone()); + } + }, + IxonExpr::App(f, a) => { + stack.push(UnshareFrame::BuildApp); + stack.push(UnshareFrame::Visit(a)); + stack.push(UnshareFrame::Visit(f)); + }, + IxonExpr::Lam(t, b) => { + stack.push(UnshareFrame::BuildLam); + stack.push(UnshareFrame::Visit(b)); + stack.push(UnshareFrame::Visit(t)); + }, + IxonExpr::All(t, b) => { + stack.push(UnshareFrame::BuildAll); + stack.push(UnshareFrame::Visit(b)); + stack.push(UnshareFrame::Visit(t)); + }, + IxonExpr::Let(nd, t, v, b) => { + stack.push(UnshareFrame::BuildLet(*nd)); + stack.push(UnshareFrame::Visit(b)); + stack.push(UnshareFrame::Visit(v)); + stack.push(UnshareFrame::Visit(t)); + }, + IxonExpr::Prj(ti, fi, v) => { + stack.push(UnshareFrame::BuildPrj(*ti, *fi)); + stack.push(UnshareFrame::Visit(v)); + }, + // Leaf nodes - no children to unshare + _ => results.push(e.clone()), + }, + UnshareFrame::BuildApp => { + let a = results.pop().unwrap(); + let f = results.pop().unwrap(); + results.push(Arc::new(IxonExpr::App(f, a))); + }, + UnshareFrame::BuildLam => { + let b = results.pop().unwrap(); + let t = results.pop().unwrap(); + results.push(Arc::new(IxonExpr::Lam(t, b))); + }, + UnshareFrame::BuildAll => { + let b = results.pop().unwrap(); + let t = results.pop().unwrap(); + results.push(Arc::new(IxonExpr::All(t, b))); + }, + UnshareFrame::BuildLet(nd) => { + let b = 
results.pop().unwrap(); + let v = results.pop().unwrap(); + let t = results.pop().unwrap(); + results.push(Arc::new(IxonExpr::Let(nd, t, v, b))); + }, + UnshareFrame::BuildPrj(ti, fi) => { + let v = results.pop().unwrap(); + results.push(Arc::new(IxonExpr::Prj(ti, fi, v))); + }, + } + } + + results.pop().unwrap() +} + +/// FFI: Get the pre-sharing root expressions for a constant. +/// Returns the number of root expressions, and writes serialized expressions to the output buffer. +/// Each expression is serialized without sharing (Share nodes are expanded). +/// +/// Output format: [n_exprs:u64, len1:u64, expr1_bytes..., len2:u64, expr2_bytes..., ...] +#[unsafe(no_mangle)] +extern "C" fn rs_get_pre_sharing_exprs( + rust_env: *const RustCompiledEnv, + lowlink_name: LeanObject, + out_buf: LeanByteArray, +) -> u64 { + if rust_env.is_null() { + return 0; + } + let global_cache = GlobalCache::default(); + let name = decode_name(lowlink_name, &global_cache); + + let rust_env = unsafe { &*rust_env }; + + // Look up the address for this name + let addr = match rust_env.compile_state.name_to_addr.get(&name) { + Some(a) => a.clone(), + None => { + return 0; + }, + }; + + // Get the constant (note: contains post-sharing expressions) + let constant = match rust_env.compile_state.env.get_const(&addr) { + Some(c) => c, + None => { + return 0; + }, + }; + + // Extract root expressions from the constant info + let root_exprs: Vec> = match &constant.info { + ConstantInfo::Defn(def) => vec![def.typ.clone(), def.value.clone()], + ConstantInfo::Axio(ax) => vec![ax.typ.clone()], + ConstantInfo::Quot(q) => vec![q.typ.clone()], + ConstantInfo::Recr(rec) => { + let mut exprs = vec![rec.typ.clone()]; + for rule in &rec.rules { + exprs.push(rule.rhs.clone()); + } + exprs + }, + // Projections don't contain expressions directly + ConstantInfo::CPrj(_) + | ConstantInfo::RPrj(_) + | ConstantInfo::IPrj(_) + | ConstantInfo::DPrj(_) => { + vec![] + }, + ConstantInfo::Muts(muts) => { + let mut 
exprs = Vec::new(); + for mc in muts { + match mc { + crate::ix::ixon::constant::MutConst::Defn(def) => { + exprs.push(def.typ.clone()); + exprs.push(def.value.clone()); + }, + crate::ix::ixon::constant::MutConst::Indc(ind) => { + exprs.push(ind.typ.clone()); + for ctor in &ind.ctors { + exprs.push(ctor.typ.clone()); + } + }, + crate::ix::ixon::constant::MutConst::Recr(rec) => { + exprs.push(rec.typ.clone()); + for rule in &rec.rules { + exprs.push(rule.rhs.clone()); + } + }, + } + } + exprs + }, + }; + + // Unshare and serialize each root expression + let mut output_bytes: Vec = Vec::new(); + let n_exprs = root_exprs.len() as u64; + + // Write number of expressions + output_bytes.extend_from_slice(&n_exprs.to_le_bytes()); + + for expr in &root_exprs { + // Unshare the expression + let unshared = unshare_expr(expr, &constant.sharing); + + // Serialize to bytes + let mut expr_bytes: Vec = Vec::new(); + put_expr(&unshared, &mut expr_bytes); + + // Write length and bytes + output_bytes.extend_from_slice(&(expr_bytes.len() as u64).to_le_bytes()); + output_bytes.extend(expr_bytes); + } + + // Write to output buffer + unsafe { out_buf.set_data(&output_bytes) }; + + n_exprs +} + +/// FFI: Get the buffer length needed for pre-sharing expressions. 
+#[unsafe(no_mangle)] +extern "C" fn rs_get_pre_sharing_exprs_len( + rust_env: *const RustCompiledEnv, + lowlink_name: LeanObject, +) -> u64 { + if rust_env.is_null() { + return 0; + } + let global_cache = GlobalCache::default(); + let name = decode_name(lowlink_name, &global_cache); + + let rust_env = unsafe { &*rust_env }; + + // Look up the address for this name + let addr = match rust_env.compile_state.name_to_addr.get(&name) { + Some(a) => a.clone(), + None => return 0, + }; + + // Get the constant + let constant = match rust_env.compile_state.env.get_const(&addr) { + Some(c) => c, + None => return 0, + }; + + // Count root expressions + let n_exprs = match &constant.info { + ConstantInfo::Defn(_) => 2, + ConstantInfo::Axio(_) | ConstantInfo::Quot(_) => 1, + ConstantInfo::Recr(rec) => 1 + rec.rules.len(), + // Projections don't contain expressions directly + ConstantInfo::CPrj(_) + | ConstantInfo::RPrj(_) + | ConstantInfo::IPrj(_) + | ConstantInfo::DPrj(_) => 0, + ConstantInfo::Muts(muts) => { + let mut count = 0; + for mc in muts { + match mc { + crate::ix::ixon::constant::MutConst::Defn(_) => count += 2, + crate::ix::ixon::constant::MutConst::Indc(ind) => { + count += 1 + ind.ctors.len() + }, + crate::ix::ixon::constant::MutConst::Recr(rec) => { + count += 1 + rec.rules.len() + }, + } + } + count + }, + }; + + // Estimate: 8 bytes per header + some for expression data + // This is an upper bound estimate + (8 + n_exprs * 1024) as u64 +} + +/// FFI: Look up a constant's compiled address from RustCompiledEnv. +/// Copies the 32-byte blake3 hash into the provided ByteArray. +/// Returns 1 on success, 0 if name not found. 
#[unsafe(no_mangle)]
extern "C" fn rs_lookup_const_addr(
  rust_env: *const RustCompiledEnv,
  name_ptr: LeanObject,
  out_addr: LeanByteArray,
) -> u64 {
  if rust_env.is_null() {
    return 0;
  }
  let global_cache = GlobalCache::default();
  let name = decode_name(name_ptr, &global_cache);

  // SAFETY: checked non-null above; Lean keeps the env alive for the call.
  let rust_env = unsafe { &*rust_env };

  // Look up the address for this name
  match rust_env.compile_state.name_to_addr.get(&name) {
    Some(addr_ref) => {
      // Copy the 32-byte address into the output ByteArray
      // SAFETY: `out_addr` is a live, caller-owned ByteArray — assumed to be
      // pre-allocated with >= 32 bytes; confirm against the Lean caller.
      unsafe { out_addr.set_data(addr_ref.as_bytes()) };
      1
    },
    None => 0,
  }
}

/// FFI: Get the total number of compiled constants in RustCompiledEnv.
/// Returns 0 for a null env (indistinguishable from a genuinely empty env).
#[unsafe(no_mangle)]
extern "C" fn rs_get_compiled_const_count(
  rust_env: *const RustCompiledEnv,
) -> u64 {
  if rust_env.is_null() {
    return 0;
  }
  // SAFETY: checked non-null above; Lean keeps the env alive for the call.
  let rust_env = unsafe { &*rust_env };
  rust_env.compile_state.name_to_addr.len() as u64
}

// =============================================================================
// Error type FFI builders
// =============================================================================

use crate::ix::ixon::error::{CompileError, DecompileError, SerializeError};

impl LeanIxSerializeError {
  /// Build a Lean Ixon.SerializeError from a Rust SerializeError.
  ///
  /// Tags 0–6:
  ///  0: unexpectedEof (expected : String) → 1 obj
  ///  1: invalidTag (tag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8)
  ///  2: invalidFlag (flag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8)
  ///  3: invalidVariant (variant : UInt64) (context : String) → 1 obj + 8 scalar (UInt64)
  ///  4: invalidBool (value : UInt8) → 0 obj + 1 scalar (UInt8)
  ///  5: addressError → 0 obj + 0 scalar
  ///  6: invalidShareIndex (idx : UInt64) (max : Nat) → 1 obj (Nat) + 8 scalar (UInt64)
  pub fn build(se: &SerializeError) -> Self {
    let obj = match se {
      SerializeError::UnexpectedEof { expected } => {
        let ctor = LeanCtor::alloc(0, 1, 0);
        ctor.set(0, build_lean_string(expected));
        *ctor
      },
      SerializeError::InvalidTag { tag, context } => {
        let ctor = LeanCtor::alloc(1, 1, 1);
        ctor.set(0, build_lean_string(context));
        // Scalar area starts after the single object field (8 bytes in).
        ctor.set_u8(8, *tag);
        *ctor
      },
      SerializeError::InvalidFlag { flag, context } => {
        let ctor = LeanCtor::alloc(2, 1, 1);
        ctor.set(0, build_lean_string(context));
        ctor.set_u8(8, *flag);
        *ctor
      },
      SerializeError::InvalidVariant { variant, context } => {
        let ctor = LeanCtor::alloc(3, 1, 8);
        ctor.set(0, build_lean_string(context));
        ctor.set_u64(8, *variant);
        *ctor
      },
      SerializeError::InvalidBool { value } => {
        // No object fields, so the scalar area starts at offset 0.
        let ctor = LeanCtor::alloc(4, 0, 1);
        ctor.set_u8(0, *value);
        *ctor
      },
      // Field-less constructor: Lean represents it as a boxed scalar tag.
      SerializeError::AddressError => LeanObject::box_usize(5),
      SerializeError::InvalidShareIndex { idx, max } => {
        let ctor = LeanCtor::alloc(6, 1, 8);
        ctor.set(0, build_lean_nat_usize(*max));
        ctor.set_u64(8, *idx);
        *ctor
      },
    };
    Self::new(obj)
  }

  /// Decode a Lean Ixon.SerializeError to a Rust SerializeError.
  pub fn decode(self) -> SerializeError {
    // Tag 5 (addressError) has 0 fields → Lean represents as scalar
    if self.is_scalar() {
      let tag = self.unbox_usize();
      assert_eq!(tag, 5, "Invalid scalar SerializeError tag: {}", tag);
      return SerializeError::AddressError;
    }
    let ctor = self.as_ctor();
    match ctor.tag() {
      0 => {
        let expected = ctor.get(0).as_string().to_string();
        SerializeError::UnexpectedEof { expected }
      },
      1 => {
        let context = ctor.get(0).as_string().to_string();
        // scalar_u8(num_obj_fields, byte_offset) — reads past 1 object field.
        let tag_val = ctor.scalar_u8(1, 0);
        SerializeError::InvalidTag { tag: tag_val, context }
      },
      2 => {
        let context = ctor.get(0).as_string().to_string();
        let flag = ctor.scalar_u8(1, 0);
        SerializeError::InvalidFlag { flag, context }
      },
      3 => {
        let context = ctor.get(0).as_string().to_string();
        let variant = ctor.scalar_u64(1, 0);
        SerializeError::InvalidVariant { variant, context }
      },
      4 => {
        let value = ctor.scalar_u8(0, 0);
        SerializeError::InvalidBool { value }
      },
      // Defensive: also accept tag 5 in boxed form, mirroring the scalar path.
      5 => SerializeError::AddressError,
      6 => {
        // Lossy on purpose: a Nat that does not fit usize decodes as 0.
        let max = Nat::from_obj(ctor.get(0))
          .to_u64()
          .and_then(|x| usize::try_from(x).ok())
          .unwrap_or(0);
        let idx = ctor.scalar_u64(1, 0);
        SerializeError::InvalidShareIndex { idx, max }
      },
      _ => unreachable!("Invalid SerializeError tag: {}", ctor.tag()),
    }
  }
}

impl LeanIxDecompileError {
  /// Build a Lean DecompileError from a Rust DecompileError.
+ /// + /// Layout for index variants (tags 0–4): + /// `(idx : UInt64) (len/max : Nat) (constant : String)` + /// → 2 object fields (Nat, String) + 8 scalar bytes (UInt64) + /// → `lean_alloc_ctor(tag, 2, 8)` + /// → obj[0] = Nat, obj[1] = String, scalar[0] = UInt64 + pub fn build(err: &DecompileError) -> Self { + let obj = match err { + DecompileError::InvalidRefIndex { idx, refs_len, constant } => { + let ctor = LeanCtor::alloc(0, 2, 8); + ctor.set(0, build_lean_nat_usize(*refs_len)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidUnivIndex { idx, univs_len, constant } => { + let ctor = LeanCtor::alloc(1, 2, 8); + ctor.set(0, build_lean_nat_usize(*univs_len)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidShareIndex { idx, max, constant } => { + let ctor = LeanCtor::alloc(2, 2, 8); + ctor.set(0, build_lean_nat_usize(*max)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidRecIndex { idx, ctx_size, constant } => { + let ctor = LeanCtor::alloc(3, 2, 8); + ctor.set(0, build_lean_nat_usize(*ctx_size)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidUnivVarIndex { idx, max, constant } => { + let ctor = LeanCtor::alloc(4, 2, 8); + ctor.set(0, build_lean_nat_usize(*max)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::MissingAddress(addr) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + DecompileError::MissingMetadata(addr) => { + let ctor = LeanCtor::alloc(6, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + DecompileError::BlobNotFound(addr) => { + let ctor = LeanCtor::alloc(7, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + DecompileError::BadBlobFormat { addr, expected } => { 
+ let ctor = LeanCtor::alloc(8, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, build_lean_string(expected)); + *ctor + }, + DecompileError::BadConstantFormat { msg } => { + let ctor = LeanCtor::alloc(9, 1, 0); + ctor.set(0, build_lean_string(msg)); + *ctor + }, + DecompileError::Serialize(se) => { + let ctor = LeanCtor::alloc(10, 1, 0); + ctor.set(0, LeanIxSerializeError::build(se)); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode a Lean DecompileError to a Rust DecompileError. + pub fn decode(self) -> DecompileError { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + let refs_len = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidRefIndex { idx, refs_len, constant } + }, + 1 => { + let univs_len = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidUnivIndex { idx, univs_len, constant } + }, + 2 => { + let max = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidShareIndex { idx, max, constant } + }, + 3 => { + let ctx_size = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidRecIndex { idx, ctx_size, constant } + }, + 4 => { + let max = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidUnivVarIndex { idx, max, constant } + }, + 5 => { + 
DecompileError::MissingAddress(LeanIxAddress::new(ctor.get(0)).decode()) + }, + 6 => DecompileError::MissingMetadata( + LeanIxAddress::new(ctor.get(0)).decode(), + ), + 7 => { + DecompileError::BlobNotFound(LeanIxAddress::new(ctor.get(0)).decode()) + }, + 8 => { + let addr = LeanIxAddress::new(ctor.get(0)).decode(); + let expected = ctor.get(1).as_string().to_string(); + DecompileError::BadBlobFormat { addr, expected } + }, + 9 => { + let msg = ctor.get(0).as_string().to_string(); + DecompileError::BadConstantFormat { msg } + }, + 10 => DecompileError::Serialize( + LeanIxSerializeError::new(ctor.get(0)).decode(), + ), + _ => unreachable!("Invalid DecompileError tag: {}", ctor.tag()), + } + } +} + +impl LeanIxCompileError { + /// Build a Lean CompileError from a Rust CompileError. + /// + /// Tags 0–5: + /// 0: missingConstant (name : String) → 1 obj + /// 1: missingAddress (addr : Address) → 1 obj + /// 2: invalidMutualBlock (reason : String) → 1 obj + /// 3: unsupportedExpr (desc : String) → 1 obj + /// 4: unknownUnivParam (curr param : String) → 2 obj + /// 5: serializeError (msg : String) → 1 obj + pub fn build(err: &CompileError) -> Self { + let obj = match err { + CompileError::MissingConstant { name } => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_lean_string(name)); + *ctor + }, + CompileError::MissingAddress(addr) => { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + CompileError::InvalidMutualBlock { reason } => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, build_lean_string(reason)); + *ctor + }, + CompileError::UnsupportedExpr { desc } => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, build_lean_string(desc)); + *ctor + }, + CompileError::UnknownUnivParam { curr, param } => { + let ctor = LeanCtor::alloc(4, 2, 0); + ctor.set(0, build_lean_string(curr)); + ctor.set(1, build_lean_string(param)); + *ctor + }, + CompileError::Serialize(se) => { + let ctor = LeanCtor::alloc(5, 
1, 0); + ctor.set(0, LeanIxSerializeError::build(se)); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode a Lean CompileError to a Rust CompileError. + pub fn decode(self) -> CompileError { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + let name = ctor.get(0).as_string().to_string(); + CompileError::MissingConstant { name } + }, + 1 => { + CompileError::MissingAddress(LeanIxAddress::new(ctor.get(0)).decode()) + }, + 2 => { + let reason = ctor.get(0).as_string().to_string(); + CompileError::InvalidMutualBlock { reason } + }, + 3 => { + let desc = ctor.get(0).as_string().to_string(); + CompileError::UnsupportedExpr { desc } + }, + 4 => { + let curr = ctor.get(0).as_string().to_string(); + let param = ctor.get(1).as_string().to_string(); + CompileError::UnknownUnivParam { curr, param } + }, + 5 => { + CompileError::Serialize(LeanIxSerializeError::new(ctor.get(0)).decode()) + }, + _ => unreachable!("Invalid CompileError tag: {}", ctor.tag()), + } + } +} + +/// FFI: Round-trip a DecompileError: Lean → Rust → Lean. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_decompile_error( + obj: LeanIxDecompileError, +) -> LeanIxDecompileError { + let err = obj.decode(); + LeanIxDecompileError::build(&err) +} + +/// FFI: Round-trip a CompileError: Lean → Rust → Lean. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_compile_error( + obj: LeanIxCompileError, +) -> LeanIxCompileError { + let err = obj.decode(); + LeanIxCompileError::build(&err) +} + +/// FFI: Round-trip a SerializeError: Lean → Rust → Lean. 
#[unsafe(no_mangle)]
pub extern "C" fn rs_roundtrip_serialize_error(
  obj: LeanIxSerializeError,
) -> LeanIxSerializeError {
  let err = obj.decode();
  LeanIxSerializeError::build(&err)
}

// =============================================================================
// Decompilation FFI
// =============================================================================

/// FFI: Decompile an Ixon.RawEnv → Except DecompileError (Array (Ix.Name × Ix.ConstantInfo)). Pure.
#[unsafe(no_mangle)]
pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanExcept {
  let decoded = raw_env_obj.decode();
  let env = decoded_to_ixon_env(&decoded);

  // Wrap in CompileState (decompile_env only uses .env); the remaining
  // fields are freshly-created empty maps/sets just to satisfy the struct.
  let stt = CompileState {
    env,
    name_to_addr: DashMap::new(),
    blocks: DashSet::new(),
    block_stats: DashMap::new(),
  };

  match decompile_env(&stt) {
    Ok(dstt) => {
      let entries: Vec<_> = dstt.env.into_iter().collect();
      let mut cache = LeanBuildCache::with_capacity(entries.len());

      let arr = LeanArray::alloc(entries.len());
      for (i, (name, info)) in entries.iter().enumerate() {
        let name_obj = LeanIxName::build(&mut cache, name);
        let info_obj = LeanIxConstantInfo::build(&mut cache, info);
        // Lean pair `(a, b)` is Prod.mk: ctor tag 0 with two object fields.
        let pair = LeanCtor::alloc(0, 2, 0);
        pair.set(0, name_obj);
        pair.set(1, info_obj);
        arr.set(i, *pair);
      }

      LeanExcept::ok(arr)
    },
    Err(e) => LeanExcept::error(LeanIxDecompileError::build(&e)),
  }
}
diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs
new file mode 100644
index 00000000..1a3a0d7e
--- /dev/null
+++ b/src/ffi/graph.rs
@@ -0,0 +1,123 @@
//! Graph and SCC FFI functions.

use std::sync::Arc;

use crate::ffi::ffi_io_guard;
use crate::ix::condense::compute_sccs;
use crate::ix::graph::build_ref_graph;
use crate::lean::LeanIxCondensedBlocks;
use lean_ffi::object::{LeanArray, LeanCtor, LeanIOResult, LeanList};

use crate::ffi::builder::LeanBuildCache;
use crate::ffi::lean_env::decode_env;
use crate::lean::LeanIxName;

/// Build an Array (Ix.Name × Array Ix.Name) from a RefMap.
///
/// Each entry becomes a Lean pair (ctor tag 0, two object fields) of the
/// node name and an array of the names it references.
pub fn build_ref_graph_array(
  cache: &mut LeanBuildCache,
  refs: &crate::ix::graph::RefMap,
) -> LeanArray {
  let arr = LeanArray::alloc(refs.len());
  for (i, (name, ref_set)) in refs.iter().enumerate() {
    let name_obj = LeanIxName::build(cache, name);

    let refs_arr = LeanArray::alloc(ref_set.len());
    for (j, ref_name) in ref_set.iter().enumerate() {
      let ref_name_obj = LeanIxName::build(cache, ref_name);
      refs_arr.set(j, ref_name_obj);
    }

    let pair = LeanCtor::alloc(0, 2, 0);
    pair.set(0, name_obj);
    pair.set(1, *refs_arr);
    arr.set(i, *pair);
  }
  arr
}

impl LeanIxCondensedBlocks {
  /// Build a RustCondensedBlocks structure.
  ///
  /// All three fields share the same "(name, payload) pair array" encoding;
  /// the three loops below differ only in the payload built for each entry.
  pub fn build(
    cache: &mut LeanBuildCache,
    condensed: &crate::ix::condense::CondensedBlocks,
  ) -> Self {
    // Build lowLinks: Array (Ix.Name × Ix.Name)
    let low_links_arr = LeanArray::alloc(condensed.low_links.len());
    for (i, (name, low_link)) in condensed.low_links.iter().enumerate() {
      let name_obj = LeanIxName::build(cache, name);
      let low_link_obj = LeanIxName::build(cache, low_link);
      let pair = LeanCtor::alloc(0, 2, 0);
      pair.set(0, name_obj);
      pair.set(1, low_link_obj);
      low_links_arr.set(i, *pair);
    }

    // Build blocks: Array (Ix.Name × Array Ix.Name)
    let blocks_arr = LeanArray::alloc(condensed.blocks.len());
    for (i, (name, block_set)) in condensed.blocks.iter().enumerate() {
      let name_obj = LeanIxName::build(cache, name);
      let block_names_arr = LeanArray::alloc(block_set.len());
      for (j, block_name) in block_set.iter().enumerate() {
        let block_name_obj = LeanIxName::build(cache, block_name);
        block_names_arr.set(j, block_name_obj);
      }
      let pair = LeanCtor::alloc(0, 2, 0);
      pair.set(0, name_obj);
      pair.set(1, *block_names_arr);
      blocks_arr.set(i, *pair);
    }

    // Build blockRefs: Array (Ix.Name × Array Ix.Name)
    let block_refs_arr = LeanArray::alloc(condensed.block_refs.len());
    for (i, (name, ref_set)) in condensed.block_refs.iter().enumerate() {
      let name_obj = LeanIxName::build(cache, name);
      let refs_arr = LeanArray::alloc(ref_set.len());
      for (j, ref_name) in ref_set.iter().enumerate() {
        let ref_name_obj = LeanIxName::build(cache, ref_name);
        refs_arr.set(j, ref_name_obj);
      }
      let pair = LeanCtor::alloc(0, 2, 0);
      pair.set(0, name_obj);
      pair.set(1, *refs_arr);
      block_refs_arr.set(i, *pair);
    }

    // Build RustCondensedBlocks structure (3 fields)
    let result = LeanCtor::alloc(0, 3, 0);
    result.set(0, *low_links_arr);
    result.set(1, *blocks_arr);
    result.set(2, *block_refs_arr);
    Self::new(*result)
  }
}

// =============================================================================
// FFI Exports
// =============================================================================

/// FFI function to build a reference graph from a Lean environment.
#[unsafe(no_mangle)]
pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanList) -> LeanIOResult {
  // ffi_io_guard catches panics so they surface as Lean IO errors instead
  // of unwinding across the FFI boundary.
  ffi_io_guard(std::panic::AssertUnwindSafe(|| {
    let rust_env = decode_env(env_consts_ptr);
    let rust_env = Arc::new(rust_env);
    let ref_graph = build_ref_graph(&rust_env);
    let mut cache = LeanBuildCache::with_capacity(rust_env.len());
    let result = build_ref_graph_array(&mut cache, &ref_graph.out_refs);
    LeanIOResult::ok(result)
  }))
}

/// FFI function to compute SCCs from a Lean environment.
#[unsafe(no_mangle)]
pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanList) -> LeanIOResult {
  ffi_io_guard(std::panic::AssertUnwindSafe(|| {
    let rust_env = decode_env(env_consts_ptr);
    let rust_env = Arc::new(rust_env);
    let ref_graph = build_ref_graph(&rust_env);
    let condensed = compute_sccs(&ref_graph.out_refs);
    let mut cache = LeanBuildCache::with_capacity(rust_env.len());
    let result = LeanIxCondensedBlocks::build(&mut cache, &condensed);
    LeanIOResult::ok(result)
  }))
}
diff --git a/src/ffi/iroh.rs b/src/ffi/iroh.rs
new file mode 100644
index 00000000..6ccce722
--- /dev/null
+++ b/src/ffi/iroh.rs
@@ -0,0 +1,120 @@
use lean_ffi::object::{
  LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanString,
};

use crate::iroh::common::{GetRequest, PutRequest, Request, Response};
use crate::iroh::{client, server};

lean_ffi::lean_domain_type! {
  /// Lean `Iroh.Connect.PutResponse` object.
  LeanPutResponse;
  /// Lean `Iroh.Connect.GetResponse` object.
  LeanGetResponse;
}

impl LeanPutResponse {
  /// Build from `message` and `hash` strings.
+ /// + /// ```lean + /// structure PutResponse where + /// message : String + /// hash : String + /// ``` + pub fn mk(message: &str, hash: &str) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanString::new(message)); + ctor.set(1, LeanString::new(hash)); + Self::new(*ctor) + } +} + +impl LeanGetResponse { + /// Build from `message`, `hash`, and raw `bytes`. + /// + /// ```lean + /// structure GetResponse where + /// message : String + /// hash : String + /// bytes : ByteArray + /// ``` + pub fn mk(message: &str, hash: &str, bytes: &[u8]) -> Self { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, LeanString::new(message)); + ctor.set(1, LeanString::new(hash)); + ctor.set(2, LeanByteArray::from_bytes(bytes)); + Self::new(*ctor) + } +} + +/// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_put( + node_id: LeanString, + addrs: LeanArray, + relay_url: LeanString, + input: LeanString, +) -> LeanExcept { + let node_id = node_id.to_string(); + let addrs: Vec = addrs.map(|x| x.as_string().to_string()); + let relay_url = relay_url.to_string(); + let input_str = input.to_string(); + + let request = + Request::Put(PutRequest { bytes: input_str.as_bytes().to_vec() }); + let rt = + tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + + match rt.block_on(client::connect(&node_id, &addrs, &relay_url, request)) { + Ok(response) => match response { + Response::Put(put_response) => LeanExcept::ok(LeanPutResponse::mk( + &put_response.message, + &put_response.hash, + )), + _ => LeanExcept::error_string("error: incorrect server response"), + }, + Err(err) => LeanExcept::error_string(&err.to_string()), + } +} + +/// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_get( + node_id: LeanString, + addrs: LeanArray, + relay_url: LeanString, + 
hash: LeanString, +) -> LeanExcept { + let node_id = node_id.to_string(); + let addrs: Vec = addrs.map(|x| x.as_string().to_string()); + let relay_url = relay_url.to_string(); + let hash_str = hash.to_string(); + + let request = Request::Get(GetRequest { hash: hash_str.clone() }); + + let rt = + tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + + match rt.block_on(client::connect(&node_id, &addrs, &relay_url, request)) { + Ok(response) => match response { + Response::Get(get_response) => LeanExcept::ok(LeanGetResponse::mk( + &get_response.message, + &get_response.hash, + &get_response.bytes, + )), + _ => LeanExcept::error_string("error: incorrect server response"), + }, + Err(err) => LeanExcept::error_string(&err.to_string()), + } +} + +/// `Iroh.Serve.serve' : Unit → Except String Unit` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_serve() -> LeanExcept { + let rt = + tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + + match rt.block_on(server::serve()) { + Ok(()) => LeanExcept::ok(0), + Err(err) => LeanExcept::error_string(&err.to_string()), + } +} diff --git a/src/lean/ffi/ix.rs b/src/ffi/ix.rs similarity index 100% rename from src/lean/ffi/ix.rs rename to src/ffi/ix.rs diff --git a/src/ffi/ix/address.rs b/src/ffi/ix/address.rs new file mode 100644 index 00000000..9531e9ba --- /dev/null +++ b/src/ffi/ix/address.rs @@ -0,0 +1,49 @@ +//! Ix.Address build/decode/roundtrip FFI. +//! +//! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash + +use crate::ix::address::Address; +use crate::lean::LeanIxAddress; +use lean_ffi::object::{LeanArray, LeanByteArray}; + +impl LeanIxAddress { + /// Build an Ix.Address from a blake3::Hash. + pub fn build_from_hash(hash: &blake3::Hash) -> Self { + LeanByteArray::from_bytes(hash.as_bytes()).into() + } + + /// Build an Ix.Address from an Ixon Address (which is just a [u8; 32]). 
+ pub fn build(addr: &Address) -> Self { + LeanByteArray::from_bytes(addr.as_bytes()).into() + } + + /// Build an Array of Addresses. + pub fn build_array(addrs: &[Address]) -> LeanArray { + let arr = LeanArray::alloc(addrs.len()); + for (i, addr) in addrs.iter().enumerate() { + arr.set(i, Self::build(addr)); + } + arr + } + + /// Decode a ByteArray (Address) to Address. + pub fn decode(self) -> Address { + Address::from_slice(&self.as_bytes()[..32]) + .expect("Address should be 32 bytes") + } + + /// Decode Array Address. + pub fn decode_array(obj: LeanArray) -> Vec
{ + obj.map(|x| LeanIxAddress::new(x).decode()) + } +} + +/// Round-trip an Ix.Address: decode ByteArray, re-encode. +/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray directly +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_address( + addr: LeanIxAddress, +) -> LeanIxAddress { + let decoded = addr.decode(); + LeanIxAddress::build(&decoded) +} diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs new file mode 100644 index 00000000..49e2d9dc --- /dev/null +++ b/src/ffi/ix/constant.rs @@ -0,0 +1,450 @@ +//! Ix.ConstantInfo build/decode/roundtrip FFI. +//! +//! ConstantInfo variants: +//! - Tag 0: axiomInfo (v : AxiomVal) +//! - Tag 1: defnInfo (v : DefinitionVal) +//! - Tag 2: thmInfo (v : TheoremVal) +//! - Tag 3: opaqueInfo (v : OpaqueVal) +//! - Tag 4: quotInfo (v : QuotVal) +//! - Tag 5: inductInfo (v : InductiveVal) +//! - Tag 6: ctorInfo (v : ConstructorVal) +//! - Tag 7: recInfo (v : RecursorVal) + +use crate::ix::env::{ + AxiomVal, ConstantInfo, ConstantVal, ConstructorVal, DefinitionSafety, + DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, + RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, +}; +use crate::lean::{ + LeanIxConstantInfo, LeanIxConstantVal, LeanIxExpr, LeanIxName, + LeanIxRecursorRule, LeanIxReducibilityHints, +}; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; + +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::primitives::build_nat; + +// ============================================================================= +// ConstantVal +// ============================================================================= + +impl LeanIxConstantVal { + /// Build a Ix.ConstantVal structure. 
+ pub fn build(cache: &mut LeanBuildCache, cv: &ConstantVal) -> Self { + // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } + let name_obj = LeanIxName::build(cache, &cv.name); + let level_params_obj = LeanIxName::build_array(cache, &cv.level_params); + let type_obj = LeanIxExpr::build(cache, &cv.typ); + + let obj = LeanCtor::alloc(0, 3, 0); + obj.set(0, name_obj); + obj.set(1, level_params_obj); + obj.set(2, type_obj); + Self::new(*obj) + } + + /// Decode Ix.ConstantVal from Lean object. + /// ConstantVal = { name : Name, levelParams : Array Name, type : Expr } + pub fn decode(self) -> ConstantVal { + let ctor = self.as_ctor(); + let name = LeanIxName::new(ctor.get(0)).decode(); + let level_params: Vec = + ctor.get(1).as_array().map(|x| LeanIxName::new(x).decode()); + let typ = LeanIxExpr::new(ctor.get(2)).decode(); + + ConstantVal { name, level_params, typ } + } +} + +// ============================================================================= +// ReducibilityHints +// ============================================================================= + +impl LeanIxReducibilityHints { + /// Build ReducibilityHints. + /// NOTE: In Lean 4, 0-field constructors are boxed scalars when the inductive has + /// other constructors with fields. So opaque and abbrev use box_usize. + pub fn build(hints: &ReducibilityHints) -> Self { + let obj = match hints { + // | opaque -- tag 0, boxed as scalar + ReducibilityHints::Opaque => LeanObject::box_usize(0), + // | abbrev -- tag 1, boxed as scalar + ReducibilityHints::Abbrev => LeanObject::box_usize(1), + // | regular (h : UInt32) -- tag 2, object constructor + ReducibilityHints::Regular(h) => { + // UInt32 is a scalar, stored inline + let obj = LeanCtor::alloc(2, 0, 4); + obj.set_u32(0, *h); + *obj + }, + }; + Self::new(obj) + } + + /// Decode Lean.ReducibilityHints from Lean object. 
+ pub fn decode(self) -> ReducibilityHints { + if self.is_scalar() { + let tag = self.as_ptr() as usize >> 1; + match tag { + 0 => return ReducibilityHints::Opaque, + 1 => return ReducibilityHints::Abbrev, + _ => panic!("Invalid ReducibilityHints scalar tag: {}", tag), + } + } + + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => ReducibilityHints::Opaque, + 1 => ReducibilityHints::Abbrev, + 2 => { + // regular: 0 obj fields, 4 scalar bytes (UInt32) + ReducibilityHints::Regular(ctor.scalar_u32(0, 0)) + }, + _ => panic!("Invalid ReducibilityHints tag: {}", ctor.tag()), + } + } +} + +// ============================================================================= +// RecursorRule +// ============================================================================= + +impl LeanIxRecursorRule { + /// Decode Ix.RecursorRule from Lean object. + pub fn decode(self) -> RecursorRule { + let ctor = self.as_ctor(); + RecursorRule { + ctor: LeanIxName::new(ctor.get(0)).decode(), + n_fields: Nat::from_obj(ctor.get(1)), + rhs: LeanIxExpr::new(ctor.get(2)).decode(), + } + } +} + +// ============================================================================= +// ConstantInfo +// ============================================================================= + +impl LeanIxRecursorRule { + /// Build an Array of RecursorRule. + pub fn build_array( + cache: &mut LeanBuildCache, + rules: &[RecursorRule], + ) -> LeanArray { + let arr = LeanArray::alloc(rules.len()); + for (i, rule) in rules.iter().enumerate() { + // RecursorRule = { ctor : Name, nFields : Nat, rhs : Expr } + let ctor_obj = LeanIxName::build(cache, &rule.ctor); + let n_fields_obj = build_nat(&rule.n_fields); + let rhs_obj = LeanIxExpr::build(cache, &rule.rhs); + + let rule_obj = LeanCtor::alloc(0, 3, 0); + rule_obj.set(0, ctor_obj); + rule_obj.set(1, n_fields_obj); + rule_obj.set(2, rhs_obj); + + arr.set(i, rule_obj); + } + arr + } +} + +impl LeanIxConstantInfo { + /// Build a Ix.ConstantInfo from a Rust ConstantInfo. 
    pub fn build(cache: &mut LeanBuildCache, info: &ConstantInfo) -> Self {
        // Each variant is encoded as an outer constructor (variant tag, one
        // object field) wrapping the payload structure built below. Scalar
        // byte offsets are always (num_obj_fields * 8) + byte_index.
        let result = match info {
            // | axiomInfo (v : AxiomVal) -- tag 0
            ConstantInfo::AxiomInfo(v) => {
                // AxiomVal = { cnst : ConstantVal, isUnsafe : Bool }
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let axiom_val = LeanCtor::alloc(0, 1, 1);
                axiom_val.set(0, cnst_obj);
                // Bool scalar sits after the single object field (offset 8).
                axiom_val.set_u8(8, v.is_unsafe as u8);

                let obj = LeanCtor::alloc(0, 1, 0);
                obj.set(0, axiom_val);
                *obj
            },
            // | defnInfo (v : DefinitionVal) -- tag 1
            ConstantInfo::DefnInfo(v) => {
                // DefinitionVal = { cnst, value, hints, safety, all }
                // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety)
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let value_obj = LeanIxExpr::build(cache, &v.value);
                let hints_obj = LeanIxReducibilityHints::build(&v.hints);
                let all_obj = LeanIxName::build_array(cache, &v.all);
                // DefinitionSafety is a fieldless enum encoded as a byte.
                let safety_byte = match v.safety {
                    DefinitionSafety::Unsafe => 0u8,
                    DefinitionSafety::Safe => 1u8,
                    DefinitionSafety::Partial => 2u8,
                };

                let defn_val = LeanCtor::alloc(0, 4, 1);
                defn_val.set(0, cnst_obj);
                defn_val.set(1, value_obj);
                defn_val.set(2, hints_obj);
                defn_val.set(3, all_obj);
                defn_val.set_u8(4 * 8, safety_byte);

                let obj = LeanCtor::alloc(1, 1, 0);
                obj.set(0, defn_val);
                *obj
            },
            // | thmInfo (v : TheoremVal) -- tag 2
            ConstantInfo::ThmInfo(v) => {
                // TheoremVal = { cnst, value, all }
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let value_obj = LeanIxExpr::build(cache, &v.value);
                let all_obj = LeanIxName::build_array(cache, &v.all);

                let thm_val = LeanCtor::alloc(0, 3, 0);
                thm_val.set(0, cnst_obj);
                thm_val.set(1, value_obj);
                thm_val.set(2, all_obj);

                let obj = LeanCtor::alloc(2, 1, 0);
                obj.set(0, thm_val);
                *obj
            },
            // | opaqueInfo (v : OpaqueVal) -- tag 3
            ConstantInfo::OpaqueInfo(v) => {
                // OpaqueVal = { cnst, value, isUnsafe, all }
                // Note: the Bool is stored in the scalar area, so object field
                // order is cnst, value, all.
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let value_obj = LeanIxExpr::build(cache, &v.value);
                let all_obj = LeanIxName::build_array(cache, &v.all);

                let opaque_val = LeanCtor::alloc(0, 3, 1);
                opaque_val.set(0, cnst_obj);
                opaque_val.set(1, value_obj);
                opaque_val.set(2, all_obj);
                opaque_val.set_u8(3 * 8, v.is_unsafe as u8);

                let obj = LeanCtor::alloc(3, 1, 0);
                obj.set(0, opaque_val);
                *obj
            },
            // | quotInfo (v : QuotVal) -- tag 4
            ConstantInfo::QuotInfo(v) => {
                // QuotVal = { cnst, kind }
                // Memory layout: 1 obj field (cnst), 1 scalar byte (kind)
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let kind_byte = match v.kind {
                    QuotKind::Type => 0u8,
                    QuotKind::Ctor => 1u8,
                    QuotKind::Lift => 2u8,
                    QuotKind::Ind => 3u8,
                };

                let quot_val = LeanCtor::alloc(0, 1, 1);
                quot_val.set(0, cnst_obj);
                quot_val.set_u8(8, kind_byte);

                let obj = LeanCtor::alloc(4, 1, 0);
                obj.set(0, quot_val);
                *obj
            },
            // | inductInfo (v : InductiveVal) -- tag 5
            ConstantInfo::InductInfo(v) => {
                // InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive }
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let num_params_obj = build_nat(&v.num_params);
                let num_indices_obj = build_nat(&v.num_indices);
                let all_obj = LeanIxName::build_array(cache, &v.all);
                let ctors_obj = LeanIxName::build_array(cache, &v.ctors);
                let num_nested_obj = build_nat(&v.num_nested);

                // 6 object fields, 3 scalar bytes for bools
                let induct_val = LeanCtor::alloc(0, 6, 3);
                induct_val.set(0, cnst_obj);
                induct_val.set(1, num_params_obj);
                induct_val.set(2, num_indices_obj);
                induct_val.set(3, all_obj);
                induct_val.set(4, ctors_obj);
                induct_val.set(5, num_nested_obj);
                induct_val.set_u8(6 * 8, v.is_rec as u8);
                induct_val.set_u8(6 * 8 + 1, v.is_unsafe as u8);
                induct_val.set_u8(6 * 8 + 2, v.is_reflexive as u8);

                let obj = LeanCtor::alloc(5, 1, 0);
                obj.set(0, induct_val);
                *obj
            },
            // | ctorInfo (v : ConstructorVal) -- tag 6
            ConstantInfo::CtorInfo(v) => {
                // ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe }
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let induct_obj = LeanIxName::build(cache, &v.induct);
                let cidx_obj = build_nat(&v.cidx);
                let num_params_obj = build_nat(&v.num_params);
                let num_fields_obj = build_nat(&v.num_fields);

                // 5 object fields, 1 scalar byte for bool
                let ctor_val = LeanCtor::alloc(0, 5, 1);
                ctor_val.set(0, cnst_obj);
                ctor_val.set(1, induct_obj);
                ctor_val.set(2, cidx_obj);
                ctor_val.set(3, num_params_obj);
                ctor_val.set(4, num_fields_obj);
                ctor_val.set_u8(5 * 8, v.is_unsafe as u8);

                let obj = LeanCtor::alloc(6, 1, 0);
                obj.set(0, ctor_val);
                *obj
            },
            // | recInfo (v : RecursorVal) -- tag 7
            ConstantInfo::RecInfo(v) => {
                // RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe }
                let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst);
                let all_obj = LeanIxName::build_array(cache, &v.all);
                let num_params_obj = build_nat(&v.num_params);
                let num_indices_obj = build_nat(&v.num_indices);
                let num_motives_obj = build_nat(&v.num_motives);
                let num_minors_obj = build_nat(&v.num_minors);
                let rules_obj = LeanIxRecursorRule::build_array(cache, &v.rules);

                // 7 object fields, 2 scalar bytes for bools
                let rec_val = LeanCtor::alloc(0, 7, 2);
                rec_val.set(0, cnst_obj);
                rec_val.set(1, all_obj);
                rec_val.set(2, num_params_obj);
                rec_val.set(3, num_indices_obj);
                rec_val.set(4, num_motives_obj);
                rec_val.set(5, num_minors_obj);
                rec_val.set(6, rules_obj);
                rec_val.set_u8(7 * 8, v.k as u8);
                rec_val.set_u8(7 * 8 + 1, v.is_unsafe as u8);

                let obj = LeanCtor::alloc(7, 1, 0);
                obj.set(0, rec_val);
                *obj
            },
        };

        Self::new(result)
    }

    /// Decode Ix.ConstantInfo from Lean object.
+ pub fn decode(self) -> ConstantInfo { + let outer = self.as_ctor(); + let inner_obj = outer.get(0); + let inner = inner_obj.as_ctor(); + + match outer.tag() { + 0 => { + let is_unsafe = inner.scalar_u8(1, 0) != 0; + + ConstantInfo::AxiomInfo(AxiomVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + is_unsafe, + }) + }, + 1 => { + let safety_byte = inner.scalar_u8(4, 0); + let safety = match safety_byte { + 0 => DefinitionSafety::Unsafe, + 1 => DefinitionSafety::Safe, + 2 => DefinitionSafety::Partial, + _ => panic!("Invalid DefinitionSafety: {}", safety_byte), + }; + + ConstantInfo::DefnInfo(DefinitionVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + value: LeanIxExpr::new(inner.get(1)).decode(), + hints: LeanIxReducibilityHints::new(inner.get(2)).decode(), + safety, + all: LeanIxName::decode_array(inner.get(3).as_array()), + }) + }, + 2 => ConstantInfo::ThmInfo(TheoremVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + value: LeanIxExpr::new(inner.get(1)).decode(), + all: LeanIxName::decode_array(inner.get(2).as_array()), + }), + 3 => { + let is_unsafe = inner.scalar_u8(3, 0) != 0; + + ConstantInfo::OpaqueInfo(OpaqueVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + value: LeanIxExpr::new(inner.get(1)).decode(), + is_unsafe, + all: LeanIxName::decode_array(inner.get(2).as_array()), + }) + }, + 4 => { + let kind_byte = inner.scalar_u8(1, 0); + let kind = match kind_byte { + 0 => QuotKind::Type, + 1 => QuotKind::Ctor, + 2 => QuotKind::Lift, + 3 => QuotKind::Ind, + _ => panic!("Invalid QuotKind: {}", kind_byte), + }; + + ConstantInfo::QuotInfo(QuotVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + kind, + }) + }, + 5 => { + let is_rec = inner.scalar_u8(6, 0) != 0; + let is_unsafe = inner.scalar_u8(6, 1) != 0; + let is_reflexive = inner.scalar_u8(6, 2) != 0; + + ConstantInfo::InductInfo(InductiveVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + num_params: Nat::from_obj(inner.get(1)), + 
num_indices: Nat::from_obj(inner.get(2)), + all: LeanIxName::decode_array(inner.get(3).as_array()), + ctors: LeanIxName::decode_array(inner.get(4).as_array()), + num_nested: Nat::from_obj(inner.get(5)), + is_rec, + is_unsafe, + is_reflexive, + }) + }, + 6 => { + let is_unsafe = inner.scalar_u8(5, 0) != 0; + + ConstantInfo::CtorInfo(ConstructorVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + induct: LeanIxName::new(inner.get(1)).decode(), + cidx: Nat::from_obj(inner.get(2)), + num_params: Nat::from_obj(inner.get(3)), + num_fields: Nat::from_obj(inner.get(4)), + is_unsafe, + }) + }, + 7 => { + let k = inner.scalar_u8(7, 0) != 0; + let is_unsafe = inner.scalar_u8(7, 1) != 0; + + let rules: Vec = + inner.get(6).as_array().map(|x| LeanIxRecursorRule::new(x).decode()); + + ConstantInfo::RecInfo(RecursorVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + all: LeanIxName::decode_array(inner.get(1).as_array()), + num_params: Nat::from_obj(inner.get(2)), + num_indices: Nat::from_obj(inner.get(3)), + num_motives: Nat::from_obj(inner.get(4)), + num_minors: Nat::from_obj(inner.get(5)), + rules, + k, + is_unsafe, + }) + }, + _ => panic!("Invalid ConstantInfo tag: {}", outer.tag()), + } + } +} + +/// Round-trip an Ix.ConstantInfo: decode from Lean, re-encode via LeanBuildCache. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_constant_info( + info_ptr: LeanIxConstantInfo, +) -> LeanIxConstantInfo { + let info = info_ptr.decode(); + let mut cache = LeanBuildCache::new(); + LeanIxConstantInfo::build(&mut cache, &info) +} diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs new file mode 100644 index 00000000..e0fa6bf0 --- /dev/null +++ b/src/ffi/ix/data.rs @@ -0,0 +1,435 @@ +//! Ix.DataValue, Ix.Syntax, Ix.SourceInfo build/decode/roundtrip FFI. 
use crate::ix::env::{
    DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved,
};
use crate::lean::{
    LeanIxDataValue, LeanIxInt, LeanIxName, LeanIxSourceInfo, LeanIxSubstring,
    LeanIxSyntax, LeanIxSyntaxPreresolved,
};
use lean_ffi::nat::Nat;
use lean_ffi::object::{LeanArray, LeanCtor, LeanString};

use crate::ffi::builder::LeanBuildCache;
use crate::ffi::primitives::build_nat;

impl LeanIxInt {
    /// Build a Ix.Int (ofNat or negSucc).
    pub fn build(int: &Int) -> Self {
        match int {
            // | ofNat (n : Nat) -- tag 0
            Int::OfNat(n) => {
                let obj = LeanCtor::alloc(0, 1, 0);
                obj.set(0, build_nat(n));
                Self::new(*obj)
            },
            // | negSucc (n : Nat) -- tag 1
            Int::NegSucc(n) => {
                let obj = LeanCtor::alloc(1, 1, 0);
                obj.set(0, build_nat(n));
                Self::new(*obj)
            },
        }
    }

    /// Decode Ix.Int from Lean object.
    /// Ix.Int: ofNat (tag 0, 1 field) | negSucc (tag 1, 1 field)
    pub fn decode(self) -> Int {
        let ctor = self.as_ctor();
        // Both variants carry a single Nat payload; only the tag differs.
        let nat = Nat::from_obj(ctor.get(0));
        match ctor.tag() {
            0 => Int::OfNat(nat),
            1 => Int::NegSucc(nat),
            _ => panic!("Invalid Ix.Int tag: {}", ctor.tag()),
        }
    }
}

impl LeanIxSubstring {
    /// Build a Ix.Substring.
    /// Substring = { str : String, startPos : Nat, stopPos : Nat }
    pub fn build(ss: &Substring) -> Self {
        let obj = LeanCtor::alloc(0, 3, 0);
        obj.set(0, LeanString::new(ss.str.as_str()));
        obj.set(1, build_nat(&ss.start_pos));
        obj.set(2, build_nat(&ss.stop_pos));
        Self::new(*obj)
    }

    /// Decode Ix.Substring.
    pub fn decode(self) -> Substring {
        let ctor = self.as_ctor();
        Substring {
            str: ctor.get(0).as_string().to_string(),
            start_pos: Nat::from_obj(ctor.get(1)),
            stop_pos: Nat::from_obj(ctor.get(2)),
        }
    }
}

impl LeanIxSourceInfo {
    /// Build a Ix.SourceInfo.
    pub fn build(si: &SourceInfo) -> Self {
        match si {
            // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0
            SourceInfo::Original(leading, pos, trailing, end_pos) => {
                let obj = LeanCtor::alloc(0, 4, 0);
                obj.set(0, LeanIxSubstring::build(leading));
                obj.set(1, build_nat(pos));
                obj.set(2, LeanIxSubstring::build(trailing));
                obj.set(3, build_nat(end_pos));
                Self::new(*obj)
            },
            // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1
            SourceInfo::Synthetic(pos, end_pos, canonical) => {
                let obj = LeanCtor::alloc(1, 2, 1);
                obj.set(0, build_nat(pos));
                obj.set(1, build_nat(end_pos));
                obj.set_u8(2 * 8, *canonical as u8);
                Self::new(*obj)
            },
            // | none -- tag 2
            // NOTE(review): `none` is built as a heap constructor here, while
            // fieldless ctors are usually boxed scalars; `decode` accepts both
            // forms — confirm the Lean side also matches on a heap ctor.
            SourceInfo::None => Self::new(*LeanCtor::alloc(2, 0, 0)),
        }
    }

    /// Decode Ix.SourceInfo.
    pub fn decode(self) -> SourceInfo {
        // A boxed scalar can only be the fieldless `none` constructor.
        if self.is_scalar() {
            return SourceInfo::None;
        }
        let ctor = self.as_ctor();
        match ctor.tag() {
            0 => {
                // original
                SourceInfo::Original(
                    LeanIxSubstring::new(ctor.get(0)).decode(),
                    Nat::from_obj(ctor.get(1)),
                    LeanIxSubstring::new(ctor.get(2)).decode(),
                    Nat::from_obj(ctor.get(3)),
                )
            },
            1 => {
                // synthetic: 2 obj fields (pos, end_pos), 1 scalar byte (canonical)
                let canonical = ctor.scalar_u8(2, 0) != 0;

                SourceInfo::Synthetic(
                    Nat::from_obj(ctor.get(0)),
                    Nat::from_obj(ctor.get(1)),
                    canonical,
                )
            },
            2 => SourceInfo::None,
            _ => panic!("Invalid SourceInfo tag: {}", ctor.tag()),
        }
    }
}

impl LeanIxSyntaxPreresolved {
    /// Build a Ix.SyntaxPreresolved.
+ pub fn build(cache: &mut LeanBuildCache, sp: &SyntaxPreresolved) -> Self { + match sp { + // | namespace (name : Name) -- tag 0 + SyntaxPreresolved::Namespace(name) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, LeanIxName::build(cache, name)); + Self::new(*obj) + }, + // | decl (name : Name) (aliases : Array String) -- tag 1 + SyntaxPreresolved::Decl(name, aliases) => { + let name_obj = LeanIxName::build(cache, name); + let aliases_obj = build_string_array(aliases); + let obj = LeanCtor::alloc(1, 2, 0); + obj.set(0, name_obj); + obj.set(1, aliases_obj); + Self::new(*obj) + }, + } + } + + /// Decode Ix.SyntaxPreresolved. + pub fn decode(self) -> SyntaxPreresolved { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // namespace + SyntaxPreresolved::Namespace(LeanIxName::new(ctor.get(0)).decode()) + }, + 1 => { + // decl + let name = LeanIxName::new(ctor.get(0)).decode(); + let aliases: Vec = + ctor.get(1).as_array().map(|obj| obj.as_string().to_string()); + + SyntaxPreresolved::Decl(name, aliases) + }, + _ => panic!("Invalid SyntaxPreresolved tag: {}", ctor.tag()), + } + } +} + +/// Build an Array of Strings. +pub fn build_string_array(strings: &[String]) -> LeanArray { + let arr = LeanArray::alloc(strings.len()); + for (i, s) in strings.iter().enumerate() { + arr.set(i, LeanString::new(s.as_str())); + } + arr +} + +impl LeanIxSyntax { + /// Build a Ix.Syntax. 
+ pub fn build(cache: &mut LeanBuildCache, syn: &Syntax) -> Self { + match syn { + // | missing -- tag 0 + Syntax::Missing => Self::new(*LeanCtor::alloc(0, 0, 0)), + // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 + Syntax::Node(info, kind, args) => { + let info_obj = LeanIxSourceInfo::build(info); + let kind_obj = LeanIxName::build(cache, kind); + let args_obj = Self::build_array(cache, args); + let obj = LeanCtor::alloc(1, 3, 0); + obj.set(0, info_obj); + obj.set(1, kind_obj); + obj.set(2, args_obj); + Self::new(*obj) + }, + // | atom (info : SourceInfo) (val : String) -- tag 2 + Syntax::Atom(info, val) => { + let info_obj = LeanIxSourceInfo::build(info); + let obj = LeanCtor::alloc(2, 2, 0); + obj.set(0, info_obj); + obj.set(1, LeanString::new(val.as_str())); + Self::new(*obj) + }, + // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 + Syntax::Ident(info, raw_val, val, preresolved) => { + let info_obj = LeanIxSourceInfo::build(info); + let raw_val_obj = LeanIxSubstring::build(raw_val); + let val_obj = LeanIxName::build(cache, val); + let preresolved_obj = Self::build_preresolved_array(cache, preresolved); + let obj = LeanCtor::alloc(3, 4, 0); + obj.set(0, info_obj); + obj.set(1, raw_val_obj); + obj.set(2, val_obj); + obj.set(3, preresolved_obj); + Self::new(*obj) + }, + } + } + + /// Build an Array of Syntax. + pub fn build_array( + cache: &mut LeanBuildCache, + items: &[Syntax], + ) -> LeanArray { + let arr = LeanArray::alloc(items.len()); + for (i, item) in items.iter().enumerate() { + arr.set(i, Self::build(cache, item)); + } + arr + } + + /// Build an Array of SyntaxPreresolved. 
+ fn build_preresolved_array( + cache: &mut LeanBuildCache, + items: &[SyntaxPreresolved], + ) -> LeanArray { + let arr = LeanArray::alloc(items.len()); + for (i, item) in items.iter().enumerate() { + arr.set(i, LeanIxSyntaxPreresolved::build(cache, item)); + } + arr + } + + /// Decode Ix.Syntax from a Lean object. + pub fn decode(self) -> Syntax { + if self.is_scalar() { + return Syntax::Missing; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => Syntax::Missing, + 1 => { + // node: info, kind, args + let info = LeanIxSourceInfo::new(ctor.get(0)).decode(); + let kind = LeanIxName::new(ctor.get(1)).decode(); + let args: Vec = + ctor.get(2).as_array().map(|x| Self::new(x).decode()); + + Syntax::Node(info, kind, args) + }, + 2 => { + // atom: info, val + let info = LeanIxSourceInfo::new(ctor.get(0)).decode(); + Syntax::Atom(info, ctor.get(1).as_string().to_string()) + }, + 3 => { + // ident: info, rawVal, val, preresolved + let info = LeanIxSourceInfo::new(ctor.get(0)).decode(); + let raw_val = LeanIxSubstring::new(ctor.get(1)).decode(); + let val = LeanIxName::new(ctor.get(2)).decode(); + let preresolved: Vec = ctor + .get(3) + .as_array() + .map(|x| LeanIxSyntaxPreresolved::new(x).decode()); + + Syntax::Ident(info, raw_val, val, preresolved) + }, + _ => panic!("Invalid Syntax tag: {}", ctor.tag()), + } + } +} + +impl LeanIxDataValue { + /// Build Ix.DataValue. 
+ pub fn build(cache: &mut LeanBuildCache, dv: &DataValue) -> Self { + match dv { + DataValue::OfString(s) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, LeanString::new(s.as_str())); + Self::new(*obj) + }, + DataValue::OfBool(b) => { + // 0 object fields, 1 scalar byte + let obj = LeanCtor::alloc(1, 0, 1); + obj.set_u8(0, *b as u8); + Self::new(*obj) + }, + DataValue::OfName(n) => { + let obj = LeanCtor::alloc(2, 1, 0); + obj.set(0, LeanIxName::build(cache, n)); + Self::new(*obj) + }, + DataValue::OfNat(n) => { + let obj = LeanCtor::alloc(3, 1, 0); + obj.set(0, build_nat(n)); + Self::new(*obj) + }, + DataValue::OfInt(i) => { + let obj = LeanCtor::alloc(4, 1, 0); + obj.set(0, LeanIxInt::build(i)); + Self::new(*obj) + }, + DataValue::OfSyntax(syn) => { + let obj = LeanCtor::alloc(5, 1, 0); + obj.set(0, LeanIxSyntax::build(cache, syn)); + Self::new(*obj) + }, + } + } + + /// Build an Array of (Name × DataValue) for mdata. + pub fn build_kvmap( + cache: &mut LeanBuildCache, + data: &[(Name, DataValue)], + ) -> LeanArray { + let arr = LeanArray::alloc(data.len()); + for (i, (name, dv)) in data.iter().enumerate() { + let name_obj = LeanIxName::build(cache, name); + let dv_obj = Self::build(cache, dv); + // Prod (Name × DataValue) + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, dv_obj); + arr.set(i, pair); + } + arr + } + + /// Decode Ix.DataValue from a Lean object. 
+ pub fn decode(self) -> DataValue { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // ofString: 1 object field + DataValue::OfString(ctor.get(0).as_string().to_string()) + }, + 1 => { + // ofBool: 0 object fields, 1 scalar byte + let b = ctor.scalar_u8(0, 0) != 0; + DataValue::OfBool(b) + }, + 2 => { + // ofName: 1 object field + DataValue::OfName(LeanIxName::new(ctor.get(0)).decode()) + }, + 3 => { + // ofNat: 1 object field + DataValue::OfNat(Nat::from_obj(ctor.get(0))) + }, + 4 => { + // ofInt: 1 object field + let inner = ctor.get(0); + let inner_ctor = inner.as_ctor(); + let nat = Nat::from_obj(inner_ctor.get(0)); + match inner_ctor.tag() { + 0 => DataValue::OfInt(Int::OfNat(nat)), + 1 => DataValue::OfInt(Int::NegSucc(nat)), + _ => panic!("Invalid Int tag: {}", inner_ctor.tag()), + } + }, + 5 => { + // ofSyntax: 1 object field + DataValue::OfSyntax(LeanIxSyntax::new(ctor.get(0)).decode().into()) + }, + _ => panic!("Invalid DataValue tag: {}", ctor.tag()), + } + } +} + +// ============================================================================= +// FFI Exports +// ============================================================================= + +/// Round-trip an Ix.Int: decode from Lean, re-encode. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_int(int_ptr: LeanIxInt) -> LeanIxInt { + let int_val = int_ptr.decode(); + LeanIxInt::build(&int_val) +} + +/// Round-trip an Ix.Substring: decode from Lean, re-encode. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_substring( + sub_ptr: LeanIxSubstring, +) -> LeanIxSubstring { + let sub = sub_ptr.decode(); + LeanIxSubstring::build(&sub) +} + +/// Round-trip an Ix.SourceInfo: decode from Lean, re-encode. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_source_info( + si_ptr: LeanIxSourceInfo, +) -> LeanIxSourceInfo { + let si = si_ptr.decode(); + LeanIxSourceInfo::build(&si) +} + +/// Round-trip an Ix.SyntaxPreresolved: decode from Lean, re-encode. 
#[unsafe(no_mangle)]
pub extern "C" fn rs_roundtrip_ix_syntax_preresolved(
    sp_ptr: LeanIxSyntaxPreresolved,
) -> LeanIxSyntaxPreresolved {
    let sp = sp_ptr.decode();
    // A fresh cache per call: roundtrips are independent FFI entry points.
    let mut cache = LeanBuildCache::new();
    LeanIxSyntaxPreresolved::build(&mut cache, &sp)
}

/// Round-trip an Ix.Syntax: decode from Lean, re-encode.
#[unsafe(no_mangle)]
pub extern "C" fn rs_roundtrip_ix_syntax(
    syn_ptr: LeanIxSyntax,
) -> LeanIxSyntax {
    let syn = syn_ptr.decode();
    let mut cache = LeanBuildCache::new();
    LeanIxSyntax::build(&mut cache, &syn)
}

/// Round-trip an Ix.DataValue: decode from Lean, re-encode.
#[unsafe(no_mangle)]
pub extern "C" fn rs_roundtrip_ix_data_value(
    dv_ptr: LeanIxDataValue,
) -> LeanIxDataValue {
    let dv = dv_ptr.decode();
    let mut cache = LeanBuildCache::new();
    LeanIxDataValue::build(&mut cache, &dv)
}

// --- file boundary (from pasted diff): src/ffi/ix/env.rs (new file) ---

//! Ix.Environment build/decode/roundtrip FFI.

use rustc_hash::FxHashMap;

use crate::ix::env::{ConstantInfo, Name};
use crate::lean::{
    LeanIxConstantInfo, LeanIxEnvironment, LeanIxName, LeanIxRawEnvironment,
};
use lean_ffi::object::{LeanArray, LeanCtor, LeanObject};

use crate::ffi::builder::LeanBuildCache;

// =============================================================================
// HashMap Building
// =============================================================================

/// Build a Lean HashMap from pre-built key-value pairs.
///
/// Lean's Std.HashMap structure (with unboxing):
/// - HashMap α β unboxes through DHashMap to Raw
/// - Raw = { size : Nat, buckets : Array (AssocList α β) }
/// - Field 0 = size (Nat), Field 1 = buckets (Array)
///
/// AssocList α β = nil | cons (key : α) (value : β) (tail : AssocList α β)
pub fn build_hashmap_from_pairs(
    pairs: Vec<(LeanObject, LeanObject, u64)>, // (key_obj, val_obj, hash)
) -> LeanObject {
    let size = pairs.len();
    // Bucket count: next power of two above size / 0.75 load factor, min 8.
    // Power-of-two size makes `hash % bucket_count` equal to a mask.
    // NOTE(review): assumes the Lean side derives the bucket index from the
    // same raw hash value as stored here — confirm against Std.HashMap.
    let bucket_count = (size * 4 / 3 + 1).next_power_of_two().max(8);

    // Create array of AssocLists (initially all nil = boxed 0)
    let buckets = LeanArray::alloc(bucket_count);
    let nil = LeanObject::box_usize(0);
    for i in 0..bucket_count {
        buckets.set(i, nil); // nil
    }

    // Insert entries
    for (key_obj, val_obj, hash) in pairs {
        let bucket_idx =
            usize::try_from(hash).expect("hash overflows usize") % bucket_count;

        // Get current bucket (AssocList)
        let current_tail = buckets.get(bucket_idx);

        // cons (key : α) (value : β) (tail : AssocList α β) -- tag 1
        let cons = LeanCtor::alloc(1, 3, 0);
        cons.set(0, key_obj);
        cons.set(1, val_obj);
        cons.set(2, current_tail);

        buckets.set(bucket_idx, cons);
    }

    // Build Raw { size : Nat, buckets : Array }
    // Due to unboxing, this IS the HashMap directly
    // Field 0 = size, Field 1 = buckets (2 object fields, no scalars)
    let size_obj = LeanObject::box_usize(size);

    let raw = LeanCtor::alloc(0, 2, 0);
    raw.set(0, size_obj);
    raw.set(1, buckets);
    *raw
}

// =============================================================================
// Environment Building / Decoding
// =============================================================================

/// Decode a HashMap's AssocList and collect key-value pairs using a custom decoder.
// NOTE(review): the generic parameter lists below (`<K, V, FK, FV>`) were
// stripped by the paste; restored so the signatures parse.
fn decode_assoc_list<K, V, FK, FV>(
    obj: LeanObject,
    decode_key: FK,
    decode_val: FV,
) -> Vec<(K, V)>
where
    FK: Fn(LeanObject) -> K,
    FV: Fn(LeanObject) -> V,
{
    let mut result = Vec::new();
    let mut current = obj;

    // Walk the cons-list until nil (either a boxed scalar or a tag-0 ctor).
    loop {
        if current.is_scalar() {
            break;
        }

        let ctor = current.as_ctor();
        if ctor.tag() == 0 {
            // AssocList.nil
            break;
        }

        // AssocList.cons: 3 fields (key, value, tail)
        result.push((decode_key(ctor.get(0)), decode_val(ctor.get(1))));
        current = ctor.get(2);
    }

    result
}

/// Decode a Lean HashMap into a Vec of key-value pairs.
/// HashMap structure (after unboxing): Raw { size : Nat, buckets : Array (AssocList α β) }
///
/// Due to single-field struct unboxing:
/// - HashMap { inner : DHashMap } unboxes to DHashMap
/// - DHashMap { inner : Raw, wf : Prop } unboxes to Raw (Prop is erased)
/// - Raw { size : Nat, buckets : Array } - field 0 = size, field 1 = buckets
fn decode_hashmap<K, V, FK, FV>(
    obj: LeanObject,
    decode_key: FK,
    decode_val: FV,
) -> Vec<(K, V)>
where
    FK: Fn(LeanObject) -> K + Copy,
    FV: Fn(LeanObject) -> V + Copy,
{
    let ctor = obj.as_ctor();
    // Raw layout: field 0 = size (Nat), field 1 = buckets (Array).
    // The size field is not needed for decoding; every entry is reachable
    // through the buckets.
    let _size = ctor.get(0);
    let buckets = ctor.get(1).as_array();

    let mut pairs = Vec::new();
    for bucket in buckets.iter() {
        let bucket_pairs = decode_assoc_list(bucket, decode_key, decode_val);
        pairs.extend(bucket_pairs);
    }

    pairs
}

impl LeanIxRawEnvironment {
    /// Build a Ix.RawEnvironment from collected caches.
    /// RawEnvironment has arrays that Lean will convert to HashMaps.
    ///
    /// Ix.RawEnvironment = {
    ///   consts : Array (Name × ConstantInfo)
    /// }
    ///
    /// NOTE: RawEnvironment with a single field is UNBOXED by Lean,
    /// so we return just the array, not a structure containing it.
+ pub fn build( + cache: &mut LeanBuildCache, + consts: &FxHashMap, + ) -> Self { + // Build consts array: Array (Name × ConstantInfo) + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (name, info)) in consts.iter().enumerate() { + let key_obj = LeanIxName::build(cache, name); + let val_obj = LeanIxConstantInfo::build(cache, info); + // Build pair (Name × ConstantInfo) + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, key_obj); + pair.set(1, val_obj); + consts_arr.set(i, pair); + } + + Self::new(*consts_arr) + } + + /// Build Ix.RawEnvironment from Vec, preserving order and duplicates. + pub fn build_from_vec( + cache: &mut LeanBuildCache, + consts: &[(Name, ConstantInfo)], + ) -> Self { + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (name, info)) in consts.iter().enumerate() { + let key_obj = LeanIxName::build(cache, name); + let val_obj = LeanIxConstantInfo::build(cache, info); + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, key_obj); + pair.set(1, val_obj); + consts_arr.set(i, pair); + } + Self::new(*consts_arr) + } + + /// Decode Ix.RawEnvironment from Lean object into HashMap. + /// RawEnvironment = { consts : Array (Name × ConstantInfo) } + /// NOTE: Unboxed to just Array. This version deduplicates by name. + pub fn decode(self) -> FxHashMap { + let arr = self.as_array(); + let mut consts: FxHashMap = FxHashMap::default(); + + for pair_obj in arr.iter() { + let pair = pair_obj.as_ctor(); + let name = LeanIxName::new(pair.get(0)).decode(); + let info = LeanIxConstantInfo::new(pair.get(1)).decode(); + consts.insert(name, info); + } + + consts + } + + /// Decode Ix.RawEnvironment from Lean object preserving array structure. + /// This version preserves all entries including duplicates. 
+ pub fn decode_to_vec(self) -> Vec<(Name, ConstantInfo)> { + let arr = self.as_array(); + let mut consts = Vec::with_capacity(arr.len()); + + for pair_obj in arr.iter() { + let pair = pair_obj.as_ctor(); + let name = LeanIxName::new(pair.get(0)).decode(); + let info = LeanIxConstantInfo::new(pair.get(1)).decode(); + consts.push((name, info)); + } + + consts + } +} + +impl LeanIxEnvironment { + /// Decode Ix.Environment from Lean object. + /// + /// Ix.Environment = { + /// consts : HashMap Name ConstantInfo + /// } + /// + /// NOTE: Environment with a single field is UNBOXED by Lean, + /// so the pointer IS the HashMap directly, not a structure containing it. + pub fn decode(self) -> FxHashMap { + // Environment is unboxed - obj IS the HashMap directly + let consts_pairs = decode_hashmap( + *self, + |x| LeanIxName::new(x).decode(), + |x| LeanIxConstantInfo::new(x).decode(), + ); + let mut consts: FxHashMap = FxHashMap::default(); + for (name, info) in consts_pairs { + consts.insert(name, info); + } + consts + } +} + +// ============================================================================= +// FFI Exports +// ============================================================================= + +/// Round-trip an Ix.Environment: decode from Lean, re-encode. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_environment( + env_ptr: LeanIxEnvironment, +) -> LeanIxRawEnvironment { + let env = env_ptr.decode(); + let mut cache = LeanBuildCache::with_capacity(env.len()); + LeanIxRawEnvironment::build(&mut cache, &env) +} + +/// Round-trip an Ix.RawEnvironment: decode from Lean, re-encode. +/// Uses Vec-preserving functions to maintain array structure and order. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_raw_environment( + env_ptr: LeanIxRawEnvironment, +) -> LeanIxRawEnvironment { + let env = env_ptr.decode_to_vec(); + let mut cache = LeanBuildCache::with_capacity(env.len()); + LeanIxRawEnvironment::build_from_vec(&mut cache, &env) +} diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs new file mode 100644 index 00000000..6e37989d --- /dev/null +++ b/src/ffi/ix/expr.rs @@ -0,0 +1,335 @@ +//! Ix.Expr build/decode/roundtrip FFI. +//! +//! Ix.Expr layout (12 constructors): +//! - Tag 0: bvar (idx : Nat) (hash : Address) +//! - Tag 1: fvar (name : Name) (hash : Address) +//! - Tag 2: mvar (name : Name) (hash : Address) +//! - Tag 3: sort (level : Level) (hash : Address) +//! - Tag 4: const (name : Name) (levels : Array Level) (hash : Address) +//! - Tag 5: app (fn arg : Expr) (hash : Address) +//! - Tag 6: lam (name : Name) (ty body : Expr) (bi : BinderInfo) (hash : Address) +//! - Tag 7: forallE (name : Name) (ty body : Expr) (bi : BinderInfo) (hash : Address) +//! - Tag 8: letE (name : Name) (ty val body : Expr) (nonDep : Bool) (hash : Address) +//! - Tag 9: lit (l : Literal) (hash : Address) +//! - Tag 10: mdata (data : Array (Name × DataValue)) (expr : Expr) (hash : Address) +//! - Tag 11: proj (typeName : Name) (idx : Nat) (struct : Expr) (hash : Address) + +use crate::ix::env::{ + BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, +}; +use crate::lean::{ + LeanIxBinderInfo, LeanIxDataValue, LeanIxExpr, LeanIxLevel, LeanIxLiteral, + LeanIxName, +}; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanCtor, LeanObject, LeanString}; + +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::primitives::build_nat; +use crate::lean::LeanIxAddress; + +impl LeanIxExpr { + /// Build a Lean Ix.Expr with embedded hash. + /// Uses caching to avoid rebuilding the same expression. 
    pub fn build(cache: &mut LeanBuildCache, expr: &Expr) -> Self {
        // Hash-consing: expressions are keyed by their precomputed hash, so
        // a repeated subterm is built once and then shared (with a refcount
        // bump) on every subsequent occurrence.
        let hash = *expr.get_hash();
        if let Some(&cached) = cache.exprs.get(&hash) {
            // Returning an extra reference to a shared object: bump refcount.
            cached.inc_ref();
            return cached;
        }

        // Constructor tags and field layouts mirror the Ix.Expr declaration
        // documented in the module header; the Address hash is always the
        // last object field.
        let result = match expr.as_data() {
            ExprData::Bvar(idx, h) => {
                let obj = LeanCtor::alloc(0, 2, 0);
                obj.set(0, build_nat(idx));
                obj.set(1, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::Fvar(name, h) => {
                let obj = LeanCtor::alloc(1, 2, 0);
                obj.set(0, LeanIxName::build(cache, name));
                obj.set(1, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::Mvar(name, h) => {
                let obj = LeanCtor::alloc(2, 2, 0);
                obj.set(0, LeanIxName::build(cache, name));
                obj.set(1, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::Sort(level, h) => {
                let obj = LeanCtor::alloc(3, 2, 0);
                obj.set(0, LeanIxLevel::build(cache, level));
                obj.set(1, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::Const(name, levels, h) => {
                let name_obj = LeanIxName::build(cache, name);
                let levels_obj = LeanIxLevel::build_array(cache, levels);
                let obj = LeanCtor::alloc(4, 3, 0);
                obj.set(0, name_obj);
                obj.set(1, levels_obj);
                obj.set(2, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::App(fn_expr, arg_expr, h) => {
                let fn_obj = Self::build(cache, fn_expr);
                let arg_obj = Self::build(cache, arg_expr);
                let obj = LeanCtor::alloc(5, 3, 0);
                obj.set(0, fn_obj);
                obj.set(1, arg_obj);
                obj.set(2, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::Lam(name, ty, body, bi, h) => {
                let name_obj = LeanIxName::build(cache, name);
                let ty_obj = Self::build(cache, ty);
                let body_obj = Self::build(cache, body);
                let hash_obj = LeanIxAddress::build_from_hash(h);
                // 4 object fields, 1 scalar byte for BinderInfo
                let obj = LeanCtor::alloc(6, 4, 1);
                obj.set(0, name_obj);
                obj.set(1, ty_obj);
                obj.set(2, body_obj);
                obj.set(3, hash_obj);
                obj.set_u8(4 * 8, LeanIxBinderInfo::to_u8(bi));
                Self::new(*obj)
            },
            ExprData::ForallE(name, ty, body, bi, h) => {
                let name_obj = LeanIxName::build(cache, name);
                let ty_obj = Self::build(cache, ty);
                let body_obj = Self::build(cache, body);
                let hash_obj = LeanIxAddress::build_from_hash(h);
                let obj = LeanCtor::alloc(7, 4, 1);
                obj.set(0, name_obj);
                obj.set(1, ty_obj);
                obj.set(2, body_obj);
                obj.set(3, hash_obj);
                obj.set_u8(4 * 8, LeanIxBinderInfo::to_u8(bi));
                Self::new(*obj)
            },
            ExprData::LetE(name, ty, val, body, non_dep, h) => {
                let name_obj = LeanIxName::build(cache, name);
                let ty_obj = Self::build(cache, ty);
                let val_obj = Self::build(cache, val);
                let body_obj = Self::build(cache, body);
                let hash_obj = LeanIxAddress::build_from_hash(h);
                // 5 object fields, 1 scalar byte for Bool
                let obj = LeanCtor::alloc(8, 5, 1);
                obj.set(0, name_obj);
                obj.set(1, ty_obj);
                obj.set(2, val_obj);
                obj.set(3, body_obj);
                obj.set(4, hash_obj);
                obj.set_u8(5 * 8, *non_dep as u8);
                Self::new(*obj)
            },
            ExprData::Lit(lit, h) => {
                let lit_obj = LeanIxLiteral::build(lit);
                let obj = LeanCtor::alloc(9, 2, 0);
                obj.set(0, lit_obj);
                obj.set(1, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::Mdata(md, inner, h) => {
                let md_obj = LeanIxDataValue::build_kvmap(cache, md);
                let inner_obj = Self::build(cache, inner);
                let obj = LeanCtor::alloc(10, 3, 0);
                obj.set(0, md_obj);
                obj.set(1, inner_obj);
                obj.set(2, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
            ExprData::Proj(type_name, idx, struct_expr, h) => {
                let name_obj = LeanIxName::build(cache, type_name);
                let idx_obj = build_nat(idx);
                let struct_obj = Self::build(cache, struct_expr);
                let obj = LeanCtor::alloc(11, 4, 0);
                obj.set(0, name_obj);
                obj.set(1, idx_obj);
                obj.set(2, struct_obj);
                obj.set(3, LeanIxAddress::build_from_hash(h));
                Self::new(*obj)
            },
        };

        // Memoize the freshly built object for subsequent occurrences.
        cache.exprs.insert(hash, result);
        result
    }

    /// Decode a Lean Ix.Expr to Rust
Expr. + pub fn decode(self) -> Expr { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // bvar + let idx = Nat::from_obj(ctor.get(0)); + Expr::bvar(idx) + }, + 1 => { + // fvar + let name = LeanIxName::new(ctor.get(0)).decode(); + Expr::fvar(name) + }, + 2 => { + // mvar + let name = LeanIxName::new(ctor.get(0)).decode(); + Expr::mvar(name) + }, + 3 => { + // sort + let level = LeanIxLevel::new(ctor.get(0)).decode(); + Expr::sort(level) + }, + 4 => { + // const + let name = LeanIxName::new(ctor.get(0)).decode(); + let levels: Vec<Level> = + ctor.get(1).as_array().map(|x| LeanIxLevel::new(x).decode()); + + Expr::cnst(name, levels) + }, + 5 => { + // app + let fn_expr = Self::new(ctor.get(0)).decode(); + let arg_expr = Self::new(ctor.get(1)).decode(); + Expr::app(fn_expr, arg_expr) + }, + 6 => { + // lam: name, ty, body, hash, bi (scalar) + let name = LeanIxName::new(ctor.get(0)).decode(); + let ty = Self::new(ctor.get(1)).decode(); + let body = Self::new(ctor.get(2)).decode(); + + // Read BinderInfo scalar (4 obj fields: name, ty, body, hash) + let bi_byte = ctor.scalar_u8(4, 0); + let bi = LeanIxBinderInfo::from_u8(bi_byte); + + Expr::lam(name, ty, body, bi) + }, + 7 => { + // forallE: same layout as lam + let name = LeanIxName::new(ctor.get(0)).decode(); + let ty = Self::new(ctor.get(1)).decode(); + let body = Self::new(ctor.get(2)).decode(); + + // 4 obj fields: name, ty, body, hash + let bi_byte = ctor.scalar_u8(4, 0); + let bi = LeanIxBinderInfo::from_u8(bi_byte); + + Expr::all(name, ty, body, bi) + }, + 8 => { + // letE: name, ty, val, body, hash, nonDep (scalar) + let name = LeanIxName::new(ctor.get(0)).decode(); + let ty = Self::new(ctor.get(1)).decode(); + let val = Self::new(ctor.get(2)).decode(); + let body = Self::new(ctor.get(3)).decode(); + + // 5 obj fields: name, ty, val, body, hash + let non_dep = ctor.scalar_u8(5, 0) != 0; + + Expr::letE(name, ty, val, body, non_dep) + }, + 9 => { + // lit + let lit = LeanIxLiteral::new(ctor.get(0)).decode();
+ Expr::lit(lit) + }, + 10 => { + // mdata: data, expr, hash + let data: Vec<(Name, DataValue)> = ctor.get(0).as_array().map(|obj| { + let pair = obj.as_ctor(); + let name = LeanIxName::new(pair.get(0)).decode(); + let dv = LeanIxDataValue::new(pair.get(1)).decode(); + (name, dv) + }); + + let inner = Self::new(ctor.get(1)).decode(); + Expr::mdata(data, inner) + }, + 11 => { + // proj: typeName, idx, struct, hash + let type_name = LeanIxName::new(ctor.get(0)).decode(); + let idx = Nat::from_obj(ctor.get(1)); + let struct_expr = Self::new(ctor.get(2)).decode(); + + Expr::proj(type_name, idx, struct_expr) + }, + _ => panic!("Invalid Ix.Expr tag: {}", ctor.tag()), + } + } +} + +impl LeanIxLiteral { + /// Build a Literal (natVal or strVal). + pub fn build(lit: &Literal) -> Self { + let obj = match lit { + Literal::NatVal(n) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, build_nat(n)); + *obj + }, + Literal::StrVal(s) => { + let obj = LeanCtor::alloc(1, 1, 0); + obj.set(0, LeanString::new(s.as_str())); + *obj + }, + }; + Self::new(obj) + } + + /// Decode Lean.Literal from a Lean object. + pub fn decode(self) -> Literal { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // natVal + let nat = Nat::from_obj(ctor.get(0)); + Literal::NatVal(nat) + }, + 1 => { + // strVal + Literal::StrVal(ctor.get(0).as_string().to_string()) + }, + _ => panic!("Invalid Literal tag: {}", ctor.tag()), + } + } +} + +impl LeanIxBinderInfo { + /// Build Ix.BinderInfo enum. + /// BinderInfo is a 4-constructor enum with no fields, stored as boxed scalar. + pub fn build(bi: &BinderInfo) -> Self { + Self::new(LeanObject::box_usize(Self::to_u8(bi) as usize)) + } + + /// Convert BinderInfo to u8 tag. + pub fn to_u8(bi: &BinderInfo) -> u8 { + match bi { + BinderInfo::Default => 0, + BinderInfo::Implicit => 1, + BinderInfo::StrictImplicit => 2, + BinderInfo::InstImplicit => 3, + } + } + + /// Decode BinderInfo from byte. 
+ pub fn from_u8(bi_byte: u8) -> BinderInfo { + match bi_byte { + 0 => BinderInfo::Default, + 1 => BinderInfo::Implicit, + 2 => BinderInfo::StrictImplicit, + 3 => BinderInfo::InstImplicit, + _ => panic!("Invalid BinderInfo: {}", bi_byte), + } + } +} + +/// Round-trip an Ix.Expr: decode from Lean, re-encode via LeanBuildCache. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: LeanIxExpr) -> LeanIxExpr { + let expr = expr_ptr.decode(); + let mut cache = LeanBuildCache::new(); + LeanIxExpr::build(&mut cache, &expr) +} diff --git a/src/ffi/ix/level.rs b/src/ffi/ix/level.rs new file mode 100644 index 00000000..bac3eaf6 --- /dev/null +++ b/src/ffi/ix/level.rs @@ -0,0 +1,134 @@ +//! Ix.Level build/decode/roundtrip FFI. +//! +//! Ix.Level layout: +//! - Tag 0: zero (hash : Address) +//! - Tag 1: succ (x : Level) (hash : Address) +//! - Tag 2: max (x y : Level) (hash : Address) +//! - Tag 3: imax (x y : Level) (hash : Address) +//! - Tag 4: param (n : Name) (hash : Address) +//! - Tag 5: mvar (n : Name) (hash : Address) + +use crate::ix::env::{Level, LevelData}; +use crate::lean::{LeanIxLevel, LeanIxName}; +use lean_ffi::object::{LeanArray, LeanCtor}; + +use crate::ffi::builder::LeanBuildCache; +use crate::lean::LeanIxAddress; + +impl LeanIxLevel { + /// Build a Lean Ix.Level with embedded hash. + /// Uses caching to avoid rebuilding the same level. 
+ pub fn build(cache: &mut LeanBuildCache, level: &Level) -> Self { + let hash = *level.get_hash(); + if let Some(&cached) = cache.levels.get(&hash) { + cached.inc_ref(); + return cached; + } + + let result = match level.as_data() { + LevelData::Zero(h) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Succ(x, h) => { + let x_obj = Self::build(cache, x); + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, x_obj); + ctor.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Max(x, y, h) => { + let x_obj = Self::build(cache, x); + let y_obj = Self::build(cache, y); + let ctor = LeanCtor::alloc(2, 3, 0); + ctor.set(0, x_obj); + ctor.set(1, y_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Imax(x, y, h) => { + let x_obj = Self::build(cache, x); + let y_obj = Self::build(cache, y); + let ctor = LeanCtor::alloc(3, 3, 0); + ctor.set(0, x_obj); + ctor.set(1, y_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Param(n, h) => { + let n_obj = LeanIxName::build(cache, n); + let ctor = LeanCtor::alloc(4, 2, 0); + ctor.set(0, n_obj); + ctor.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Mvar(n, h) => { + let n_obj = LeanIxName::build(cache, n); + let ctor = LeanCtor::alloc(5, 2, 0); + ctor.set(0, n_obj); + ctor.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + }; + + cache.levels.insert(hash, result); + result + } + + /// Build an Array of Levels. + pub fn build_array( + cache: &mut LeanBuildCache, + levels: &[Level], + ) -> LeanArray { + let arr = LeanArray::alloc(levels.len()); + for (i, level) in levels.iter().enumerate() { + arr.set(i, Self::build(cache, level)); + } + arr + } + + /// Decode a Lean Ix.Level to Rust Level. 
+ pub fn decode(self) -> Level { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => Level::zero(), + 1 => { + let x = Self::new(ctor.get(0)).decode(); + Level::succ(x) + }, + 2 => { + let x = Self::new(ctor.get(0)).decode(); + let y = Self::new(ctor.get(1)).decode(); + Level::max(x, y) + }, + 3 => { + let x = Self::new(ctor.get(0)).decode(); + let y = Self::new(ctor.get(1)).decode(); + Level::imax(x, y) + }, + 4 => { + let n = LeanIxName::new(ctor.get(0)).decode(); + Level::param(n) + }, + 5 => { + let n = LeanIxName::new(ctor.get(0)).decode(); + Level::mvar(n) + }, + _ => panic!("Invalid Ix.Level tag: {}", ctor.tag()), + } + } + + /// Decode Array of Levels from Lean pointer. + pub fn decode_array(obj: LeanArray) -> Vec<Level> { + obj.map(|x| Self::new(x).decode()) + } +} + +/// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_level(level_ptr: LeanIxLevel) -> LeanIxLevel { + let level = level_ptr.decode(); + let mut cache = LeanBuildCache::new(); + LeanIxLevel::build(&mut cache, &level) +} diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs new file mode 100644 index 00000000..dd181cf9 --- /dev/null +++ b/src/ffi/ix/name.rs @@ -0,0 +1,102 @@ +//! Ix.Name build/decode/roundtrip FFI. +//! +//! Ix.Name layout: +//! - Tag 0: anonymous (hash : Address) +//! - Tag 1: str (parent : Name) (s : String) (hash : Address) +//! - Tag 2: num (parent : Name) (i : Nat) (hash : Address) + +use crate::ix::env::{Name, NameData}; +use crate::lean::LeanIxName; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanArray, LeanCtor, LeanString}; + +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::primitives::build_nat; +use crate::lean::LeanIxAddress; + +impl LeanIxName { + /// Build a Lean Ix.Name with embedded hash. + /// Uses caching to avoid rebuilding the same name.
+ pub fn build(cache: &mut LeanBuildCache, name: &Name) -> Self { + let hash = name.get_hash(); + if let Some(&cached) = cache.names.get(hash) { + cached.inc_ref(); + return cached; + } + + let result = match name.as_data() { + NameData::Anonymous(h) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + NameData::Str(parent, s, h) => { + let parent_obj = Self::build(cache, parent); + let s_obj = LeanString::new(s.as_str()); + let ctor = LeanCtor::alloc(1, 3, 0); + ctor.set(0, parent_obj); + ctor.set(1, s_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + NameData::Num(parent, n, h) => { + let parent_obj = Self::build(cache, parent); + let n_obj = build_nat(n); + let ctor = LeanCtor::alloc(2, 3, 0); + ctor.set(0, parent_obj); + ctor.set(1, n_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + }; + + cache.names.insert(*hash, result); + result + } + + /// Build an Array of Names. + pub fn build_array(cache: &mut LeanBuildCache, names: &[Name]) -> LeanArray { + let arr = LeanArray::alloc(names.len()); + for (i, name) in names.iter().enumerate() { + arr.set(i, Self::build(cache, name)); + } + arr + } + + /// Decode a Lean Ix.Name to Rust Name. + pub fn decode(self) -> Name { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // anonymous: just has hash, construct anon Name + Name::anon() + }, + 1 => { + // str: parent, s, hash + let parent = Self::new(ctor.get(0)).decode(); + let s = ctor.get(1).as_string().to_string(); + Name::str(parent, s) + }, + 2 => { + // num: parent, i, hash + let parent = Self::new(ctor.get(0)).decode(); + let i = Nat::from_obj(ctor.get(1)); + Name::num(parent, i) + }, + _ => panic!("Invalid Ix.Name tag: {}", ctor.tag()), + } + } + + /// Decode Array of Names from Lean pointer. 
+ pub fn decode_array(obj: LeanArray) -> Vec<Name> { + obj.map(|x| Self::new(x).decode()) + } +} + +/// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ix_name(name_ptr: LeanIxName) -> LeanIxName { + let name = name_ptr.decode(); + let mut cache = LeanBuildCache::new(); + LeanIxName::build(&mut cache, &name) +} diff --git a/src/lean/ffi/ixon.rs b/src/ffi/ixon.rs similarity index 100% rename from src/lean/ffi/ixon.rs rename to src/ffi/ixon.rs diff --git a/src/lean/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs similarity index 67% rename from src/lean/ffi/ixon/compare.rs rename to src/ffi/ixon/compare.rs index 59232b22..e5def305 100644 --- a/src/lean/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -1,17 +1,16 @@ //! Cross-implementation compilation comparison FFI. use std::collections::HashMap; -use std::ffi::c_void; use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::{lean_alloc_ctor, lean_ctor_set}; +use crate::lean::{LeanIxBlockCompareDetail, LeanIxBlockCompareResult}; +use lean_ffi::object::{LeanByteArray, LeanCtor, LeanList, LeanObject}; -use super::super::lean_env::{ - Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, +use crate::ffi::lean_env::{ + Cache as LeanCache, GlobalCache, decode_expr, decode_name, }; /// Rust-side compiled environment for block comparison. @@ -22,14 +21,14 @@ pub struct RustBlockEnv { /// Compare Lean's compiled expression output with Rust's compilation of the same input.
#[unsafe(no_mangle)] pub extern "C" fn rs_compare_expr_compilation( - lean_expr_ptr: *const c_void, - lean_output: &LeanSArrayObject, + lean_expr_ptr: LeanObject, + lean_output: LeanByteArray, univ_ctx_size: u64, ) -> bool { // Decode Lean.Expr to Rust's representation let global_cache = GlobalCache::default(); let mut cache = LeanCache::new(&global_cache); - let lean_expr = lean_ptr_to_expr(lean_expr_ptr, &mut cache); + let lean_expr = decode_expr(lean_expr_ptr, &mut cache); // Create universe params for de Bruijn indexing (u0, u1, u2, ...) let univ_params: Vec<Name> = (0..univ_ctx_size) @@ -58,48 +57,47 @@ pub extern "C" fn rs_compare_expr_compilation( put_expr(&rust_expr, &mut rust_bytes); // Compare byte-for-byte - let lean_bytes = lean_output.data(); + let lean_bytes = lean_output.as_bytes(); rust_bytes == lean_bytes } -/// Build a BlockCompareResult Lean object. -fn build_block_compare_result( - matched: bool, - not_found: bool, - lean_size: u64, - rust_size: u64, - first_diff_offset: u64, -) -> *mut c_void { - unsafe { - if matched { - lean_alloc_ctor(0, 0, 0) // match +impl LeanIxBlockCompareResult { + /// Build a BlockCompareResult Lean object. + fn build( + matched: bool, + not_found: bool, + lean_size: u64, + rust_size: u64, + first_diff_offset: u64, + ) -> Self { + let obj = if matched { + *LeanCtor::alloc(0, 0, 0) // match } else if not_found { - lean_alloc_ctor(2, 0, 0) // notFound + *LeanCtor::alloc(2, 0, 0) // notFound } else { // mismatch - let obj = lean_alloc_ctor(1, 0, 24); - let base = obj.cast::<u8>(); - *base.add(8).cast::<u64>() = lean_size; - *base.add(16).cast::<u64>() = rust_size; - *base.add(24).cast::<u64>() = first_diff_offset; - obj - } + let ctor = LeanCtor::alloc(1, 0, 24); + ctor.set_u64(0, lean_size); + ctor.set_u64(8, rust_size); + ctor.set_u64(16, first_diff_offset); + *ctor + }; + Self::new(obj) } } -/// Build a BlockCompareDetail Lean object.
-fn build_block_compare_detail( - result: *mut c_void, - lean_sharing_len: u64, - rust_sharing_len: u64, -) -> *mut c_void { - unsafe { - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, result); - let base = obj.cast::(); - *base.add(16).cast::() = lean_sharing_len; - *base.add(24).cast::() = rust_sharing_len; - obj +impl LeanIxBlockCompareDetail { + /// Build a BlockCompareDetail Lean object. + fn build( + result: LeanIxBlockCompareResult, + lean_sharing_len: u64, + rust_sharing_len: u64, + ) -> Self { + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, result); + ctor.set_u64(8, lean_sharing_len); + ctor.set_u64(8 + 8, rust_sharing_len); + Self::new(*ctor) } } @@ -108,42 +106,46 @@ fn build_block_compare_detail( /// # Safety /// /// `rust_env` must be a valid pointer to a `RustBlockEnv`. -/// `lowlink_name` must be a valid Lean object pointer. #[unsafe(no_mangle)] pub unsafe extern "C" fn rs_compare_block_v2( rust_env: *const RustBlockEnv, - lowlink_name: *const c_void, - lean_bytes: &LeanSArrayObject, + lowlink_name: LeanObject, + lean_bytes: LeanByteArray, lean_sharing_len: u64, -) -> *mut c_void { +) -> LeanIxBlockCompareDetail { let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let lean_data = lean_bytes.data(); + let lean_data = lean_bytes.as_bytes(); // Look up Rust's compiled block let (rust_bytes, rust_sharing_len) = match rust_env.blocks.get(&name) { Some((bytes, sharing_len)) => (bytes, *sharing_len as u64), None => { // Block not found in Rust compilation - let result = - build_block_compare_result(false, true, lean_data.len() as u64, 0, 0); - return build_block_compare_detail(result, lean_sharing_len, 0); + let result = LeanIxBlockCompareResult::build( + false, + true, + lean_data.len() as u64, + 0, + 0, + ); + return LeanIxBlockCompareDetail::build(result, lean_sharing_len, 0); }, }; // 
Compare bytes if rust_bytes == lean_data { // Match - let result = build_block_compare_result( + let result = LeanIxBlockCompareResult::build( true, false, lean_data.len() as u64, rust_bytes.len() as u64, 0, ); - return build_block_compare_detail( + return LeanIxBlockCompareDetail::build( result, lean_sharing_len, rust_sharing_len, @@ -163,14 +165,14 @@ pub unsafe extern "C" fn rs_compare_block_v2( |i| i as u64, ); - let result = build_block_compare_result( + let result = LeanIxBlockCompareResult::build( false, false, lean_data.len() as u64, rust_bytes.len() as u64, first_diff_offset, ); - build_block_compare_detail(result, lean_sharing_len, rust_sharing_len) + LeanIxBlockCompareDetail::build(result, lean_sharing_len, rust_sharing_len) } /// Free a RustBlockEnv pointer. @@ -190,12 +192,12 @@ pub unsafe extern "C" fn rs_free_compiled_env(ptr: *mut RustBlockEnv) { /// Build a RustBlockEnv from a Lean environment. #[unsafe(no_mangle)] pub extern "C" fn rs_build_compiled_env( - env_consts_ptr: *const c_void, + env_consts_ptr: LeanList, ) -> *mut RustBlockEnv { - use super::super::lean_env::lean_ptr_to_env; + use crate::ffi::lean_env::decode_env; // Decode Lean environment - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr); let rust_env = std::sync::Arc::new(rust_env); // Compile diff --git a/src/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs new file mode 100644 index 00000000..84f91279 --- /dev/null +++ b/src/ffi/ixon/constant.rs @@ -0,0 +1,693 @@ +//! Ixon constant types build/decode/roundtrip FFI. +//! +//! Includes: Definition, Axiom, Quotient, RecursorRule, Recursor, Constructor, +//! Inductive, InductiveProj, ConstructorProj, RecursorProj, DefinitionProj, +//! 
MutConst, ConstantInfo, Constant + +use std::sync::Arc; + +use crate::ix::ixon::constant::{ + Axiom as IxonAxiom, Constant as IxonConstant, + ConstantInfo as IxonConstantInfo, Constructor as IxonConstructor, + ConstructorProj, DefKind, Definition as IxonDefinition, DefinitionProj, + Inductive as IxonInductive, InductiveProj, MutConst, + Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, + RecursorRule as IxonRecursorRule, +}; +use crate::lean::{ + LeanIxAddress, LeanIxonAxiom, LeanIxonConstant, LeanIxonConstantInfo, + LeanIxonConstructor, LeanIxonConstructorProj, LeanIxonDefinition, + LeanIxonDefinitionProj, LeanIxonExpr, LeanIxonInductive, + LeanIxonInductiveProj, LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, + LeanIxonRecursorProj, LeanIxonRecursorRule, LeanIxonUniv, +}; +use lean_ffi::object::{LeanArray, LeanCtor}; + +// ============================================================================= +// Definition +// ============================================================================= + +impl LeanIxonDefinition { + /// Build Ixon.Definition + /// Lean stores scalar fields ordered by size (largest first). 
+ /// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) + pub fn build(def: &IxonDefinition) -> Self { + let typ_obj = LeanIxonExpr::build(&def.typ); + let value_obj = LeanIxonExpr::build(&def.value); + // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) + let ctor = LeanCtor::alloc(0, 2, 16); + ctor.set(0, typ_obj); + ctor.set(1, value_obj); + // Scalar offsets from obj_cptr: 2*8=16 for lvls, 2*8+8=24 for kind, 2*8+9=25 for safety + ctor.set_u64(16, def.lvls); + let kind_val: u8 = match def.kind { + DefKind::Definition => 0, + DefKind::Opaque => 1, + DefKind::Theorem => 2, + }; + ctor.set_u8(24, kind_val); + let safety_val: u8 = match def.safety { + crate::ix::env::DefinitionSafety::Unsafe => 0, + crate::ix::env::DefinitionSafety::Safe => 1, + crate::ix::env::DefinitionSafety::Partial => 2, + }; + ctor.set_u8(25, safety_val); + Self::new(*ctor) + } + + /// Decode Ixon.Definition. + /// Lean stores scalar fields ordered by size (largest first). 
+ /// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) + pub fn decode(self) -> IxonDefinition { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let value = Arc::new(LeanIxonExpr::new(ctor.get(1)).decode()); + let lvls = ctor.scalar_u64(2, 0); + let kind_val = ctor.scalar_u8(2, 8); + let kind = match kind_val { + 0 => DefKind::Definition, + 1 => DefKind::Opaque, + 2 => DefKind::Theorem, + _ => panic!("Invalid DefKind: {}", kind_val), + }; + let safety_val = ctor.scalar_u8(2, 9); + let safety = match safety_val { + 0 => crate::ix::env::DefinitionSafety::Unsafe, + 1 => crate::ix::env::DefinitionSafety::Safe, + 2 => crate::ix::env::DefinitionSafety::Partial, + _ => panic!("Invalid DefinitionSafety: {}", safety_val), + }; + IxonDefinition { kind, safety, lvls, typ, value } + } +} + +// ============================================================================= +// RecursorRule +// ============================================================================= + +impl LeanIxonRecursorRule { + /// Build Ixon.RecursorRule + pub fn build(rule: &IxonRecursorRule) -> Self { + let rhs_obj = LeanIxonExpr::build(&rule.rhs); + // 1 obj field, 8 scalar bytes + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, rhs_obj); + ctor.set_u64(8, rule.fields); + Self::new(*ctor) + } + + /// Decode Ixon.RecursorRule. 
+ pub fn decode(self) -> IxonRecursorRule { + let ctor = self.as_ctor(); + let rhs = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let fields = ctor.scalar_u64(1, 0); + IxonRecursorRule { fields, rhs } + } +} + +// ============================================================================= +// Recursor +// ============================================================================= + +impl LeanIxonRecursor { + /// Build Ixon.Recursor + /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) + pub fn build(rec: &IxonRecursor) -> Self { + let typ_obj = LeanIxonExpr::build(&rec.typ); + // Build rules array + let rules_arr = LeanArray::alloc(rec.rules.len()); + for (i, rule) in rec.rules.iter().enumerate() { + rules_arr.set(i, LeanIxonRecursorRule::build(rule)); + } + // 2 obj fields (typ, rules), 48 scalar bytes (5×8 + 1 + 1 + 6 padding) + let ctor = LeanCtor::alloc(0, 2, 48); + ctor.set(0, typ_obj); + ctor.set(1, rules_arr); + // Scalar offsets from obj_cptr: 2*8=16 base + ctor.set_u64(16, rec.lvls); + ctor.set_u64(24, rec.params); + ctor.set_u64(32, rec.indices); + ctor.set_u64(40, rec.motives); + ctor.set_u64(48, rec.minors); + ctor.set_u8(56, if rec.k { 1 } else { 0 }); + ctor.set_u8(57, if rec.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Recursor. 
+ /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) + pub fn decode(self) -> IxonRecursor { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let rules_arr = ctor.get(1).as_array(); + let rules = rules_arr.map(|x| LeanIxonRecursorRule::new(x).decode()); + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let motives = ctor.scalar_u64(2, 24); + let minors = ctor.scalar_u64(2, 32); + let k = ctor.scalar_u8(2, 40) != 0; + let is_unsafe = ctor.scalar_u8(2, 41) != 0; + IxonRecursor { + k, + is_unsafe, + lvls, + params, + indices, + motives, + minors, + typ, + rules, + } + } +} + +// ============================================================================= +// Axiom +// ============================================================================= + +impl LeanIxonAxiom { + /// Build Ixon.Axiom + /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) + pub fn build(ax: &IxonAxiom) -> Self { + let typ_obj = LeanIxonExpr::build(&ax.typ); + // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, ax.lvls); + ctor.set_u8(16, if ax.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Axiom. 
+ /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) + pub fn decode(self) -> IxonAxiom { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let lvls = ctor.scalar_u64(1, 0); + let is_unsafe = ctor.scalar_u8(1, 8) != 0; + IxonAxiom { is_unsafe, lvls, typ } + } +} + +// ============================================================================= +// Quotient +// ============================================================================= + +impl LeanIxonQuotient { + /// Build Ixon.Quotient + /// QuotKind is a simple enum stored as scalar u8, not object field. + /// Scalars ordered by size: lvls(8) + kind(1) + padding(7) + pub fn build(quot: &IxonQuotient) -> Self { + let typ_obj = LeanIxonExpr::build(&quot.typ); + // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7)) + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, quot.lvls); + let kind_val: u8 = match quot.kind { + crate::ix::env::QuotKind::Type => 0, + crate::ix::env::QuotKind::Ctor => 1, + crate::ix::env::QuotKind::Lift => 2, + crate::ix::env::QuotKind::Ind => 3, + }; + ctor.set_u8(16, kind_val); + Self::new(*ctor) + } + + /// Decode Ixon.Quotient. + /// QuotKind is a scalar (not object field).
Scalars: lvls(8) + kind(1) + padding(7) + pub fn decode(self) -> IxonQuotient { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let lvls = ctor.scalar_u64(1, 0); + let kind_val = ctor.scalar_u8(1, 8); + let kind = match kind_val { + 0 => crate::ix::env::QuotKind::Type, + 1 => crate::ix::env::QuotKind::Ctor, + 2 => crate::ix::env::QuotKind::Lift, + 3 => crate::ix::env::QuotKind::Ind, + _ => panic!("Invalid QuotKind: {}", kind_val), + }; + IxonQuotient { kind, lvls, typ } + } +} + +// ============================================================================= +// Constructor +// ============================================================================= + +impl LeanIxonConstructor { + /// Build Ixon.Constructor + /// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) + pub fn build(c: &IxonConstructor) -> Self { + let typ_obj = LeanIxonExpr::build(&c.typ); + // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) + let ctor = LeanCtor::alloc(0, 1, 40); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, c.lvls); + ctor.set_u64(16, c.cidx); + ctor.set_u64(24, c.params); + ctor.set_u64(32, c.fields); + ctor.set_u8(40, if c.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Constructor. 
+ /// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) + pub fn decode(self) -> IxonConstructor { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let lvls = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + let params = ctor.scalar_u64(1, 16); + let fields = ctor.scalar_u64(1, 24); + let is_unsafe = ctor.scalar_u8(1, 32) != 0; + IxonConstructor { is_unsafe, lvls, cidx, params, fields, typ } + } +} + +// ============================================================================= +// Inductive +// ============================================================================= + +impl LeanIxonInductive { + /// Build Ixon.Inductive + /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) + pub fn build(ind: &IxonInductive) -> Self { + let typ_obj = LeanIxonExpr::build(&ind.typ); + // Build ctors array + let ctors_arr = LeanArray::alloc(ind.ctors.len()); + for (i, c) in ind.ctors.iter().enumerate() { + ctors_arr.set(i, LeanIxonConstructor::build(c)); + } + // 2 obj fields, 40 scalar bytes (4×8 + 3 + 5 padding) + let ctor = LeanCtor::alloc(0, 2, 40); + ctor.set(0, typ_obj); + ctor.set(1, ctors_arr); + // Scalar offsets from obj_cptr: 2*8=16 base + ctor.set_u64(16, ind.lvls); + ctor.set_u64(24, ind.params); + ctor.set_u64(32, ind.indices); + ctor.set_u64(40, ind.nested); + ctor.set_u8(48, if ind.recr { 1 } else { 0 }); + ctor.set_u8(49, if ind.refl { 1 } else { 0 }); + ctor.set_u8(50, if ind.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Inductive. 
+ /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) + pub fn decode(self) -> IxonInductive { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let ctors_arr = ctor.get(1).as_array(); + let ctors = ctors_arr.map(|x| LeanIxonConstructor::new(x).decode()); + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let nested = ctor.scalar_u64(2, 24); + let recr = ctor.scalar_u8(2, 32) != 0; + let refl = ctor.scalar_u8(2, 33) != 0; + let is_unsafe = ctor.scalar_u8(2, 34) != 0; + IxonInductive { + recr, + refl, + is_unsafe, + lvls, + params, + indices, + nested, + typ, + ctors, + } + } +} + +// ============================================================================= +// Projection Types +// ============================================================================= + +impl LeanIxonInductiveProj { + pub fn build(proj: &InductiveProj) -> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } + + pub fn decode(self) -> InductiveProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + InductiveProj { idx, block } + } +} + +impl LeanIxonConstructorProj { + pub fn build(proj: &ConstructorProj) -> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + ctor.set_u64(16, proj.cidx); + Self::new(*ctor) + } + + pub fn decode(self) -> ConstructorProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + ConstructorProj { idx, cidx, block } + } +} + +impl LeanIxonRecursorProj { + pub fn build(proj: &RecursorProj) 
-> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } + + pub fn decode(self) -> RecursorProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + RecursorProj { idx, block } + } +} + +impl LeanIxonDefinitionProj { + pub fn build(proj: &DefinitionProj) -> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } + + pub fn decode(self) -> DefinitionProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + DefinitionProj { idx, block } + } +} + +// ============================================================================= +// MutConst +// ============================================================================= + +impl LeanIxonMutConst { + pub fn build(mc: &MutConst) -> Self { + let obj = match mc { + MutConst::Defn(def) => { + let def_obj = LeanIxonDefinition::build(def); + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, def_obj); + *ctor + }, + MutConst::Indc(ind) => { + let ind_obj = LeanIxonInductive::build(ind); + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, ind_obj); + *ctor + }, + MutConst::Recr(rec) => { + let rec_obj = LeanIxonRecursor::build(rec); + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, rec_obj); + *ctor + }, + }; + Self::new(obj) + } + + pub fn decode(self) -> MutConst { + let ctor = self.as_ctor(); + let inner = ctor.get(0); + match ctor.tag() { + 0 => MutConst::Defn(LeanIxonDefinition::new(inner).decode()), + 1 => MutConst::Indc(LeanIxonInductive::new(inner).decode()), + 2 => MutConst::Recr(LeanIxonRecursor::new(inner).decode()), + tag => panic!("Invalid Ixon.MutConst tag: {}", tag), + } + } +} + +// 
============================================================================= +// ConstantInfo +// ============================================================================= + +impl LeanIxonConstantInfo { + /// Build Ixon.ConstantInfo (9 constructors) + pub fn build(info: &IxonConstantInfo) -> Self { + let obj = match info { + IxonConstantInfo::Defn(def) => { + let def_obj = LeanIxonDefinition::build(def); + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, def_obj); + *ctor + }, + IxonConstantInfo::Recr(rec) => { + let rec_obj = LeanIxonRecursor::build(rec); + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, rec_obj); + *ctor + }, + IxonConstantInfo::Axio(ax) => { + let ax_obj = LeanIxonAxiom::build(ax); + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, ax_obj); + *ctor + }, + IxonConstantInfo::Quot(quot) => { + let quot_obj = LeanIxonQuotient::build(quot); + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, quot_obj); + *ctor + }, + IxonConstantInfo::CPrj(proj) => { + let proj_obj = LeanIxonConstructorProj::build(proj); + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::RPrj(proj) => { + let proj_obj = LeanIxonRecursorProj::build(proj); + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::IPrj(proj) => { + let proj_obj = LeanIxonInductiveProj::build(proj); + let ctor = LeanCtor::alloc(6, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::DPrj(proj) => { + let proj_obj = LeanIxonDefinitionProj::build(proj); + let ctor = LeanCtor::alloc(7, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::Muts(muts) => { + let arr = LeanArray::alloc(muts.len()); + for (i, mc) in muts.iter().enumerate() { + arr.set(i, LeanIxonMutConst::build(mc)); + } + let ctor = LeanCtor::alloc(8, 1, 0); + ctor.set(0, arr); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.ConstantInfo. 
+ pub fn decode(self) -> IxonConstantInfo { + let ctor = self.as_ctor(); + let inner = ctor.get(0); + match ctor.tag() { + 0 => IxonConstantInfo::Defn(LeanIxonDefinition::new(inner).decode()), + 1 => IxonConstantInfo::Recr(LeanIxonRecursor::new(inner).decode()), + 2 => IxonConstantInfo::Axio(LeanIxonAxiom::new(inner).decode()), + 3 => IxonConstantInfo::Quot(LeanIxonQuotient::new(inner).decode()), + 4 => IxonConstantInfo::CPrj(LeanIxonConstructorProj::new(inner).decode()), + 5 => IxonConstantInfo::RPrj(LeanIxonRecursorProj::new(inner).decode()), + 6 => IxonConstantInfo::IPrj(LeanIxonInductiveProj::new(inner).decode()), + 7 => IxonConstantInfo::DPrj(LeanIxonDefinitionProj::new(inner).decode()), + 8 => { + let arr = inner.as_array(); + let muts = arr.map(|x| LeanIxonMutConst::new(x).decode()); + IxonConstantInfo::Muts(muts) + }, + tag => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), + } + } +} + +// ============================================================================= +// Constant +// ============================================================================= + +impl LeanIxonConstant { + /// Build Ixon.Constant + pub fn build(constant: &IxonConstant) -> Self { + let info_obj = LeanIxonConstantInfo::build(&constant.info); + let sharing_obj = LeanIxonExpr::build_array(&constant.sharing); + let refs_obj = LeanIxAddress::build_array(&constant.refs); + let univs_obj = LeanIxonUniv::build_array(&constant.univs); + let ctor = LeanCtor::alloc(0, 4, 0); + ctor.set(0, info_obj); + ctor.set(1, sharing_obj); + ctor.set(2, refs_obj); + ctor.set(3, univs_obj); + Self::new(*ctor) + } + + /// Decode Ixon.Constant. 
+ pub fn decode(self) -> IxonConstant { + let ctor = self.as_ctor(); + IxonConstant { + info: LeanIxonConstantInfo::new(ctor.get(0)).decode(), + sharing: LeanIxonExpr::decode_array(ctor.get(1).as_array()), + refs: LeanIxAddress::decode_array(ctor.get(2).as_array()), + univs: LeanIxonUniv::decode_array(ctor.get(3).as_array()), + } + } +} + +// ============================================================================= +// FFI Exports +// ============================================================================= + +/// Round-trip Ixon.Definition. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_definition( + obj: LeanIxonDefinition, +) -> LeanIxonDefinition { + let def = obj.decode(); + LeanIxonDefinition::build(&def) +} + +/// Round-trip Ixon.Recursor. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_recursor( + obj: LeanIxonRecursor, +) -> LeanIxonRecursor { + let rec = obj.decode(); + LeanIxonRecursor::build(&rec) +} + +/// Round-trip Ixon.Axiom. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanIxonAxiom) -> LeanIxonAxiom { + let ax = obj.decode(); + LeanIxonAxiom::build(&ax) +} + +/// Round-trip Ixon.Quotient. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_quotient( + obj: LeanIxonQuotient, +) -> LeanIxonQuotient { + let quot = obj.decode(); + LeanIxonQuotient::build(&quot) +} + +/// Round-trip Ixon.ConstantInfo. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_constant_info( + obj: LeanIxonConstantInfo, +) -> LeanIxonConstantInfo { + let info = obj.decode(); + LeanIxonConstantInfo::build(&info) +} + +/// Round-trip Ixon.Constant. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_constant( + obj: LeanIxonConstant, +) -> LeanIxonConstant { + let constant = obj.decode(); + LeanIxonConstant::build(&constant) +} + +/// Round-trip Ixon.RecursorRule. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_recursor_rule( + obj: LeanIxonRecursorRule, +) -> LeanIxonRecursorRule { + let rule = obj.decode(); + LeanIxonRecursorRule::build(&rule) +} + +/// Round-trip Ixon.Constructor. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_constructor( + obj: LeanIxonConstructor, +) -> LeanIxonConstructor { + let c = obj.decode(); + LeanIxonConstructor::build(&c) +} + +/// Round-trip Ixon.Inductive. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_inductive( + obj: LeanIxonInductive, +) -> LeanIxonInductive { + let ind = obj.decode(); + LeanIxonInductive::build(&ind) +} + +/// Round-trip Ixon.InductiveProj. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_inductive_proj( + obj: LeanIxonInductiveProj, +) -> LeanIxonInductiveProj { + let proj = obj.decode(); + LeanIxonInductiveProj::build(&proj) +} + +/// Round-trip Ixon.ConstructorProj. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_constructor_proj( + obj: LeanIxonConstructorProj, +) -> LeanIxonConstructorProj { + let proj = obj.decode(); + LeanIxonConstructorProj::build(&proj) +} + +/// Round-trip Ixon.RecursorProj. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_recursor_proj( + obj: LeanIxonRecursorProj, +) -> LeanIxonRecursorProj { + let proj = obj.decode(); + LeanIxonRecursorProj::build(&proj) +} + +/// Round-trip Ixon.DefinitionProj. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_definition_proj( + obj: LeanIxonDefinitionProj, +) -> LeanIxonDefinitionProj { + let proj = obj.decode(); + LeanIxonDefinitionProj::build(&proj) +} + +/// Round-trip Ixon.MutConst. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_mut_const( + obj: LeanIxonMutConst, +) -> LeanIxonMutConst { + let mc = obj.decode(); + LeanIxonMutConst::build(&mc) +} diff --git a/src/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs new file mode 100644 index 00000000..a1fb3e55 --- /dev/null +++ b/src/ffi/ixon/enums.rs @@ -0,0 +1,121 @@ +//! 
Ixon enum types: DefKind, DefinitionSafety, QuotKind build/decode/roundtrip FFI. + +use crate::ix::env::{DefinitionSafety, QuotKind}; +use crate::ix::ixon::constant::DefKind; +use crate::lean::{ + LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, +}; +use lean_ffi::object::LeanObject; + +impl LeanIxonDefKind { + /// Build Ixon.DefKind + /// | defn -- tag 0 + /// | opaq -- tag 1 + /// | thm -- tag 2 + /// Simple enums are passed as raw (unboxed) tag values across Lean FFI. + pub fn build(kind: &DefKind) -> Self { + let tag = match kind { + DefKind::Definition => 0, + DefKind::Opaque => 1, + DefKind::Theorem => 2, + }; + Self::new(LeanObject::from_enum_tag(tag)) + } + + /// Decode Ixon.DefKind (simple enum, raw unboxed tag value). + pub fn decode(self) -> DefKind { + let tag = self.as_enum_tag(); + match tag { + 0 => DefKind::Definition, + 1 => DefKind::Opaque, + 2 => DefKind::Theorem, + _ => panic!("Invalid Ixon.DefKind tag: {}", tag), + } + } +} + +impl LeanIxonDefinitionSafety { + /// Build Ixon.DefinitionSafety + /// | unsaf -- tag 0 + /// | safe -- tag 1 + /// | part -- tag 2 + pub fn build(safety: &DefinitionSafety) -> Self { + let tag = match safety { + DefinitionSafety::Unsafe => 0, + DefinitionSafety::Safe => 1, + DefinitionSafety::Partial => 2, + }; + Self::new(LeanObject::from_enum_tag(tag)) + } + + /// Decode Ixon.DefinitionSafety (simple enum, raw unboxed tag value). 
+ pub fn decode(self) -> DefinitionSafety { + let tag = self.as_enum_tag(); + match tag { + 0 => DefinitionSafety::Unsafe, + 1 => DefinitionSafety::Safe, + 2 => DefinitionSafety::Partial, + _ => panic!("Invalid Ixon.DefinitionSafety tag: {}", tag), + } + } +} + +impl LeanIxonQuotKind { + /// Build Ixon.QuotKind + /// | type -- tag 0 + /// | ctor -- tag 1 + /// | lift -- tag 2 + /// | ind -- tag 3 + pub fn build(kind: &QuotKind) -> Self { + let tag = match kind { + QuotKind::Type => 0, + QuotKind::Ctor => 1, + QuotKind::Lift => 2, + QuotKind::Ind => 3, + }; + Self::new(LeanObject::from_enum_tag(tag)) + } + + /// Decode Ixon.QuotKind (simple enum, raw unboxed tag value). + pub fn decode(self) -> QuotKind { + let tag = self.as_enum_tag(); + match tag { + 0 => QuotKind::Type, + 1 => QuotKind::Ctor, + 2 => QuotKind::Lift, + 3 => QuotKind::Ind, + _ => panic!("Invalid Ixon.QuotKind tag: {}", tag), + } + } +} + +// ============================================================================= +// FFI Exports +// ============================================================================= + +/// Round-trip Ixon.DefKind. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_def_kind( + obj: LeanIxonDefKind, +) -> LeanIxonDefKind { + let kind = obj.decode(); + LeanIxonDefKind::build(&kind) +} + +/// Round-trip Ixon.DefinitionSafety. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_definition_safety( + obj: LeanIxonDefinitionSafety, +) -> LeanIxonDefinitionSafety { + let safety = obj.decode(); + LeanIxonDefinitionSafety::build(&safety) +} + +/// Round-trip Ixon.QuotKind. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_quot_kind( + obj: LeanIxonQuotKind, +) -> LeanIxonQuotKind { + let kind = obj.decode(); + LeanIxonQuotKind::build(&kind) +} diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs new file mode 100644 index 00000000..143fb23c --- /dev/null +++ b/src/ffi/ixon/env.rs @@ -0,0 +1,392 @@ +//! Ixon.RawEnv FFI build/decode/roundtrip functions. 
+//! +//! Provides full decode/build cycle for RawEnv and its component types: +//! RawConst, RawNamed, RawBlob, RawComm. + +use crate::ix::address::Address; +use crate::ix::env::Name; +use crate::ix::ixon::comm::Comm; +use crate::ix::ixon::constant::Constant as IxonConstant; +use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; +use crate::ix::ixon::metadata::ConstantMeta; +use crate::lean::{ + LeanIxName, LeanIxonComm, LeanIxonConstant, LeanIxonConstantMeta, + LeanIxonRawBlob, LeanIxonRawComm, LeanIxonRawConst, LeanIxonRawEnv, + LeanIxonRawNameEntry, LeanIxonRawNamed, +}; +use lean_ffi::object::{LeanArray, LeanByteArray, LeanCtor, LeanExcept}; + +use crate::ffi::builder::LeanBuildCache; +use crate::lean::LeanIxAddress; + +// ============================================================================= +// RawConst (addr: Address, const: Constant) +// ============================================================================= + +/// Decoded Ixon.RawConst +pub struct DecodedRawConst { + pub addr: Address, + pub constant: IxonConstant, +} + +impl LeanIxonRawConst { + /// Decode Ixon.RawConst from Lean pointer. + pub fn decode(self) -> DecodedRawConst { + let ctor = self.as_ctor(); + DecodedRawConst { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + constant: LeanIxonConstant::new(ctor.get(1)).decode(), + } + } + + /// Build Ixon.RawConst Lean object. + pub fn build(rc: &DecodedRawConst) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(&rc.addr)); + ctor.set(1, LeanIxonConstant::build(&rc.constant)); + Self::new(*ctor) + } + + /// Build from individual parts (used by compile.rs). 
+ pub fn build_from_parts(addr: &Address, constant: &IxonConstant) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanIxonConstant::build(constant)); + Self::new(*ctor) + } +} + +// ============================================================================= +// RawNamed (name: Ix.Name, addr: Address, constMeta: ConstantMeta) +// ============================================================================= + +/// Decoded Ixon.RawNamed +pub struct DecodedRawNamed { + pub name: Name, + pub addr: Address, + pub const_meta: ConstantMeta, +} + +impl LeanIxonRawNamed { + /// Decode Ixon.RawNamed from Lean pointer. + pub fn decode(self) -> DecodedRawNamed { + let ctor = self.as_ctor(); + DecodedRawNamed { + name: LeanIxName::new(ctor.get(0)).decode(), + addr: LeanIxAddress::new(ctor.get(1)).decode(), + const_meta: LeanIxonConstantMeta::new(ctor.get(2)).decode(), + } + } + + /// Build Ixon.RawNamed Lean object. + pub fn build(cache: &mut LeanBuildCache, rn: &DecodedRawNamed) -> Self { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, LeanIxName::build(cache, &rn.name)); + ctor.set(1, LeanIxAddress::build(&rn.addr)); + ctor.set(2, LeanIxonConstantMeta::build(&rn.const_meta)); + Self::new(*ctor) + } + + /// Build from individual parts (used by compile.rs). 
+ pub fn build_from_parts( + cache: &mut LeanBuildCache, + name: &Name, + addr: &Address, + meta: &ConstantMeta, + ) -> Self { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, LeanIxName::build(cache, name)); + ctor.set(1, LeanIxAddress::build(addr)); + ctor.set(2, LeanIxonConstantMeta::build(meta)); + Self::new(*ctor) + } +} + +// ============================================================================= +// RawBlob (addr: Address, bytes: ByteArray) +// ============================================================================= + +/// Decoded Ixon.RawBlob +pub struct DecodedRawBlob { + pub addr: Address, + pub bytes: Vec<u8>, +} + +impl LeanIxonRawBlob { + /// Decode Ixon.RawBlob from Lean pointer. + pub fn decode(self) -> DecodedRawBlob { + let ctor = self.as_ctor(); + let ba = ctor.get(1).as_byte_array(); + DecodedRawBlob { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + bytes: ba.as_bytes().to_vec(), + } + } + + /// Build Ixon.RawBlob Lean object. + pub fn build(rb: &DecodedRawBlob) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(&rb.addr)); + ctor.set(1, LeanByteArray::from_bytes(&rb.bytes)); + Self::new(*ctor) + } + + /// Build from individual parts (used by compile.rs). + pub fn build_from_parts(addr: &Address, bytes: &[u8]) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanByteArray::from_bytes(bytes)); + Self::new(*ctor) + } +} + +// ============================================================================= +// RawComm (addr: Address, comm: Comm) +// ============================================================================= + +/// Decoded Ixon.RawComm +pub struct DecodedRawComm { + pub addr: Address, + pub comm: Comm, +} + +impl LeanIxonRawComm { + /// Decode Ixon.RawComm from Lean pointer. 
+ pub fn decode(self) -> DecodedRawComm { + let ctor = self.as_ctor(); + DecodedRawComm { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + comm: LeanIxonComm::new(ctor.get(1)).decode(), + } + } + + /// Build Ixon.RawComm Lean object. + pub fn build(rc: &DecodedRawComm) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(&rc.addr)); + ctor.set(1, LeanIxonComm::build(&rc.comm)); + Self::new(*ctor) + } + + /// Build from individual parts (used by compile.rs). + pub fn build_from_parts(addr: &Address, comm: &Comm) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanIxonComm::build(comm)); + Self::new(*ctor) + } +} + +// ============================================================================= +// RawNameEntry (addr: Address, name: Ix.Name) +// ============================================================================= + +/// Decoded Ixon.RawNameEntry +pub struct DecodedRawNameEntry { + pub addr: Address, + pub name: Name, +} + +impl LeanIxonRawNameEntry { + /// Decode Ixon.RawNameEntry from Lean pointer. + pub fn decode(self) -> DecodedRawNameEntry { + let ctor = self.as_ctor(); + DecodedRawNameEntry { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + name: LeanIxName::new(ctor.get(1)).decode(), + } + } + + /// Build Ixon.RawNameEntry Lean object. 
+ pub fn build( + cache: &mut LeanBuildCache, + addr: &Address, + name: &Name, + ) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanIxName::build(cache, name)); + Self::new(*ctor) + } +} + +// ============================================================================= +// RawEnv (consts, named, blobs, comms, names) +// ============================================================================= + +/// Decoded Ixon.RawEnv +pub struct DecodedRawEnv { + pub consts: Vec<DecodedRawConst>, + pub named: Vec<DecodedRawNamed>, + pub blobs: Vec<DecodedRawBlob>, + pub comms: Vec<DecodedRawComm>, + pub names: Vec<DecodedRawNameEntry>, +} + +impl LeanIxonRawEnv { + /// Decode Ixon.RawEnv from Lean pointer. + pub fn decode(self) -> DecodedRawEnv { + let ctor = self.as_ctor(); + let consts_arr = ctor.get(0).as_array(); + let named_arr = ctor.get(1).as_array(); + let blobs_arr = ctor.get(2).as_array(); + let comms_arr = ctor.get(3).as_array(); + let names_arr = ctor.get(4).as_array(); + + DecodedRawEnv { + consts: consts_arr.map(|x| LeanIxonRawConst::new(x).decode()), + named: named_arr.map(|x| LeanIxonRawNamed::new(x).decode()), + blobs: blobs_arr.map(|x| LeanIxonRawBlob::new(x).decode()), + comms: comms_arr.map(|x| LeanIxonRawComm::new(x).decode()), + names: names_arr.map(|x| LeanIxonRawNameEntry::new(x).decode()), + } + } + + /// Build Ixon.RawEnv Lean object. 
+ pub fn build(env: &DecodedRawEnv) -> Self { + let mut cache = LeanBuildCache::new(); + + // Build consts array + let consts_arr = LeanArray::alloc(env.consts.len()); + for (i, rc) in env.consts.iter().enumerate() { + consts_arr.set(i, LeanIxonRawConst::build(rc)); + } + + // Build named array + let named_arr = LeanArray::alloc(env.named.len()); + for (i, rn) in env.named.iter().enumerate() { + named_arr.set(i, LeanIxonRawNamed::build(&mut cache, rn)); + } + + // Build blobs array + let blobs_arr = LeanArray::alloc(env.blobs.len()); + for (i, rb) in env.blobs.iter().enumerate() { + blobs_arr.set(i, LeanIxonRawBlob::build(rb)); + } + + // Build comms array + let comms_arr = LeanArray::alloc(env.comms.len()); + for (i, rc) in env.comms.iter().enumerate() { + comms_arr.set(i, LeanIxonRawComm::build(rc)); + } + + // Build names array + let names_arr = LeanArray::alloc(env.names.len()); + for (i, rn) in env.names.iter().enumerate() { + names_arr + .set(i, LeanIxonRawNameEntry::build(&mut cache, &rn.addr, &rn.name)); + } + + // Build RawEnv structure + let ctor = LeanCtor::alloc(0, 5, 0); + ctor.set(0, consts_arr); + ctor.set(1, named_arr); + ctor.set(2, blobs_arr); + ctor.set(3, comms_arr); + ctor.set(4, names_arr); + Self::new(*ctor) + } +} + +// ============================================================================= +// DecodedRawEnv <-> IxonEnv Conversion Helpers +// ============================================================================= + +/// Reconstruct a Rust IxonEnv from a DecodedRawEnv. 
+pub fn decoded_to_ixon_env(decoded: &DecodedRawEnv) -> IxonEnv { + let env = IxonEnv::new(); + for rc in &decoded.consts { + env.store_const(rc.addr.clone(), rc.constant.clone()); + } + for rn in &decoded.names { + env.store_name(rn.addr.clone(), rn.name.clone()); + } + for rn in &decoded.named { + let named = IxonNamed::new(rn.addr.clone(), rn.const_meta.clone()); + env.register_name(rn.name.clone(), named); + } + for rb in &decoded.blobs { + env.blobs.insert(rb.addr.clone(), rb.bytes.clone()); + } + for rc in &decoded.comms { + env.store_comm(rc.addr.clone(), rc.comm.clone()); + } + env +} + +/// Convert a Rust IxonEnv to a DecodedRawEnv. +pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { + let consts = env + .consts + .iter() + .map(|e| DecodedRawConst { + addr: e.key().clone(), + constant: e.value().clone(), + }) + .collect(); + let named = env + .named + .iter() + .map(|e| DecodedRawNamed { + name: e.key().clone(), + addr: e.value().addr.clone(), + const_meta: e.value().meta.clone(), + }) + .collect(); + let blobs = env + .blobs + .iter() + .map(|e| DecodedRawBlob { addr: e.key().clone(), bytes: e.value().clone() }) + .collect(); + let comms = env + .comms + .iter() + .map(|e| DecodedRawComm { addr: e.key().clone(), comm: e.value().clone() }) + .collect(); + let names = env + .names + .iter() + .map(|e| DecodedRawNameEntry { + addr: e.key().clone(), + name: e.value().clone(), + }) + .collect(); + DecodedRawEnv { consts, named, blobs, comms, names } +} + +// ============================================================================= +// rs_ser_env: Serialize an Ixon.RawEnv to bytes +// ============================================================================= + +/// FFI: Serialize an Ixon.RawEnv -> ByteArray via Rust's Env.put. Pure. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_ser_env(obj: LeanIxonRawEnv) -> LeanByteArray { + let decoded = obj.decode(); + let env = decoded_to_ixon_env(&decoded); + let mut buf = Vec::new(); + env.put(&mut buf).expect("Env serialization failed"); + + LeanByteArray::from_bytes(&buf) +} + +// ============================================================================= +// rs_des_env: Deserialize bytes to an Ixon.RawEnv +// ============================================================================= + +/// FFI: Deserialize ByteArray -> Except String Ixon.RawEnv via Rust's Env.get. Pure. +#[unsafe(no_mangle)] +pub extern "C" fn rs_des_env(obj: LeanByteArray) -> LeanExcept { + let data = obj.as_bytes(); + let mut slice: &[u8] = data; + match IxonEnv::get(&mut slice) { + Ok(env) => { + let decoded = ixon_env_to_decoded(&env); + let raw_env = LeanIxonRawEnv::build(&decoded); + LeanExcept::ok(raw_env) + }, + Err(e) => { + let msg = format!("rs_des_env: {}", e); + LeanExcept::error_string(&msg) + }, + } +} diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs new file mode 100644 index 00000000..31cd2293 --- /dev/null +++ b/src/ffi/ixon/expr.rs @@ -0,0 +1,220 @@ +//! Ixon.Expr build/decode/roundtrip FFI. + +use std::sync::Arc; + +use crate::ix::ixon::expr::Expr as IxonExpr; +use crate::lean::LeanIxonExpr; +use lean_ffi::object::{LeanArray, LeanCtor}; + +/// Decode Array UInt64 from Lean. +fn decode_u64_array(obj: LeanArray) -> Vec<u64> { + obj + .iter() + .map(|elem| { + if elem.is_scalar() { + elem.unbox_usize() as u64 + } else { + let ctor = elem.as_ctor(); + ctor.scalar_u64(0, 0) + } + }) + .collect() +} + +impl LeanIxonExpr { + /// Build Ixon.Expr (12 constructors). 
+ pub fn build(expr: &IxonExpr) -> Self { + let obj = match expr { + IxonExpr::Sort(idx) => { + let ctor = LeanCtor::alloc(0, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + IxonExpr::Var(idx) => { + let ctor = LeanCtor::alloc(1, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + IxonExpr::Ref(ref_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); + for (i, idx) in univ_idxs.iter().enumerate() { + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); + } + let ctor = LeanCtor::alloc(2, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *ref_idx); + *ctor + }, + IxonExpr::Rec(rec_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); + for (i, idx) in univ_idxs.iter().enumerate() { + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); + } + let ctor = LeanCtor::alloc(3, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *rec_idx); + *ctor + }, + IxonExpr::Prj(type_ref_idx, field_idx, val) => { + let val_obj = Self::build(val); + let ctor = LeanCtor::alloc(4, 1, 16); + ctor.set(0, val_obj); + ctor.set_u64(8, *type_ref_idx); + ctor.set_u64(16, *field_idx); + *ctor + }, + IxonExpr::Str(ref_idx) => { + let ctor = LeanCtor::alloc(5, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + IxonExpr::Nat(ref_idx) => { + let ctor = LeanCtor::alloc(6, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + IxonExpr::App(fun, arg) => { + let fun_obj = Self::build(fun); + let arg_obj = Self::build(arg); + let ctor = LeanCtor::alloc(7, 2, 0); + ctor.set(0, fun_obj); + ctor.set(1, arg_obj); + *ctor + }, + IxonExpr::Lam(ty, body) => { + let ty_obj = Self::build(ty); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(8, 2, 0); + ctor.set(0, ty_obj); + ctor.set(1, body_obj); + *ctor + }, + IxonExpr::All(ty, body) => { + let ty_obj = Self::build(ty); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(9, 2, 0); + ctor.set(0, ty_obj); + ctor.set(1, body_obj); + 
*ctor + }, + IxonExpr::Let(non_dep, ty, val, body) => { + let ty_obj = Self::build(ty); + let val_obj = Self::build(val); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(10, 3, 1); + ctor.set(0, ty_obj); + ctor.set(1, val_obj); + ctor.set(2, body_obj); + ctor.set_u8(24, if *non_dep { 1 } else { 0 }); + *ctor + }, + IxonExpr::Share(idx) => { + let ctor = LeanCtor::alloc(11, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + }; + Self::new(obj) + } + + /// Build an Array of Ixon.Expr. + pub fn build_array(exprs: &[Arc<IxonExpr>]) -> LeanArray { + let arr = LeanArray::alloc(exprs.len()); + for (i, expr) in exprs.iter().enumerate() { + arr.set(i, Self::build(expr)); + } + arr + } + + /// Decode Ixon.Expr (12 constructors). + pub fn decode(self) -> IxonExpr { + let ctor = self.as_ctor(); + let tag = ctor.tag(); + match tag { + 0 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Sort(idx) + }, + 1 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Var(idx) + }, + 2 => { + let ref_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(ctor.get(0).as_array()); + IxonExpr::Ref(ref_idx, univ_idxs) + }, + 3 => { + let rec_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(ctor.get(0).as_array()); + IxonExpr::Rec(rec_idx, univ_idxs) + }, + 4 => { + let val_obj = Self::new(ctor.get(0)); + let type_ref_idx = ctor.scalar_u64(1, 0); + let field_idx = ctor.scalar_u64(1, 8); + IxonExpr::Prj(type_ref_idx, field_idx, Arc::new(val_obj.decode())) + }, + 5 => { + let ref_idx = ctor.scalar_u64(0, 0); + IxonExpr::Str(ref_idx) + }, + 6 => { + let ref_idx = ctor.scalar_u64(0, 0); + IxonExpr::Nat(ref_idx) + }, + 7 => { + let f_obj = Self::new(ctor.get(0)); + let a_obj = Self::new(ctor.get(1)); + IxonExpr::App(Arc::new(f_obj.decode()), Arc::new(a_obj.decode())) + }, + 8 => { + let ty_obj = Self::new(ctor.get(0)); + let body_obj = Self::new(ctor.get(1)); + IxonExpr::Lam(Arc::new(ty_obj.decode()), Arc::new(body_obj.decode())) + }, + 9 => { + let ty_obj = 
Self::new(ctor.get(0)); + let body_obj = Self::new(ctor.get(1)); + IxonExpr::All(Arc::new(ty_obj.decode()), Arc::new(body_obj.decode())) + }, + 10 => { + let ty_obj = Self::new(ctor.get(0)); + let val_obj = Self::new(ctor.get(1)); + let body_obj = Self::new(ctor.get(2)); + let non_dep = ctor.scalar_u8(3, 0) != 0; + IxonExpr::Let( + non_dep, + Arc::new(ty_obj.decode()), + Arc::new(val_obj.decode()), + Arc::new(body_obj.decode()), + ) + }, + 11 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Share(idx) + }, + _ => panic!("Invalid Ixon.Expr tag: {}", tag), + } + } + + /// Decode Array Ixon.Expr. + pub fn decode_array(obj: LeanArray) -> Vec<Arc<IxonExpr>> { + obj.map(|e| Arc::new(Self::new(e).decode())) + } +} + +// ============================================================================= +// FFI Exports +// ============================================================================= + +/// Round-trip Ixon.Expr. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_expr(obj: LeanIxonExpr) -> LeanIxonExpr { + let expr = obj.decode(); + LeanIxonExpr::build(&expr) +} diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs new file mode 100644 index 00000000..03e16f49 --- /dev/null +++ b/src/ffi/ixon/meta.rs @@ -0,0 +1,624 @@ +//! Ixon metadata types build/decode/roundtrip FFI. +//! +//! 
Includes: DataValue, KVMap, ExprMetaData, ExprMetaArena, ConstantMeta, Named, Comm + +use crate::ix::address::Address; +use crate::ix::env::BinderInfo; +use crate::ix::ixon::Comm; +use crate::ix::ixon::env::Named; +use crate::ix::ixon::metadata::{ + ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, +}; +use crate::lean::{ + LeanIxReducibilityHints, LeanIxonComm, LeanIxonConstantMeta, + LeanIxonDataValue, LeanIxonExprMetaArena, LeanIxonExprMetaData, + LeanIxonNamed, +}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; + +use crate::lean::LeanIxAddress; +use crate::lean::LeanIxBinderInfo; + +// ============================================================================= +// KVMap Build/Decode (not domain types, kept as free functions) +// ============================================================================= + +/// Build an Ixon.KVMap (Array (Address × DataValue)). +pub fn build_ixon_kvmap(kvmap: &KVMap) -> LeanArray { + let arr = LeanArray::alloc(kvmap.len()); + for (i, (addr, dv)) in kvmap.iter().enumerate() { + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, LeanIxAddress::build(addr)); + pair.set(1, LeanIxonDataValue::build(dv)); + arr.set(i, pair); + } + arr +} + +/// Build Array KVMap. +pub fn build_kvmap_array(kvmaps: &[KVMap]) -> LeanArray { + let arr = LeanArray::alloc(kvmaps.len()); + for (i, kvmap) in kvmaps.iter().enumerate() { + arr.set(i, build_ixon_kvmap(kvmap)); + } + arr +} + +/// Decode KVMap (Array (Address × DataValue)). +pub fn decode_ixon_kvmap(obj: LeanArray) -> KVMap { + obj + .iter() + .map(|pair| { + let pair_ctor = pair.as_ctor(); + ( + LeanIxAddress::new(pair_ctor.get(0)).decode(), + LeanIxonDataValue::new(pair_ctor.get(1)).decode(), + ) + }) + .collect() +} + +/// Decode Array KVMap. 
+fn decode_kvmap_array(obj: LeanArray) -> Vec { + obj.map(|x| decode_ixon_kvmap(x.as_array())) +} + +// ============================================================================= +// Address Array Helpers +// ============================================================================= + +/// Decode Array Address. +fn decode_address_array(obj: LeanArray) -> Vec
{ + LeanIxAddress::decode_array(obj) +} + +/// Build Array UInt64. +fn build_u64_array(vals: &[u64]) -> LeanArray { + let arr = LeanArray::alloc(vals.len()); + for (i, &v) in vals.iter().enumerate() { + arr.set(i, LeanObject::box_u64(v)); + } + arr +} + +/// Decode Array UInt64. +fn decode_u64_array(obj: LeanArray) -> Vec { + obj.iter().map(|elem| elem.unbox_u64()).collect() +} + +// ============================================================================= +// DataValue Build/Decode +// ============================================================================= + +impl LeanIxonDataValue { + /// Build Ixon.DataValue (for metadata) + pub fn build(dv: &IxonDataValue) -> Self { + let obj = match dv { + IxonDataValue::OfString(addr) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfBool(b) => { + let ctor = LeanCtor::alloc(1, 0, 1); + ctor.set_u8(0, if *b { 1 } else { 0 }); + *ctor + }, + IxonDataValue::OfName(addr) => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfNat(addr) => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfInt(addr) => { + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfSyntax(addr) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.DataValue. 
+ pub fn decode(self) -> IxonDataValue { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => IxonDataValue::OfString(LeanIxAddress::new(ctor.get(0)).decode()), + 1 => { + let b = ctor.scalar_u8(0, 0) != 0; + IxonDataValue::OfBool(b) + }, + 2 => IxonDataValue::OfName(LeanIxAddress::new(ctor.get(0)).decode()), + 3 => IxonDataValue::OfNat(LeanIxAddress::new(ctor.get(0)).decode()), + 4 => IxonDataValue::OfInt(LeanIxAddress::new(ctor.get(0)).decode()), + 5 => IxonDataValue::OfSyntax(LeanIxAddress::new(ctor.get(0)).decode()), + tag => panic!("Invalid Ixon.DataValue tag: {}", tag), + } + } +} + +// ============================================================================= +// ExprMetaData Build/Decode +// ============================================================================= + +impl LeanIxonExprMetaData { + /// Build Ixon.ExprMetaData Lean object. + /// + /// | Variant | Tag | Obj fields | Scalar bytes | + /// |------------|-----|------------------------|--------------------------| + /// | leaf | 0 | 0 | 0 | + /// | app | 1 | 0 | 16 (2× u64) | + /// | binder | 2 | 1 (name: Address) | 17 (info: u8, 2× u64) | + /// | letBinder | 3 | 1 (name: Address) | 24 (3× u64) | + /// | ref | 4 | 1 (name: Address) | 0 | + /// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | + /// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | + pub fn build(node: &ExprMetaData) -> Self { + let obj = match node { + ExprMetaData::Leaf => LeanObject::box_usize(0), + + ExprMetaData::App { children } => { + // Tag 1, 0 obj fields, 16 scalar bytes (2× u64) + let ctor = LeanCtor::alloc(1, 0, 16); + ctor.set_u64(0, children[0]); + ctor.set_u64(8, children[1]); + *ctor + }, + + ExprMetaData::Binder { name, info, children } => { + // Tag 2, 1 obj field (name), scalar: 2× u64 + u8 (info) + // Lean ABI sorts scalars by size descending: [tyChild: u64 @ 8] [bodyChild: u64 @ 16] [info: u8 @ 24] + // Offsets from obj_cptr: 1*8=8 base for scalar area + let ctor = LeanCtor::alloc(2, 1, 17); + ctor.set(0, 
LeanIxAddress::build(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(16, children[1]); + ctor.set_u8(24, LeanIxBinderInfo::to_u8(info)); + *ctor + }, + + ExprMetaData::LetBinder { name, children } => { + // Tag 3, 1 obj field (name), 24 scalar bytes (3× u64) + let ctor = LeanCtor::alloc(3, 1, 24); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(16, children[1]); + ctor.set_u64(24, children[2]); + *ctor + }, + + ExprMetaData::Ref { name } => { + // Tag 4, 1 obj field (name), 0 scalar bytes + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, LeanIxAddress::build(name)); + *ctor + }, + + ExprMetaData::Prj { struct_name, child } => { + // Tag 5, 1 obj field (structName), 8 scalar bytes (1× u64) + let ctor = LeanCtor::alloc(5, 1, 8); + ctor.set(0, LeanIxAddress::build(struct_name)); + ctor.set_u64(8, *child); + *ctor + }, + + ExprMetaData::Mdata { mdata, child } => { + // Tag 6, 1 obj field (mdata: Array KVMap), 8 scalar bytes (1× u64) + let mdata_arr = build_kvmap_array(mdata); + let ctor = LeanCtor::alloc(6, 1, 8); + ctor.set(0, mdata_arr); + ctor.set_u64(8, *child); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.ExprMetaData from Lean pointer. 
+ pub fn decode(self) -> ExprMetaData { + // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) + if self.is_scalar() { + let tag = self.as_ptr() as usize >> 1; + assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {}", tag); + return ExprMetaData::Leaf; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 1 => { + // app: 0 obj fields, 2× u64 scalar + let fun_ = ctor.scalar_u64(0, 0); + let arg = ctor.scalar_u64(0, 8); + ExprMetaData::App { children: [fun_, arg] } + }, + + 2 => { + // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): + // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let ty_child = ctor.scalar_u64(1, 0); + let body_child = ctor.scalar_u64(1, 8); + let info_byte = ctor.scalar_u8(1, 16); + let info = match info_byte { + 0 => BinderInfo::Default, + 1 => BinderInfo::Implicit, + 2 => BinderInfo::StrictImplicit, + 3 => BinderInfo::InstImplicit, + _ => panic!("Invalid BinderInfo tag: {}", info_byte), + }; + ExprMetaData::Binder { name, info, children: [ty_child, body_child] } + }, + + 3 => { + // letBinder: 1 obj field (name), 3× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let ty_child = ctor.scalar_u64(1, 0); + let val_child = ctor.scalar_u64(1, 8); + let body_child = ctor.scalar_u64(1, 16); + ExprMetaData::LetBinder { + name, + children: [ty_child, val_child, body_child], + } + }, + + 4 => { + // ref: 1 obj field (name), 0 scalar + ExprMetaData::Ref { name: LeanIxAddress::new(ctor.get(0)).decode() } + }, + + 5 => { + // prj: 1 obj field (structName), 1× u64 scalar + let struct_name = LeanIxAddress::new(ctor.get(0)).decode(); + let child = ctor.scalar_u64(1, 0); + ExprMetaData::Prj { struct_name, child } + }, + + 6 => { + // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar + let mdata = decode_kvmap_array(ctor.get(0).as_array()); + let child = ctor.scalar_u64(1, 0); + ExprMetaData::Mdata { mdata, child } + }, + + tag => 
panic!("Invalid Ixon.ExprMetaData tag: {}", tag), + } + } +} + +// ============================================================================= +// ExprMetaArena Build/Decode +// ============================================================================= + +impl LeanIxonExprMetaArena { + /// Build Ixon.ExprMetaArena Lean object. + /// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), + /// which Lean unboxes — the value IS the Array directly. + pub fn build(arena: &ExprMeta) -> Self { + let arr = LeanArray::alloc(arena.nodes.len()); + for (i, node) in arena.nodes.iter().enumerate() { + arr.set(i, LeanIxonExprMetaData::build(node)); + } + Self::new(*arr) + } + + /// Decode Ixon.ExprMetaArena from Lean pointer. + /// Single-field struct is unboxed — obj IS the Array directly. + pub fn decode(self) -> ExprMeta { + let arr = self.as_array(); + ExprMeta { nodes: arr.map(|x| LeanIxonExprMetaData::new(x).decode()) } + } +} + +// ============================================================================= +// ConstantMeta Build/Decode +// ============================================================================= + +impl LeanIxonConstantMeta { + /// Build Ixon.ConstantMeta Lean object. 
+ /// + /// | Variant | Tag | Obj fields | Scalar bytes | + /// |---------|-----|-----------|-------------| + /// | empty | 0 | 0 | 0 | + /// | defn | 1 | 6 (name, lvls, hints, all, ctx, arena) | 16 (2× u64) | + /// | axio | 2 | 3 (name, lvls, arena) | 8 (1× u64) | + /// | quot | 3 | 3 (name, lvls, arena) | 8 (1× u64) | + /// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | + /// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | + /// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | + pub fn build(meta: &ConstantMeta) -> Self { + let obj = match meta { + ConstantMeta::Empty => LeanObject::box_usize(0), + + ConstantMeta::Def { + name, + lvls, + hints, + all, + ctx, + arena, + type_root, + value_root, + } => { + let ctor = LeanCtor::alloc(1, 6, 16); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxReducibilityHints::build(hints)); + ctor.set(3, LeanIxAddress::build_array(all)); + ctor.set(4, LeanIxAddress::build_array(ctx)); + ctor.set(5, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(6 * 8, *type_root); + ctor.set_u64(6 * 8 + 8, *value_root); + *ctor + }, + + ConstantMeta::Axio { name, lvls, arena, type_root } => { + let ctor = LeanCtor::alloc(2, 3, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor + }, + + ConstantMeta::Quot { name, lvls, arena, type_root } => { + let ctor = LeanCtor::alloc(3, 3, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor + }, + + ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { + let ctor = LeanCtor::alloc(4, 6, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + 
ctor.set(2, LeanIxAddress::build_array(ctors)); + ctor.set(3, LeanIxAddress::build_array(all)); + ctor.set(4, LeanIxAddress::build_array(ctx)); + ctor.set(5, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(6 * 8, *type_root); + *ctor + }, + + ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { + let ctor = LeanCtor::alloc(5, 4, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxAddress::build(induct)); + ctor.set(3, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(4 * 8, *type_root); + *ctor + }, + + ConstantMeta::Rec { + name, + lvls, + rules, + all, + ctx, + arena, + type_root, + rule_roots, + } => { + let ctor = LeanCtor::alloc(6, 7, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxAddress::build_array(rules)); + ctor.set(3, LeanIxAddress::build_array(all)); + ctor.set(4, LeanIxAddress::build_array(ctx)); + ctor.set(5, LeanIxonExprMetaArena::build(arena)); + ctor.set(6, build_u64_array(rule_roots)); + ctor.set_u64(7 * 8, *type_root); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.ConstantMeta from Lean pointer. 
+ pub fn decode(self) -> ConstantMeta { + // Empty (tag 0, no fields) is represented as a scalar lean_box(0) + if self.is_scalar() { + let tag = self.as_ptr() as usize >> 1; + assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {}", tag); + return ConstantMeta::Empty; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 1 => { + // defn: 6 obj fields, 2× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let hints = LeanIxReducibilityHints::new(ctor.get(2)).decode(); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let type_root = ctor.scalar_u64(6, 0); + let value_root = ctor.scalar_u64(6, 8); + ConstantMeta::Def { + name, + lvls, + hints, + all, + ctx, + arena, + type_root, + value_root, + } + }, + + 2 => { + // axio: 3 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let arena = LeanIxonExprMetaArena::new(ctor.get(2)).decode(); + let type_root = ctor.scalar_u64(3, 0); + ConstantMeta::Axio { name, lvls, arena, type_root } + }, + + 3 => { + // quot: 3 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let arena = LeanIxonExprMetaArena::new(ctor.get(2)).decode(); + let type_root = ctor.scalar_u64(3, 0); + ConstantMeta::Quot { name, lvls, arena, type_root } + }, + + 4 => { + // indc: 6 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let ctors = decode_address_array(ctor.get(2).as_array()); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let type_root = 
ctor.scalar_u64(6, 0); + ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } + }, + + 5 => { + // ctor: 4 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let induct = LeanIxAddress::new(ctor.get(2)).decode(); + let arena = LeanIxonExprMetaArena::new(ctor.get(3)).decode(); + let type_root = ctor.scalar_u64(4, 0); + ConstantMeta::Ctor { name, lvls, induct, arena, type_root } + }, + + 6 => { + // recr: 7 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let rules = decode_address_array(ctor.get(2).as_array()); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let rule_roots = decode_u64_array(ctor.get(6).as_array()); + let type_root = ctor.scalar_u64(7, 0); + ConstantMeta::Rec { + name, + lvls, + rules, + all, + ctx, + arena, + type_root, + rule_roots, + } + }, + + tag => panic!("Invalid Ixon.ConstantMeta tag: {}", tag), + } + } +} + +// ============================================================================= +// Named and Comm Build/Decode +// ============================================================================= + +impl LeanIxonNamed { + /// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } + pub fn build(addr: &Address, meta: &ConstantMeta) -> Self { + let addr_obj = LeanIxAddress::build(addr); + let meta_obj = LeanIxonConstantMeta::build(meta); + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, addr_obj); + ctor.set(1, meta_obj); + Self::new(*ctor) + } + + /// Decode Ixon.Named. 
+ pub fn decode(self) -> Named { + let ctor = self.as_ctor(); + Named { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + meta: LeanIxonConstantMeta::new(ctor.get(1)).decode(), + } + } +} + +impl LeanIxonComm { + /// Build Ixon.Comm { secret : Address, payload : Address } + pub fn build(comm: &Comm) -> Self { + let secret_obj = LeanIxAddress::build(&comm.secret); + let payload_obj = LeanIxAddress::build(&comm.payload); + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, secret_obj); + ctor.set(1, payload_obj); + Self::new(*ctor) + } + + /// Decode Ixon.Comm. + pub fn decode(self) -> Comm { + let ctor = self.as_ctor(); + Comm { + secret: LeanIxAddress::new(ctor.get(0)).decode(), + payload: LeanIxAddress::new(ctor.get(1)).decode(), + } + } +} + +// ============================================================================= +// FFI Exports +// ============================================================================= + +/// Round-trip Ixon.DataValue. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_data_value( + obj: LeanIxonDataValue, +) -> LeanIxonDataValue { + let dv = obj.decode(); + LeanIxonDataValue::build(&dv) +} + +/// Round-trip Ixon.Comm. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanIxonComm) -> LeanIxonComm { + let comm = obj.decode(); + LeanIxonComm::build(&comm) +} + +/// Round-trip Ixon.ExprMetaData. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( + obj: LeanIxonExprMetaData, +) -> LeanIxonExprMetaData { + let node = obj.decode(); + LeanIxonExprMetaData::build(&node) +} + +/// Round-trip Ixon.ExprMetaArena. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( + obj: LeanIxonExprMetaArena, +) -> LeanIxonExprMetaArena { + let arena = obj.decode(); + LeanIxonExprMetaArena::build(&arena) +} + +/// Round-trip Ixon.ConstantMeta (full arena-based). 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_constant_meta( + obj: LeanIxonConstantMeta, +) -> LeanIxonConstantMeta { + let meta = obj.decode(); + LeanIxonConstantMeta::build(&meta) +} + +/// Round-trip Ixon.Named (with real metadata). +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_named(obj: LeanIxonNamed) -> LeanIxonNamed { + let named = obj.decode(); + LeanIxonNamed::build(&named.addr, &named.meta) +} diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs new file mode 100644 index 00000000..14a3683c --- /dev/null +++ b/src/ffi/ixon/serialize.rs @@ -0,0 +1,201 @@ +//! Ixon serialization compatibility FFI. +//! +//! Contains FFI functions for comparing Lean and Rust serialization outputs, +//! and Env serialization roundtrip testing. + +use std::sync::Arc; + +use crate::ix::address::Address; +use crate::ix::ixon::serialize::put_expr; +use crate::ix::ixon::sharing::hash_expr; +use crate::ix::ixon::univ::put_univ; +use crate::lean::{ + LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, LeanIxonUniv, +}; +use lean_ffi::object::LeanByteArray; + +/// Check if Lean's computed hash matches Rust's computed hash. +#[unsafe(no_mangle)] +pub extern "C" fn rs_expr_hash_matches( + expr_obj: LeanIxonExpr, + expected_hash: LeanIxAddress, +) -> bool { + let expr = Arc::new(expr_obj.decode()); + let hash = hash_expr(&expr); + let expected = expected_hash.decode(); + Address::from_slice(hash.as_bytes()).is_ok_and(|h| h == expected) +} + +/// Check if Lean's Ixon.Univ serialization matches Rust. +#[unsafe(no_mangle)] +pub extern "C" fn rs_eq_univ_serialization( + univ_obj: LeanIxonUniv, + bytes_obj: LeanByteArray, +) -> bool { + let univ = univ_obj.decode(); + let bytes_data = bytes_obj.as_bytes(); + let mut buf = Vec::with_capacity(bytes_data.len()); + put_univ(&univ, &mut buf); + buf == bytes_data +} + +/// Check if Lean's Ixon.Expr serialization matches Rust. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_eq_expr_serialization( + expr_obj: LeanIxonExpr, + bytes_obj: LeanByteArray, +) -> bool { + let expr = expr_obj.decode(); + let bytes_data = bytes_obj.as_bytes(); + let mut buf = Vec::with_capacity(bytes_data.len()); + put_expr(&expr, &mut buf); + buf == bytes_data +} + +/// Check if Lean's Ixon.Constant serialization matches Rust. +#[unsafe(no_mangle)] +pub extern "C" fn rs_eq_constant_serialization( + constant_obj: LeanIxonConstant, + bytes_obj: LeanByteArray, +) -> bool { + let constant = constant_obj.decode(); + let bytes_data = bytes_obj.as_bytes(); + let mut buf = Vec::with_capacity(bytes_data.len()); + constant.put(&mut buf); + buf == bytes_data +} + +/// Check if Lean's Ixon.Env serialization can be deserialized by Rust and content matches. +/// Due to HashMap ordering differences, we compare deserialized content rather than bytes. +#[unsafe(no_mangle)] +pub extern "C" fn rs_eq_env_serialization( + raw_env_obj: LeanIxonRawEnv, + bytes_obj: LeanByteArray, +) -> bool { + use crate::ix::ixon::env::Env; + + let decoded = raw_env_obj.decode(); + let bytes_data = bytes_obj.as_bytes(); + + // Deserialize Lean's bytes using Rust's deserializer + let rust_env = match Env::get(&mut &bytes_data[..]) { + Ok(env) => env, + Err(_) => return false, + }; + + // Compare content: check that all items from decoded RawEnv are in the deserialized Env + // Consts + if rust_env.consts.len() != decoded.consts.len() { + return false; + } + for rc in &decoded.consts { + match rust_env.consts.get(&rc.addr) { + Some(c) if *c == rc.constant => {}, + _ => return false, + } + } + + // Blobs + if rust_env.blobs.len() != decoded.blobs.len() { + return false; + } + for rb in &decoded.blobs { + match rust_env.blobs.get(&rb.addr) { + Some(b) if *b == rb.bytes => {}, + _ => return false, + } + } + + // Comms + if rust_env.comms.len() != decoded.comms.len() { + return false; + } + for rc in &decoded.comms { + match rust_env.comms.get(&rc.addr) { + 
Some(c) if *c == rc.comm => {}, + _ => return false, + } + } + + // Named: compare by checking all entries exist with matching addresses + if rust_env.named.len() != decoded.named.len() { + return false; + } + for rn in &decoded.named { + match rust_env.named.get(&rn.name) { + Some(named) if named.addr == rn.addr => {}, + _ => return false, + } + } + + true +} + +/// FFI: Test Env serialization roundtrip. +/// Takes: +/// - lean_bytes_obj: pointer to ByteArray containing serialized Env from Lean +/// +/// Returns: true if Rust can deserialize and re-serialize to the same bytes +#[unsafe(no_mangle)] +extern "C" fn rs_env_serde_roundtrip(lean_bytes_obj: LeanByteArray) -> bool { + use crate::ix::ixon::env::Env; + + // Get bytes from Lean ByteArray + let lean_bytes = lean_bytes_obj.as_bytes().to_vec(); + + // Try to deserialize with Rust + let mut slice = lean_bytes.as_slice(); + let env = match Env::get(&mut slice) { + Ok(e) => e, + Err(e) => { + eprintln!("Rust Env::get failed: {}", e); + return false; + }, + }; + + // Re-serialize + let mut rust_bytes = Vec::new(); + if let Err(e) = env.put(&mut rust_bytes) { + eprintln!("Rust Env::put failed: {}", e); + return false; + } + + // Compare + if lean_bytes != rust_bytes { + eprintln!("Env roundtrip mismatch:"); + eprintln!(" Input: {} bytes", lean_bytes.len()); + eprintln!(" Output: {} bytes", rust_bytes.len()); + if lean_bytes.len() <= 200 { + eprintln!(" Input bytes: {:?}", lean_bytes); + } + if rust_bytes.len() <= 200 { + eprintln!(" Output bytes: {:?}", rust_bytes); + } + return false; + } + + true +} + +/// FFI: Compare Env serialization between Lean and Rust. 
+/// Takes: +/// - lean_bytes_obj: pointer to ByteArray containing serialized Env from Lean +/// +/// Returns: true if Rust can deserialize and the counts match +#[unsafe(no_mangle)] +extern "C" fn rs_env_serde_check(lean_bytes_obj: LeanByteArray) -> bool { + use crate::ix::ixon::env::Env; + + // Get bytes from Lean ByteArray + let lean_bytes = lean_bytes_obj.as_bytes().to_vec(); + + // Try to deserialize with Rust + let mut slice = lean_bytes.as_slice(); + match Env::get(&mut slice) { + Ok(_) => true, + Err(e) => { + eprintln!("Rust Env::get failed: {}", e); + false + }, + } +} diff --git a/src/lean/ffi/ixon/sharing.rs b/src/ffi/ixon/sharing.rs similarity index 77% rename from src/lean/ffi/ixon/sharing.rs rename to src/ffi/ixon/sharing.rs index 955386cb..85e5ddd9 100644 --- a/src/lean/ffi/ixon/sharing.rs +++ b/src/ffi/ixon/sharing.rs @@ -1,6 +1,5 @@ //! Ixon sharing analysis FFI. -use std::ffi::c_void; use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; @@ -8,19 +7,15 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; -use crate::lean::array::LeanArrayObject; -use crate::lean::as_ref_unsafe; -use crate::lean::sarray::LeanSArrayObject; - -use super::expr::decode_ixon_expr_array; -use super::serialize::lean_ptr_to_ixon_expr; +use crate::lean::LeanIxonExpr; +use lean_ffi::object::{LeanArray, LeanByteArray}; /// FFI: Debug sharing analysis - print usage counts for subterms with usage >= 2. /// This helps diagnose why Lean and Rust make different sharing decisions. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_debug_sharing_analysis(exprs_ptr: *const c_void) { - let exprs_arr: &LeanArrayObject = as_ref_unsafe(exprs_ptr.cast()); - let exprs: Vec> = exprs_arr.to_vec(lean_ptr_to_ixon_expr); +pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanArray) { + let exprs: Vec> = + exprs_obj.map(|x| Arc::new(LeanIxonExpr::new(x).decode())); println!("[Rust] Analyzing {} input expressions", exprs.len()); @@ -60,8 +55,8 @@ pub extern "C" fn rs_debug_sharing_analysis(exprs_ptr: *const c_void) { /// FFI: Run Rust's sharing analysis on Lean-provided Ixon.Expr array. /// Returns the number of shared items Rust would produce. #[unsafe(no_mangle)] -extern "C" fn rs_analyze_sharing_count(exprs_ptr: *const c_void) -> u64 { - let exprs = decode_ixon_expr_array(exprs_ptr); +extern "C" fn rs_analyze_sharing_count(exprs_obj: LeanArray) -> u64 { + let exprs = LeanIxonExpr::decode_array(exprs_obj); let (info_map, _ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -74,11 +69,11 @@ extern "C" fn rs_analyze_sharing_count(exprs_ptr: *const c_void) -> u64 { /// Returns number of shared items. 
#[unsafe(no_mangle)] extern "C" fn rs_run_sharing_analysis( - exprs_ptr: *const c_void, - out_sharing_vec: *mut c_void, - out_rewritten: *mut c_void, + exprs_obj: LeanArray, + out_sharing_vec: LeanByteArray, + out_rewritten: LeanByteArray, ) -> u64 { - let exprs = decode_ixon_expr_array(exprs_ptr); + let exprs = LeanIxonExpr::decode_array(exprs_obj); let (info_map, ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -98,13 +93,8 @@ extern "C" fn rs_run_sharing_analysis( } // Write to output arrays - let sharing_out: &mut LeanSArrayObject = - unsafe { &mut *out_sharing_vec.cast() }; - sharing_out.set_data(&sharing_bytes); - - let rewritten_out: &mut LeanSArrayObject = - unsafe { &mut *out_rewritten.cast() }; - rewritten_out.set_data(&rewritten_bytes); + unsafe { out_sharing_vec.set_data(&sharing_bytes) }; + unsafe { out_rewritten.set_data(&rewritten_bytes) }; shared_hashes.len() as u64 } @@ -117,15 +107,15 @@ extern "C" fn rs_run_sharing_analysis( /// - bits 48-63: Rust sharing count #[unsafe(no_mangle)] extern "C" fn rs_compare_sharing_analysis( - exprs_ptr: *const c_void, - lean_sharing_ptr: *const c_void, - _lean_rewritten_ptr: *const c_void, + exprs_obj: LeanArray, + lean_sharing_obj: LeanArray, + _lean_rewritten_obj: LeanArray, ) -> u64 { // Decode input expressions - let exprs = decode_ixon_expr_array(exprs_ptr); + let exprs = LeanIxonExpr::decode_array(exprs_obj); // Decode Lean's sharing vector - let lean_sharing = decode_ixon_expr_array(lean_sharing_ptr); + let lean_sharing = LeanIxonExpr::decode_array(lean_sharing_obj); // Run Rust's sharing analysis let (info_map, ptr_to_hash) = analyze_block(&exprs, false); diff --git a/src/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs new file mode 100644 index 00000000..074363ff --- /dev/null +++ b/src/ffi/ixon/univ.rs @@ -0,0 +1,87 @@ +//! Ixon.Univ build/decode/roundtrip FFI. 
+ +use std::sync::Arc; + +use crate::ix::ixon::univ::Univ; +use crate::lean::LeanIxonUniv; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; + +impl LeanIxonUniv { + /// Build Ixon.Univ + pub fn build(univ: &Univ) -> Self { + let obj = match univ { + Univ::Zero => LeanObject::box_usize(0), + Univ::Succ(inner) => { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, Self::build(inner)); + *ctor + }, + Univ::Max(a, b) => { + let ctor = LeanCtor::alloc(2, 2, 0); + ctor.set(0, Self::build(a)); + ctor.set(1, Self::build(b)); + *ctor + }, + Univ::IMax(a, b) => { + let ctor = LeanCtor::alloc(3, 2, 0); + ctor.set(0, Self::build(a)); + ctor.set(1, Self::build(b)); + *ctor + }, + Univ::Var(idx) => { + let ctor = LeanCtor::alloc(4, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + }; + Self::new(obj) + } + + /// Build an Array of Ixon.Univ. + pub fn build_array(univs: &[Arc]) -> LeanArray { + let arr = LeanArray::alloc(univs.len()); + for (i, univ) in univs.iter().enumerate() { + arr.set(i, Self::build(univ)); + } + arr + } + + /// Decode Ixon.Univ (recursive enum). + pub fn decode(self) -> Univ { + let obj: LeanObject = *self; + if obj.is_scalar() { + return Univ::Zero; + } + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => Univ::Zero, + 1 => Univ::Succ(Arc::new(Self::new(ctor.get(0)).decode())), + 2 => Univ::Max( + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + ), + 3 => Univ::IMax( + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + ), + 4 => Univ::Var(ctor.scalar_u64(0, 0)), + tag => panic!("Invalid Ixon.Univ tag: {tag}"), + } + } + + /// Decode Array Ixon.Univ. + pub fn decode_array(obj: LeanArray) -> Vec> { + obj.map(|elem| Arc::new(Self::new(elem).decode())) + } +} + +// ============================================================================= +// FFI Exports +// ============================================================================= + +/// Round-trip Ixon.Univ. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_ixon_univ(obj: LeanIxonUniv) -> LeanIxonUniv { + let univ = obj.decode(); + LeanIxonUniv::build(&univ) +} diff --git a/src/ffi/keccak.rs b/src/ffi/keccak.rs new file mode 100644 index 00000000..3638a3d9 --- /dev/null +++ b/src/ffi/keccak.rs @@ -0,0 +1,42 @@ +use std::sync::OnceLock; + +use tiny_keccak::{Hasher, Keccak}; + +use lean_ffi::object::{ + ExternalClass, LeanByteArray, LeanExternal, LeanObject, +}; + +static KECCAK_CLASS: OnceLock = OnceLock::new(); + +fn keccak_class() -> &'static ExternalClass { + KECCAK_CLASS.get_or_init(ExternalClass::register_with_drop::) +} + +/// `Keccak.Hasher.init : Unit → Hasher` +#[unsafe(no_mangle)] +extern "C" fn rs_keccak256_hasher_init( + _unit: LeanObject, +) -> LeanExternal { + LeanExternal::alloc(keccak_class(), Keccak::v256()) +} + +/// `Keccak.Hasher.update : (hasher: Hasher) → (input: @& ByteArray) → Hasher` +#[unsafe(no_mangle)] +extern "C" fn rs_keccak256_hasher_update( + hasher: LeanExternal, + input: LeanByteArray, +) -> LeanExternal { + let mut new_hasher = hasher.get().clone(); + new_hasher.update(input.as_bytes()); + LeanExternal::alloc(keccak_class(), new_hasher) +} + +/// `Keccak.Hasher.finalize : (hasher: Hasher) → ByteArray` +#[unsafe(no_mangle)] +extern "C" fn rs_keccak256_hasher_finalize( + hasher: LeanExternal, +) -> LeanByteArray { + let mut data = [0u8; 32]; + hasher.get().clone().finalize(&mut data); + LeanByteArray::from_bytes(&data) +} diff --git a/src/lean/ffi/lean_env.rs b/src/ffi/lean_env.rs similarity index 63% rename from src/lean/ffi/lean_env.rs rename to src/ffi/lean_env.rs index 3817e0e4..695b29c7 100644 --- a/src/lean/ffi/lean_env.rs +++ b/src/ffi/lean_env.rs @@ -19,6 +19,9 @@ use std::sync::Arc; use rustc_hash::FxHashMap; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanList, LeanObject}; + use crate::{ ix::compile::compile_env, ix::decompile::{check_decompile, decompile_env}, @@ -29,26 +32,21 @@ use crate::{ ReducibilityHints, 
SourceInfo, Substring, Syntax, SyntaxPreresolved, TheoremVal, }, - lean::{ - array::LeanArrayObject, as_ref_unsafe, collect_list, ctor::LeanCtorObject, - lean_is_scalar, nat::Nat, string::LeanStringObject, - }, - lean_unbox, }; const PARALLEL_THRESHOLD: usize = 100; -/// Wrapper to allow sending raw pointers across threads. The underlying Lean -/// objects must remain valid for the entire duration of parallel decoding +/// Wrapper to allow sending `LeanObject` across threads. The underlying Lean +/// objects must remain valid for the entire duration of parallel decoding. #[derive(Clone, Copy)] -struct SendPtr(*const c_void); +struct SendObj(LeanObject); -unsafe impl Send for SendPtr {} -unsafe impl Sync for SendPtr {} +unsafe impl Send for SendObj {} +unsafe impl Sync for SendObj {} -impl SendPtr { +impl SendObj { #[inline] - fn get(self) -> *const c_void { + fn get(self) -> LeanObject { self.0 } } @@ -96,38 +94,29 @@ impl<'g> Cache<'g> { } } -fn collect_list_ptrs(mut ptr: *const c_void) -> Vec<*const c_void> { - let mut ptrs = Vec::new(); - while !lean_is_scalar(ptr) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); - ptrs.push(head_ptr); - ptr = tail_ptr; - } - ptrs +fn collect_list_objs(list: LeanList) -> Vec { + list.iter().collect() } // Name decoding with global cache -pub fn lean_ptr_to_name(ptr: *const c_void, global: &GlobalCache) -> Name { +pub fn decode_name(obj: LeanObject, global: &GlobalCache) -> Name { + let ptr = obj.as_ptr(); // Fast path: check if already cached if let Some(name) = global.names.get(&ptr) { return name.clone(); } // Compute the name - let name = if lean_is_scalar(ptr) { + let name = if obj.is_scalar() { Name::anon() } else { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [pre_ptr, pos_ptr] = ctor.objs(); + let ctor = obj.as_ctor(); + let [pre, pos] = ctor.objs(); // Recursive call - will also use global cache - let pre = lean_ptr_to_name(pre_ptr, global); + let pre 
= decode_name(pre, global); match ctor.tag() { - 1 => { - let str_obj: &LeanStringObject = as_ref_unsafe(pos_ptr.cast()); - Name::str(pre, str_obj.as_string()) - }, - 2 => Name::num(pre, Nat::from_ptr(pos_ptr)), + 1 => Name::str(pre, pos.as_string().to_string()), + 2 => Name::num(pre, Nat::from_obj(pos)), _ => unreachable!(), } }; @@ -136,33 +125,34 @@ pub fn lean_ptr_to_name(ptr: *const c_void, global: &GlobalCache) -> Name { global.names.entry(ptr).or_insert(name).clone() } -fn lean_ptr_to_level(ptr: *const c_void, cache: &mut Cache<'_>) -> Level { +fn decode_level(obj: LeanObject, cache: &mut Cache<'_>) -> Level { + let ptr = obj.as_ptr(); if let Some(cached) = cache.local.univs.get(&ptr) { return cached.clone(); } - let level = if lean_is_scalar(ptr) { + let level = if obj.is_scalar() { Level::zero() } else { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); + let ctor = obj.as_ctor(); match ctor.tag() { 1 => { - let [u] = ctor.objs().map(|p| lean_ptr_to_level(p, cache)); + let [u] = ctor.objs::<1>().map(|o| decode_level(o, cache)); Level::succ(u) }, 2 => { - let [u, v] = ctor.objs().map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = ctor.objs::<2>().map(|o| decode_level(o, cache)); Level::max(u, v) }, 3 => { - let [u, v] = ctor.objs().map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = ctor.objs::<2>().map(|o| decode_level(o, cache)); Level::imax(u, v) }, 4 => { - let [name] = ctor.objs().map(|p| lean_ptr_to_name(p, cache.global)); + let [name] = ctor.objs::<1>().map(|o| decode_name(o, cache.global)); Level::param(name) }, 5 => { - let [name] = ctor.objs().map(|p| lean_ptr_to_name(p, cache.global)); + let [name] = ctor.objs::<1>().map(|o| decode_name(o, cache.global)); Level::mvar(name) }, _ => unreachable!(), @@ -172,97 +162,92 @@ fn lean_ptr_to_level(ptr: *const c_void, cache: &mut Cache<'_>) -> Level { level } -fn lean_ptr_to_substring(ptr: *const c_void) -> Substring { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [str_ptr, 
start_pos_ptr, stop_pos_ptr] = ctor.objs(); - let str: &LeanStringObject = as_ref_unsafe(str_ptr.cast()); - let str = str.as_string(); - let start_pos = Nat::from_ptr(start_pos_ptr); - let stop_pos = Nat::from_ptr(stop_pos_ptr); +fn decode_substring(obj: LeanObject) -> Substring { + let ctor = obj.as_ctor(); + let [str_obj, start_pos, stop_pos] = ctor.objs(); + let str = str_obj.as_string().to_string(); + let start_pos = Nat::from_obj(start_pos); + let stop_pos = Nat::from_obj(stop_pos); Substring { str, start_pos, stop_pos } } -fn lean_ptr_to_source_info(ptr: *const c_void) -> SourceInfo { - if lean_is_scalar(ptr) { +fn decode_source_info(obj: LeanObject) -> SourceInfo { + if obj.is_scalar() { return SourceInfo::None; } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); + let ctor = obj.as_ctor(); match ctor.tag() { 0 => { - let [leading_ptr, pos_ptr, trailing_ptr, end_pos_ptr] = ctor.objs(); - let leading = lean_ptr_to_substring(leading_ptr); - let pos = Nat::from_ptr(pos_ptr); - let trailing = lean_ptr_to_substring(trailing_ptr); - let end_pos = Nat::from_ptr(end_pos_ptr); + let [leading, pos, trailing, end_pos] = ctor.objs(); + let leading = decode_substring(leading); + let pos = Nat::from_obj(pos); + let trailing = decode_substring(trailing); + let end_pos = Nat::from_obj(end_pos); SourceInfo::Original(leading, pos, trailing, end_pos) }, 1 => { - let [pos_ptr, end_pos_ptr, canonical_ptr] = ctor.objs(); - let pos = Nat::from_ptr(pos_ptr); - let end_pos = Nat::from_ptr(end_pos_ptr); - let canonical = canonical_ptr as usize == 1; + let [pos, end_pos, canonical] = ctor.objs(); + let pos = Nat::from_obj(pos); + let end_pos = Nat::from_obj(end_pos); + let canonical = canonical.as_ptr() as usize == 1; SourceInfo::Synthetic(pos, end_pos, canonical) }, _ => unreachable!(), } } -fn lean_ptr_to_syntax_preresolved( - ptr: *const c_void, +fn decode_syntax_preresolved( + obj: LeanObject, cache: &mut Cache<'_>, ) -> SyntaxPreresolved { - let ctor: &LeanCtorObject = 
as_ref_unsafe(ptr.cast()); + let ctor = obj.as_ctor(); match ctor.tag() { 0 => { - let [name_ptr] = ctor.objs(); - let name = lean_ptr_to_name(name_ptr, cache.global); + let [name_obj] = ctor.objs::<1>(); + let name = decode_name(name_obj, cache.global); SyntaxPreresolved::Namespace(name) }, 1 => { - let [name_ptr, fields_ptr] = ctor.objs(); - let name = lean_ptr_to_name(name_ptr, cache.global); - let fields = collect_list(fields_ptr, |p| { - let str: &LeanStringObject = as_ref_unsafe(p.cast()); - str.as_string() - }); + let [name_obj, fields_obj] = ctor.objs(); + let name = decode_name(name_obj, cache.global); + let fields: Vec = fields_obj + .as_list() + .iter() + .map(|o| o.as_string().to_string()) + .collect(); SyntaxPreresolved::Decl(name, fields) }, _ => unreachable!(), } } -fn lean_ptr_to_syntax(ptr: *const c_void, cache: &mut Cache<'_>) -> Syntax { - if lean_is_scalar(ptr) { +fn decode_syntax(obj: LeanObject, cache: &mut Cache<'_>) -> Syntax { + if obj.is_scalar() { return Syntax::Missing; } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); + let ctor = obj.as_ctor(); match ctor.tag() { 1 => { - let [info_ptr, kind_ptr, args_ptr] = ctor.objs(); - let info = lean_ptr_to_source_info(info_ptr); - let kind = lean_ptr_to_name(kind_ptr, cache.global); - let args_array: &LeanArrayObject = as_ref_unsafe(args_ptr.cast()); - let args: Vec<_> = args_array - .data() - .iter() - .map(|&p| lean_ptr_to_syntax(p, cache)) - .collect(); + let [info, kind, args] = ctor.objs(); + let info = decode_source_info(info); + let kind = decode_name(kind, cache.global); + let args: Vec<_> = + args.as_array().iter().map(|o| decode_syntax(o, cache)).collect(); Syntax::Node(info, kind, args) }, 2 => { - let [info_ptr, val_ptr] = ctor.objs(); - let info = lean_ptr_to_source_info(info_ptr); - let val_str: &LeanStringObject = as_ref_unsafe(val_ptr.cast()); - Syntax::Atom(info, val_str.as_string()) + let [info, val] = ctor.objs(); + let info = decode_source_info(info); + 
Syntax::Atom(info, val.as_string().to_string()) }, 3 => { - let [info_ptr, raw_val_ptr, val_ptr, preresolved_ptr] = ctor.objs(); - let info = lean_ptr_to_source_info(info_ptr); - let raw_val = lean_ptr_to_substring(raw_val_ptr); - let val = lean_ptr_to_name(val_ptr, cache.global); - let preresolved = collect_list_ptrs(preresolved_ptr) + let [info, raw_val, val, preresolved] = ctor.objs(); + let info = decode_source_info(info); + let raw_val = decode_substring(raw_val); + let val = decode_name(val, cache.global); + let preresolved = collect_list_objs(preresolved.as_list()) .into_iter() - .map(|p| lean_ptr_to_syntax_preresolved(p, cache)) + .map(|o| decode_syntax_preresolved(o, cache)) .collect(); Syntax::Ident(info, raw_val, val, preresolved) }, @@ -270,93 +255,84 @@ fn lean_ptr_to_syntax(ptr: *const c_void, cache: &mut Cache<'_>) -> Syntax { } } -fn lean_ptr_to_name_data_value( - ptr: *const c_void, +fn decode_name_data_value( + obj: LeanObject, cache: &mut Cache<'_>, ) -> (Name, DataValue) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [name_ptr, data_value_ptr] = ctor.objs(); - let name = lean_ptr_to_name(name_ptr, cache.global); - let data_value_ctor: &LeanCtorObject = as_ref_unsafe(data_value_ptr.cast()); - let [inner_ptr] = data_value_ctor.objs(); - let data_value = match data_value_ctor.tag() { - 0 => { - let str: &LeanStringObject = as_ref_unsafe(inner_ptr.cast()); - DataValue::OfString(str.as_string()) - }, - 1 => DataValue::OfBool(inner_ptr as usize == 1), - 2 => DataValue::OfName(lean_ptr_to_name(inner_ptr, cache.global)), - 3 => DataValue::OfNat(Nat::from_ptr(inner_ptr)), + let ctor = obj.as_ctor(); + let [name_obj, data_value_obj] = ctor.objs(); + let name = decode_name(name_obj, cache.global); + let dv_ctor = data_value_obj.as_ctor(); + let [inner] = dv_ctor.objs::<1>(); + let data_value = match dv_ctor.tag() { + 0 => DataValue::OfString(inner.as_string().to_string()), + 1 => DataValue::OfBool(inner.as_ptr() as usize == 1), + 2 => 
DataValue::OfName(decode_name(inner, cache.global)), + 3 => DataValue::OfNat(Nat::from_obj(inner)), 4 => { - let int_ctor: &LeanCtorObject = as_ref_unsafe(inner_ptr.cast()); - let [nat_ptr] = int_ctor.objs(); - let nat = Nat::from_ptr(nat_ptr); - let int = match int_ctor.tag() { + let inner_ctor = inner.as_ctor(); + let [nat_obj] = inner_ctor.objs::<1>(); + let nat = Nat::from_obj(nat_obj); + let int = match inner_ctor.tag() { 0 => Int::OfNat(nat), 1 => Int::NegSucc(nat), _ => unreachable!(), }; DataValue::OfInt(int) }, - 5 => DataValue::OfSyntax(lean_ptr_to_syntax(inner_ptr, cache).into()), + 5 => DataValue::OfSyntax(decode_syntax(inner, cache).into()), _ => unreachable!(), }; (name, data_value) } -pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { +pub fn decode_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { + let ptr = obj.as_ptr(); if let Some(cached) = cache.local.exprs.get(&ptr) { return cached.clone(); } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); + let ctor = obj.as_ctor(); let expr = match ctor.tag() { 0 => { - let [nat_ptr, _hash_ptr] = ctor.objs(); - let nat = Nat::from_ptr(nat_ptr.cast()); - Expr::bvar(nat) + let [nat, _hash] = ctor.objs(); + Expr::bvar(Nat::from_obj(nat)) }, 1 => { - let [name_ptr, _hash_ptr] = ctor.objs(); - let name = lean_ptr_to_name(name_ptr, cache.global); + let [name_obj, _hash] = ctor.objs(); + let name = decode_name(name_obj, cache.global); Expr::fvar(name) }, 2 => { - let [name_ptr, _hash_ptr] = ctor.objs(); - let name = lean_ptr_to_name(name_ptr, cache.global); + let [name_obj, _hash] = ctor.objs(); + let name = decode_name(name_obj, cache.global); Expr::mvar(name) }, 3 => { - let [u_ptr, _hash_ptr] = ctor.objs(); - let u = lean_ptr_to_level(u_ptr, cache); + let [u, _hash] = ctor.objs(); + let u = decode_level(u, cache); Expr::sort(u) }, 4 => { - let [name_ptr, levels_ptr, _hash_ptr] = ctor.objs(); - let name = lean_ptr_to_name(name_ptr, cache.global); - let levels = 
collect_list_ptrs(levels_ptr) + let [name_obj, levels, _hash] = ctor.objs(); + let name = decode_name(name_obj, cache.global); + let levels = collect_list_objs(levels.as_list()) .into_iter() - .map(|p| lean_ptr_to_level(p, cache)) + .map(|o| decode_level(o, cache)) .collect(); Expr::cnst(name, levels) }, 5 => { - let [f_ptr, a_ptr, _hash_ptr] = ctor.objs(); - let f = lean_ptr_to_expr(f_ptr, cache); - let a = lean_ptr_to_expr(a_ptr, cache); + let [f, a, _hash] = ctor.objs(); + let f = decode_expr(f, cache); + let a = decode_expr(a, cache); Expr::app(f, a) }, 6 => { - let [ - binder_name_ptr, - binder_typ_ptr, - body_ptr, - _hash_ptr, - binder_info_ptr, - ] = ctor.objs(); - let binder_name = lean_ptr_to_name(binder_name_ptr, cache.global); - let binder_typ = lean_ptr_to_expr(binder_typ_ptr, cache); - let body = lean_ptr_to_expr(body_ptr, cache); - let binder_info = match binder_info_ptr as usize { + let [binder_name, binder_typ, body, _hash, binder_info] = ctor.objs(); + let binder_name = decode_name(binder_name, cache.global); + let binder_typ = decode_expr(binder_typ, cache); + let body = decode_expr(body, cache); + let binder_info = match binder_info.as_ptr() as usize { 0 => BinderInfo::Default, 1 => BinderInfo::Implicit, 2 => BinderInfo::StrictImplicit, @@ -366,17 +342,11 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { Expr::lam(binder_name, binder_typ, body, binder_info) }, 7 => { - let [ - binder_name_ptr, - binder_typ_ptr, - body_ptr, - _hash_ptr, - binder_info_ptr, - ] = ctor.objs(); - let binder_name = lean_ptr_to_name(binder_name_ptr, cache.global); - let binder_typ = lean_ptr_to_expr(binder_typ_ptr, cache); - let body = lean_ptr_to_expr(body_ptr, cache); - let binder_info = match binder_info_ptr as usize { + let [binder_name, binder_typ, body, _hash, binder_info] = ctor.objs(); + let binder_name = decode_name(binder_name, cache.global); + let binder_typ = decode_expr(binder_typ, cache); + let body = decode_expr(body, cache); 
+ let binder_info = match binder_info.as_ptr() as usize { 0 => BinderInfo::Default, 1 => BinderInfo::Implicit, 2 => BinderInfo::StrictImplicit, @@ -386,45 +356,38 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { Expr::all(binder_name, binder_typ, body, binder_info) }, 8 => { - let [decl_name_ptr, typ_ptr, value_ptr, body_ptr, _hash_ptr, nondep_ptr] = - ctor.objs(); - let decl_name = lean_ptr_to_name(decl_name_ptr, cache.global); - let typ = lean_ptr_to_expr(typ_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, cache); - let body = lean_ptr_to_expr(body_ptr, cache); - let nondep = nondep_ptr as usize == 1; + let [decl_name, typ, value, body, _hash, nondep] = ctor.objs(); + let decl_name = decode_name(decl_name, cache.global); + let typ = decode_expr(typ, cache); + let value = decode_expr(value, cache); + let body = decode_expr(body, cache); + let nondep = nondep.as_ptr() as usize == 1; Expr::letE(decl_name, typ, value, body, nondep) }, 9 => { - let [literal_ptr, _hash_ptr] = ctor.objs(); - let literal: &LeanCtorObject = as_ref_unsafe(literal_ptr.cast()); - let [inner_ptr] = literal.objs(); - match literal.tag() { - 0 => { - let nat = Nat::from_ptr(inner_ptr); - Expr::lit(Literal::NatVal(nat)) - }, - 1 => { - let str: &LeanStringObject = as_ref_unsafe(inner_ptr.cast()); - Expr::lit(Literal::StrVal(str.as_string())) - }, + let [literal, _hash] = ctor.objs(); + let lit_ctor = literal.as_ctor(); + let [inner] = lit_ctor.objs::<1>(); + match lit_ctor.tag() { + 0 => Expr::lit(Literal::NatVal(Nat::from_obj(inner))), + 1 => Expr::lit(Literal::StrVal(inner.as_string().to_string())), _ => unreachable!(), } }, 10 => { - let [data_ptr, expr_ptr] = ctor.objs(); - let kv_map: Vec<_> = collect_list_ptrs(data_ptr) + let [data, expr_obj] = ctor.objs(); + let kv_map: Vec<_> = collect_list_objs(data.as_list()) .into_iter() - .map(|p| lean_ptr_to_name_data_value(p, cache)) + .map(|o| decode_name_data_value(o, cache)) .collect(); - let expr = 
lean_ptr_to_expr(expr_ptr, cache); + let expr = decode_expr(expr_obj, cache); Expr::mdata(kv_map, expr) }, 11 => { - let [typ_name_ptr, idx_ptr, struct_ptr] = ctor.objs(); - let typ_name = lean_ptr_to_name(typ_name_ptr, cache.global); - let idx = Nat::from_ptr(idx_ptr); - let struct_expr = lean_ptr_to_expr(struct_ptr, cache); + let [typ_name, idx, struct_expr] = ctor.objs(); + let typ_name = decode_name(typ_name, cache.global); + let idx = Nat::from_obj(idx); + let struct_expr = decode_expr(struct_expr, cache); Expr::proj(typ_name, idx, struct_expr) }, _ => unreachable!(), @@ -433,69 +396,65 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { expr } -fn lean_ptr_to_recursor_rule( - ptr: *const c_void, +fn decode_recursor_rule( + obj: LeanObject, cache: &mut Cache<'_>, ) -> RecursorRule { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [ctor_ptr, n_fields_ptr, rhs_ptr] = ctor.objs(); - let ctor = lean_ptr_to_name(ctor_ptr, cache.global); - let n_fields = Nat::from_ptr(n_fields_ptr); - let rhs = lean_ptr_to_expr(rhs_ptr, cache); - RecursorRule { ctor, n_fields, rhs } + let ctor = obj.as_ctor(); + let [ctor_name, n_fields, rhs] = ctor.objs(); + let ctor_name = decode_name(ctor_name, cache.global); + let n_fields = Nat::from_obj(n_fields); + let rhs = decode_expr(rhs, cache); + RecursorRule { ctor: ctor_name, n_fields, rhs } } -fn lean_ptr_to_constant_val( - ptr: *const c_void, - cache: &mut Cache<'_>, -) -> ConstantVal { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [name_ptr, level_params_ptr, typ_ptr] = ctor.objs(); - let name = lean_ptr_to_name(name_ptr, cache.global); - let level_params: Vec<_> = collect_list_ptrs(level_params_ptr) +fn decode_constant_val(obj: LeanObject, cache: &mut Cache<'_>) -> ConstantVal { + let ctor = obj.as_ctor(); + let [name_obj, level_params, typ] = ctor.objs(); + let name = decode_name(name_obj, cache.global); + let level_params: Vec<_> = 
collect_list_objs(level_params.as_list()) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let typ = lean_ptr_to_expr(typ_ptr, cache); + let typ = decode_expr(typ, cache); ConstantVal { name, level_params, typ } } -pub fn lean_ptr_to_constant_info( - ptr: *const c_void, +pub fn decode_constant_info( + obj: LeanObject, cache: &mut Cache<'_>, ) -> ConstantInfo { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [inner_val_ptr] = ctor.objs(); - let inner_val: &LeanCtorObject = as_ref_unsafe(inner_val_ptr.cast()); + let ctor = obj.as_ctor(); + let [inner_val] = ctor.objs::<1>(); + let inner = inner_val.as_ctor(); match ctor.tag() { 0 => { - let [constant_val_ptr, is_unsafe_ptr] = inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let is_unsafe = is_unsafe_ptr as usize == 1; + let [constant_val, is_unsafe] = inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::AxiomInfo(AxiomVal { cnst: constant_val, is_unsafe }) }, 1 => { - let [constant_val_ptr, value_ptr, hints_ptr, all_ptr, safety_ptr] = - inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, cache); - let hints = if lean_is_scalar(hints_ptr) { - match lean_unbox!(usize, hints_ptr) { + let [constant_val, value, hints, all, safety] = inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let value = decode_expr(value, cache); + let hints = if hints.is_scalar() { + match hints.unbox_usize() { 0 => ReducibilityHints::Opaque, 1 => ReducibilityHints::Abbrev, _ => unreachable!(), } } else { - let hints_ctor: &LeanCtorObject = as_ref_unsafe(hints_ptr.cast()); - let [height_ptr] = hints_ctor.objs(); - ReducibilityHints::Regular(height_ptr as u32) + let hints_ctor = hints.as_ctor(); + let [height] = 
hints_ctor.objs::<1>(); + ReducibilityHints::Regular(height.as_ptr() as u32) }; - let all: Vec<_> = collect_list_ptrs(all_ptr) + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let safety = match safety_ptr as usize { + let safety = match safety.as_ptr() as usize { 0 => DefinitionSafety::Unsafe, 1 => DefinitionSafety::Safe, 2 => DefinitionSafety::Partial, @@ -510,25 +469,24 @@ pub fn lean_ptr_to_constant_info( }) }, 2 => { - let [constant_val_ptr, value_ptr, all_ptr] = inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, cache); - let all: Vec<_> = collect_list_ptrs(all_ptr) + let [constant_val, value, all] = inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let value = decode_expr(value, cache); + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); ConstantInfo::ThmInfo(TheoremVal { cnst: constant_val, value, all }) }, 3 => { - let [constant_val_ptr, value_ptr, all_ptr, is_unsafe_ptr] = - inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, cache); - let all: Vec<_> = collect_list_ptrs(all_ptr) + let [constant_val, value, all, is_unsafe] = inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let value = decode_expr(value, cache); + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let is_unsafe = is_unsafe_ptr as usize == 1; + let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::OpaqueInfo(OpaqueVal { cnst: constant_val, value, @@ -537,9 +495,9 @@ pub fn lean_ptr_to_constant_info( }) }, 4 => { - let 
[constant_val_ptr, kind_ptr] = inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let kind = match kind_ptr as usize { + let [constant_val, kind] = inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let kind = match kind.as_ptr() as usize { 0 => QuotKind::Type, 1 => QuotKind::Ctor, 2 => QuotKind::Lift, @@ -550,28 +508,28 @@ pub fn lean_ptr_to_constant_info( }, 5 => { let [ - constant_val_ptr, - num_params_ptr, - num_indices_ptr, - all_ptr, - ctors_ptr, - num_nested_ptr, - bools_ptr, - ] = inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let num_params = Nat::from_ptr(num_params_ptr); - let num_indices = Nat::from_ptr(num_indices_ptr); - let all: Vec<_> = collect_list_ptrs(all_ptr) + constant_val, + num_params, + num_indices, + all, + ctors, + num_nested, + bools, + ] = inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let num_params = Nat::from_obj(num_params); + let num_indices = Nat::from_obj(num_indices); + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let ctors: Vec<_> = collect_list_ptrs(ctors_ptr) + let ctors: Vec<_> = collect_list_objs(ctors.as_list()) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let num_nested = Nat::from_ptr(num_nested_ptr); + let num_nested = Nat::from_obj(num_nested); let [is_rec, is_unsafe, is_reflexive, ..] 
= - (bools_ptr as usize).to_le_bytes().map(|b| b == 1); + (bools.as_ptr() as usize).to_le_bytes().map(|b| b == 1); ConstantInfo::InductInfo(InductiveVal { cnst: constant_val, num_params, @@ -585,20 +543,14 @@ pub fn lean_ptr_to_constant_info( }) }, 6 => { - let [ - constant_val_ptr, - induct_ptr, - cidx_ptr, - num_params_ptr, - num_fields_ptr, - is_unsafe_ptr, - ] = inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let induct = lean_ptr_to_name(induct_ptr, cache.global); - let cidx = Nat::from_ptr(cidx_ptr); - let num_params = Nat::from_ptr(num_params_ptr); - let num_fields = Nat::from_ptr(num_fields_ptr); - let is_unsafe = is_unsafe_ptr as usize == 1; + let [constant_val, induct, cidx, num_params, num_fields, is_unsafe] = + inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let induct = decode_name(induct, cache.global); + let cidx = Nat::from_obj(cidx); + let num_params = Nat::from_obj(num_params); + let num_fields = Nat::from_obj(num_fields); + let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::CtorInfo(ConstructorVal { cnst: constant_val, induct, @@ -610,30 +562,30 @@ pub fn lean_ptr_to_constant_info( }, 7 => { let [ - constant_val_ptr, - all_ptr, - num_params_ptr, - num_indices_ptr, - num_motives_ptr, - num_minors_ptr, - rules_ptr, - bools_ptr, - ] = inner_val.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let all: Vec<_> = collect_list_ptrs(all_ptr) + constant_val, + all, + num_params, + num_indices, + num_motives, + num_minors, + rules, + bools, + ] = inner.objs(); + let constant_val = decode_constant_val(constant_val, cache); + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let num_params = Nat::from_ptr(num_params_ptr); - let num_indices = Nat::from_ptr(num_indices_ptr); - let num_motives = Nat::from_ptr(num_motives_ptr); - 
let num_minors = Nat::from_ptr(num_minors_ptr); - let rules: Vec<_> = collect_list_ptrs(rules_ptr) + let num_params = Nat::from_obj(num_params); + let num_indices = Nat::from_obj(num_indices); + let num_motives = Nat::from_obj(num_motives); + let num_minors = Nat::from_obj(num_minors); + let rules: Vec<_> = collect_list_objs(rules.as_list()) .into_iter() - .map(|p| lean_ptr_to_recursor_rule(p, cache)) + .map(|o| decode_recursor_rule(o, cache)) .collect(); let [k, is_unsafe, ..] = - (bools_ptr as usize).to_le_bytes().map(|b| b == 1); + (bools.as_ptr() as usize).to_le_bytes().map(|b| b == 1); ConstantInfo::RecInfo(RecursorVal { cnst: constant_val, all, @@ -652,36 +604,36 @@ pub fn lean_ptr_to_constant_info( /// Decode a single (Name, ConstantInfo) pair. fn decode_name_constant_info( - ptr: *const c_void, + obj: LeanObject, global: &GlobalCache, ) -> (Name, ConstantInfo) { let mut cache = Cache::new(global); - let prod_ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [name_ptr, constant_info_ptr] = prod_ctor.objs(); - let name = lean_ptr_to_name(name_ptr, global); - let constant_info = lean_ptr_to_constant_info(constant_info_ptr, &mut cache); + let ctor = obj.as_ctor(); + let [name_obj, constant_info] = ctor.objs(); + let name = decode_name(name_obj, global); + let constant_info = decode_constant_info(constant_info, &mut cache); (name, constant_info) } // Decode a Lean environment in parallel with hybrid caching. 
-pub fn lean_ptr_to_env(ptr: *const c_void) -> Env { +pub fn decode_env(obj: LeanList) -> Env { // Phase 1: Collect pointers (sequential) - let ptrs = collect_list_ptrs(ptr); + let objs = collect_list_objs(obj); - if ptrs.len() < PARALLEL_THRESHOLD { - return lean_ptr_to_env_sequential(ptr); + if objs.len() < PARALLEL_THRESHOLD { + return decode_env_sequential(obj); } // Estimate: ~3 unique names per constant on average - let global = GlobalCache::with_capacity(ptrs.len() * 3); + let global = GlobalCache::with_capacity(objs.len() * 3); // Phase 2: Decode in parallel with shared global name cache - let pairs: Vec<(Name, ConstantInfo)> = ptrs + let pairs: Vec<(Name, ConstantInfo)> = objs .into_iter() - .map(SendPtr) // Wrap each *const c_void in SendPtr - .collect::>() // Collect into Vec - .into_par_iter() // Now Rayon can use it (SendPtr is Send+Sync) - .map(|p| decode_name_constant_info(p.get(), &global)) // Unwrap with .get() + .map(SendObj) + .collect::>() + .into_par_iter() + .map(|o| decode_name_constant_info(o.get(), &global)) .collect(); // Phase 3: Build final map @@ -694,31 +646,25 @@ pub fn lean_ptr_to_env(ptr: *const c_void) -> Env { } /// Sequential fallback for small environments. -pub fn lean_ptr_to_env_sequential(ptr: *const c_void) -> Env { - let ptrs = collect_list_ptrs(ptr); +pub fn decode_env_sequential(obj: LeanList) -> Env { + let objs = collect_list_objs(obj); let global = GlobalCache::new(); let mut env = Env::default(); - env.reserve(ptrs.len()); + env.reserve(objs.len()); - for p in ptrs { - let (name, constant_info) = decode_name_constant_info(p, &global); + for o in objs { + let (name, constant_info) = decode_name_constant_info(o, &global); env.insert(name, constant_info); } env } -//#[unsafe(no_mangle)] -//pub extern "C" fn rs_decode_env(ptr: *const c_void) -> usize { -// let env = lean_ptr_to_env(ptr); -// env.len() -//} - // Debug/analysis entry point invoked via the `rust-compile` test flag in // `Tests/FFI/Basic.lean`. 
Exercises the full compile→decompile→check→serialize // roundtrip and size analysis. Output is intentionally suppressed; re-enable // individual `eprintln!` lines when debugging locally. #[unsafe(no_mangle)] -extern "C" fn rs_tmp_decode_const_map(ptr: *const c_void) -> usize { +extern "C" fn rs_tmp_decode_const_map(obj: LeanList) -> usize { // Enable hash-consed size tracking for debugging // TODO: Make this configurable via CLI instead of hardcoded crate::ix::compile::TRACK_HASH_CONSED_SIZE @@ -729,7 +675,7 @@ extern "C" fn rs_tmp_decode_const_map(ptr: *const c_void) -> usize { crate::ix::compile::ANALYZE_SHARING .store(false, std::sync::atomic::Ordering::Relaxed); - let env = lean_ptr_to_env(ptr); + let env = decode_env(obj); let env = Arc::new(env); if let Ok(stt) = compile_env(&env) { if let Ok(dstt) = decompile_env(&stt) { diff --git a/src/ffi/primitives.rs b/src/ffi/primitives.rs new file mode 100644 index 00000000..63c5f84d --- /dev/null +++ b/src/ffi/primitives.rs @@ -0,0 +1,350 @@ +//! Basic Lean type encode/decode/roundtrip operations. +//! +//! This module provides FFI functions for primitive Lean types: +//! - Nat, String, Bool +//! - Option, Pair +//! - List, Array, ByteArray +//! - AssocList, HashMap + +use lean_ffi::nat::Nat; +use lean_ffi::object::{ + LeanArray, LeanBool, LeanByteArray, LeanCtor, LeanList, LeanNat, LeanObject, + LeanString, +}; + +// ============================================================================= +// Nat Building +// ============================================================================= + +/// Build a Lean Nat from a Rust Nat. 
+pub fn build_nat(n: &Nat) -> LeanObject { + // Try to get as u64 first + if let Some(val) = n.to_u64() { + // For small values that fit in a boxed scalar (max value is usize::MAX >> 1) + if val <= (usize::MAX >> 1) as u64 { + #[allow(clippy::cast_possible_truncation)] + return LeanObject::box_usize(val as usize); + } + return LeanObject::from_nat_u64(val); + } + // For values larger than u64, convert to limbs and use GMP + let bytes = n.to_le_bytes(); + let mut limbs: Vec = Vec::with_capacity(bytes.len().div_ceil(8)); + for chunk in bytes.chunks(8) { + let mut arr = [0u8; 8]; + arr[..chunk.len()].copy_from_slice(chunk); + limbs.push(u64::from_le_bytes(arr)); + } + unsafe { lean_ffi::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) } +} + +// ============================================================================= +// Round-trip FFI Functions for Testing +// ============================================================================= + +/// Round-trip a Nat: decode from Lean, re-encode to Lean. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanNat) -> LeanObject { + let nat = Nat::from_obj(*nat_ptr); + build_nat(&nat) +} + +/// Round-trip a String: decode from Lean, re-encode to Lean. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_string(s_ptr: LeanString) -> LeanString { + let s = s_ptr.to_string(); + LeanString::new(&s) +} + +/// Round-trip a List Nat: decode from Lean, re-encode to Lean. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanList) -> LeanList { + let nats: Vec = list_ptr.collect(Nat::from_obj); + build_list_nat(&nats) +} + +/// Round-trip an Array Nat: decode from Lean, re-encode to Lean. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_array_nat(arr_ptr: LeanArray) -> LeanArray { + let nats: Vec = arr_ptr.map(Nat::from_obj); + build_array_nat(&nats) +} + +/// Round-trip a ByteArray: decode from Lean, re-encode to Lean. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_bytearray(ba: LeanByteArray) -> LeanByteArray { + LeanByteArray::from_bytes(ba.as_bytes()) +} + +/// Round-trip a Bool: decode from Lean, re-encode. +/// Bool in Lean is passed as unboxed scalar: false = 0, true = 1 +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanBool) -> LeanBool { + bool_ptr +} + +// ============================================================================= +// Helper functions for building basic Lean types +// ============================================================================= + +/// Build a Lean List Nat from a Vec. +fn build_list_nat(nats: &[Nat]) -> LeanList { + let items: Vec = nats.iter().map(build_nat).collect(); + items.into_iter().collect() +} + +/// Build a Lean Array Nat from a Vec. +fn build_array_nat(nats: &[Nat]) -> LeanArray { + let arr = LeanArray::alloc(nats.len()); + for (i, nat) in nats.iter().enumerate() { + arr.set(i, build_nat(nat)); + } + arr +} + +// ============================================================================= +// FFI roundtrip functions for struct/inductive/HashMap +// ============================================================================= + +/// Round-trip a Point (structure with x, y : Nat). +/// Point is a structure, which in Lean is represented as a constructor with tag 0. +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_point(point_ptr: LeanCtor) -> LeanObject { + // Point is a structure (single constructor, tag 0) with 2 Nat fields + let x = Nat::from_obj(point_ptr.get(0)); + let y = Nat::from_obj(point_ptr.get(1)); + + // Re-encode as Point + let point = LeanCtor::alloc(0, 2, 0); + point.set(0, build_nat(&x)); + point.set(1, build_nat(&y)); + *point +} + +/// Round-trip a NatTree (inductive with leaf : Nat → NatTree | node : NatTree → NatTree → NatTree). 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_nat_tree(tree_ptr: LeanCtor) -> LeanObject { + roundtrip_nat_tree_recursive(tree_ptr) +} + +fn roundtrip_nat_tree_recursive(ctor: LeanCtor) -> LeanObject { + match ctor.tag() { + 0 => { + // leaf : Nat → NatTree + let nat = Nat::from_obj(ctor.get(0)); + let leaf = LeanCtor::alloc(0, 1, 0); + leaf.set(0, build_nat(&nat)); + *leaf + }, + 1 => { + // node : NatTree → NatTree → NatTree + let left = roundtrip_nat_tree_recursive(ctor.get(0).as_ctor()); + let right = roundtrip_nat_tree_recursive(ctor.get(1).as_ctor()); + let node = LeanCtor::alloc(1, 2, 0); + node.set(0, left); + node.set(1, right); + *node + }, + _ => panic!("Invalid NatTree tag: {}", ctor.tag()), + } +} + +/// Round-trip an AssocList Nat Nat. +/// AssocList: nil (tag 0, 0 fields) | cons key value tail (tag 1, 3 fields) +/// Note: nil with 0 fields may be represented as lean_box(0) +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_assoclist_nat_nat( + list_ptr: LeanObject, +) -> LeanObject { + if list_ptr.is_scalar() { + return LeanObject::box_usize(0); + } + let pairs = decode_assoc_list_nat_nat(list_ptr); + build_assoc_list_nat_nat(&pairs) +} + +/// Build an AssocList Nat Nat from pairs +fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> LeanObject { + // Build in reverse to preserve order + let mut list = LeanObject::box_usize(0); // nil + for (k, v) in pairs.iter().rev() { + let cons = LeanCtor::alloc(1, 3, 0); // AssocList.cons + cons.set(0, build_nat(k)); + cons.set(1, build_nat(v)); + cons.set(2, list); + list = *cons; + } + list +} + +/// Round-trip a DHashMap.Raw Nat Nat. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( + raw_ptr: LeanObject, +) -> LeanObject { + if raw_ptr.is_scalar() { + return raw_ptr; + } + + let raw_ctor = raw_ptr.as_ctor(); + let size = Nat::from_obj(raw_ctor.get(0)); + let buckets = raw_ctor.get(1).as_array(); + + // Decode and rebuild buckets + let num_buckets = buckets.len(); + + let mut all_pairs: Vec<(Nat, Nat)> = Vec::new(); + for bucket in buckets.iter() { + let pairs = decode_assoc_list_nat_nat(bucket); + all_pairs.extend(pairs); + } + + // Rebuild buckets + let new_buckets = LeanArray::alloc(num_buckets); + let nil = LeanObject::box_usize(0); + for i in 0..num_buckets { + new_buckets.set(i, nil); + } + + for (k, v) in &all_pairs { + let k_u64 = k.to_u64().unwrap_or_else(|| { + let bytes = k.to_le_bytes(); + let mut arr = [0u8; 8]; + let len = bytes.len().min(8); + arr[..len].copy_from_slice(&bytes[..len]); + u64::from_le_bytes(arr) + }); + #[allow(clippy::cast_possible_truncation)] + let bucket_idx = (k_u64 as usize) & (num_buckets - 1); + + let old_bucket = new_buckets.get(bucket_idx); + let new_bucket = LeanCtor::alloc(1, 3, 0); + new_bucket.set(0, build_nat(k)); + new_bucket.set(1, build_nat(v)); + new_bucket.set(2, old_bucket); + new_buckets.set(bucket_idx, *new_bucket); + } + + // Build Raw + let raw = LeanCtor::alloc(0, 2, 0); + raw.set(0, build_nat(&size)); + raw.set(1, *new_buckets); + *raw +} + +/// Round-trip a Std.HashMap Nat Nat. +/// +/// IMPORTANT: Single-field structures are unboxed in Lean 4! +/// - HashMap has 1 field (inner : DHashMap) +/// - DHashMap has 1 field (inner : Raw) - wf : Prop is erased +/// So HashMap pointer points DIRECTLY to Raw! 
+/// +/// Memory layout (after unboxing): +/// - HashMap/DHashMap/Raw all share the same pointer +/// - Raw: ctor 0, 2 fields +/// - field 0: size : Nat +/// - field 1: buckets : Array (AssocList α β) +/// - AssocList: +/// - nil: lean_box(0) +/// - cons key value tail: ctor 1, 3 fields +#[unsafe(no_mangle)] +pub extern "C" fn rs_roundtrip_hashmap_nat_nat( + map_ptr: LeanCtor, +) -> LeanObject { + // Due to unboxing, map_ptr points directly to Raw + let size = Nat::from_obj(map_ptr.get(0)); + let buckets = map_ptr.get(1).as_array(); + + // Decode buckets (Array of AssocLists) + let mut pairs: Vec<(Nat, Nat)> = Vec::new(); + + for bucket in buckets.iter() { + let bucket_pairs = decode_assoc_list_nat_nat(bucket); + pairs.extend(bucket_pairs); + } + + // Rebuild the HashMap with the same bucket count + let num_buckets = buckets.len(); + let new_buckets = LeanArray::alloc(num_buckets); + + // Initialize all buckets to AssocList.nil (lean_box(0)) + let nil = LeanObject::box_usize(0); + for i in 0..num_buckets { + new_buckets.set(i, nil); + } + + // Insert each pair into the appropriate bucket using Lean's hash function + for (k, v) in &pairs { + // Hash the key - for Nat, Lean uses the value itself as hash + let k_u64 = k.to_u64().unwrap_or_else(|| { + // For large nats, use low 64 bits + let bytes = k.to_le_bytes(); + let mut arr = [0u8; 8]; + let len = bytes.len().min(8); + arr[..len].copy_from_slice(&bytes[..len]); + u64::from_le_bytes(arr) + }); + // Lean uses (hash & (buckets.size - 1)) for bucket index (power of 2) + #[allow(clippy::cast_possible_truncation)] + let bucket_idx = (k_u64 as usize) & (num_buckets - 1); + + // Get current bucket AssocList + let old_bucket = new_buckets.get(bucket_idx); + + // Build AssocList.cons key value tail (tag 1, 3 fields) + let new_bucket = LeanCtor::alloc(1, 3, 0); + new_bucket.set(0, build_nat(k)); + new_bucket.set(1, build_nat(v)); + new_bucket.set(2, old_bucket); + new_buckets.set(bucket_idx, *new_bucket); + } + + // Build 
Raw (ctor 0, 2 fields: size, buckets) + // Due to unboxing, this IS the HashMap + let raw = LeanCtor::alloc(0, 2, 0); + raw.set(0, build_nat(&size)); + raw.set(1, *new_buckets); + *raw +} + +/// Decode a Lean AssocList Nat Nat to Vec of pairs +/// AssocList: nil (tag 0) | cons key value tail (tag 1, 3 fields) +pub fn decode_assoc_list_nat_nat(obj: LeanObject) -> Vec<(Nat, Nat)> { + let mut result = Vec::new(); + let mut current = obj; + + loop { + if current.is_scalar() { + break; + } + + let ctor = current.as_ctor(); + if ctor.tag() == 0 { + break; + } + + let k = Nat::from_obj(ctor.get(0)); + let v = Nat::from_obj(ctor.get(1)); + + result.push((k, v)); + current = ctor.get(2); + } + + result +} + +// ============================================================================= +// Utility FFI Functions +// ============================================================================= + +/// Read first 8 bytes of a ByteArray as little-endian UInt64. +/// Used by Address.Hashable to match Rust's bucket hash computation. +/// This is essentially just a pointer cast - very fast. 
+#[unsafe(no_mangle)] +pub extern "C" fn rs_bytearray_to_u64_le(ba: LeanByteArray) -> u64 { + let data = ba.as_bytes(); + if data.len() < 8 { + return 0; + } + u64::from_le_bytes(data[..8].try_into().unwrap()) +} diff --git a/src/ffi/unsigned.rs b/src/ffi/unsigned.rs new file mode 100644 index 00000000..ffc44d25 --- /dev/null +++ b/src/ffi/unsigned.rs @@ -0,0 +1,21 @@ +use lean_ffi::object::LeanByteArray; + +#[unsafe(no_mangle)] +extern "C" fn c_u16_to_le_bytes(v: u16) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) +} + +#[unsafe(no_mangle)] +extern "C" fn c_u32_to_le_bytes(v: u32) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) +} + +#[unsafe(no_mangle)] +extern "C" fn c_u64_to_le_bytes(v: u64) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) +} + +#[unsafe(no_mangle)] +extern "C" fn c_usize_to_le_bytes(v: usize) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) +} diff --git a/src/iroh.rs b/src/iroh.rs index 489a3f2a..2e32a2c7 100644 --- a/src/iroh.rs +++ b/src/iroh.rs @@ -1,33 +1,6 @@ -//! The client, server, and common modules are enabled by the `net` feature. However, Iroh doesn't work on `aarch64-darwin`, so they are always disabled for that target. -//! -//! Lean and C don't support feature flags, so the `_client` and `_server` modules are exposed as a fallback for when the `net` feature is disabled and/or on the `aarch64-darwin` target. -//! -//! These fallback modules contain dummy functions that can still be called via Lean->C->Rust FFI, but will return an error message that Lean then prints before exiting. 
- -#[cfg(any( - not(feature = "net"), - all(target_os = "macos", target_arch = "aarch64") -))] -pub mod _client; -#[cfg(any( - not(feature = "net"), - all(target_os = "macos", target_arch = "aarch64") -))] -pub mod _server; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] pub mod client; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] pub mod server; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] + pub mod common { use bincode::{Decode, Encode}; use serde::{Deserialize, Serialize}; diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs deleted file mode 100644 index aadb73df..00000000 --- a/src/iroh/_client.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::ffi::{CString, c_char}; - -use crate::lean::{ - array::LeanArrayObject, - ffi::{CResult, to_raw}, -}; - -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_put( - _node_id: *const c_char, - _addrs: &LeanArrayObject, - _relay_url: *const c_char, - _file_path: *const c_char, -) -> *const CResult { - let msg = CString::new("Iroh functions not supported when the Rust `net` feature is disabled or on MacOS aarch64-darwin").expect("CString::new failure"); - let c_result = CResult { is_ok: false, data: msg.into_raw().cast() }; - to_raw(c_result) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_get( - _node_id: *const c_char, - _addrs: &LeanArrayObject, - _relay_url: *const c_char, - _hash: *const c_char, -) -> *const CResult { - let msg = CString::new("Iroh functions not supported when the Rust `net` feature is disabled or on MacOS aarch64-darwin").expect("CString::new failure"); - let c_result = CResult { is_ok: false, data: msg.into_raw().cast() }; - to_raw(c_result) -} diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs deleted file mode 100644 index f5bcd892..00000000 --- a/src/iroh/_server.rs +++ /dev/null @@ -1,10 +0,0 @@ -use std::ffi::CString; - -use crate::lean::ffi::{CResult, to_raw}; - 
-#[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> *const CResult { - let msg = CString::new("Iroh functions not supported when the Rust `net` feature is disabled or on MacOS aarch64-darwin").expect("CString::new failure"); - let c_result = CResult { is_ok: false, data: msg.into_raw().cast() }; - to_raw(c_result) -} diff --git a/src/iroh/client.rs b/src/iroh/client.rs index 7df9cde7..1828ea10 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -1,116 +1,21 @@ use iroh::{Endpoint, NodeAddr, NodeId, RelayMode, RelayUrl, SecretKey}; use n0_snafu::{Result, ResultExt}; use n0_watcher::Watcher as _; -use std::ffi::{CString, c_char}; use std::net::SocketAddr; use tracing::info; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; -use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; -use crate::lean::array::LeanArrayObject; -use crate::lean::as_ref_unsafe; -use crate::lean::ffi::iroh::{GetResponseFFI, PutResponseFFI}; -use crate::lean::ffi::{CResult, raw_to_str, to_raw}; -use crate::lean::string::LeanStringObject; +use crate::iroh::common::{Request, Response}; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; // Maximum number of characters to read from the server. 
Connection automatically closed if this is exceeded const READ_SIZE_LIMIT: usize = 100_000_000; -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_put( - node_id: *const c_char, - addrs: &LeanArrayObject, - relay_url: *const c_char, - input: *const c_char, -) -> *const CResult { - let node_id = raw_to_str(node_id); - let addrs: Vec = addrs.to_vec(|ptr| { - let string: &LeanStringObject = as_ref_unsafe(ptr.cast()); - string.as_string() - }); - let relay_url = raw_to_str(relay_url); - let input = raw_to_str(input); - - let request = Request::Put(PutRequest { bytes: input.as_bytes().to_vec() }); - // Create a Tokio runtime to block on the async function - let rt = - tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - - // Run the async function and block until we get the result - let c_result = match rt.block_on(connect(node_id, &addrs, relay_url, request)) - { - Ok(response) => match response { - Response::Put(put_response) => { - let put_response_ffi = - PutResponseFFI::new(&put_response.message, &put_response.hash); - CResult { is_ok: true, data: to_raw(put_response_ffi).cast() } - }, - _ => { - let msg = CString::new("error: incorrect server response") - .expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }, - Err(err) => { - let msg = CString::new(err.to_string()).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - - to_raw(c_result) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_get( - node_id: *const c_char, - addrs: &LeanArrayObject, - relay_url: *const c_char, - hash: *const c_char, -) -> *const CResult { - let node_id = raw_to_str(node_id); - let addrs: Vec = addrs.to_vec(|ptr| { - let string: &LeanStringObject = as_ref_unsafe(ptr.cast()); - string.as_string() - }); - let relay_url = raw_to_str(relay_url); - let hash = raw_to_str(hash); - let request = Request::Get(GetRequest { hash: hash.to_owned() }); - - let rt = - 
tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - - let c_result = match rt.block_on(connect(node_id, &addrs, relay_url, request)) - { - Ok(response) => match response { - Response::Get(get_response) => { - let get_response_ffi = GetResponseFFI::new( - &get_response.message, - &get_response.hash, - &get_response.bytes, - ); - CResult { is_ok: true, data: to_raw(get_response_ffi).cast() } - }, - _ => { - let msg = CString::new("error: incorrect server response") - .expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }, - Err(err) => { - let msg = CString::new(err.to_string()).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - - to_raw(c_result) -} - // Largely taken from https://github.com/n0-computer/iroh/blob/main/iroh/examples/connect.rs -async fn connect( +pub async fn connect( node_id: &str, addrs: &[String], relay_url: &str, diff --git a/src/iroh/server.rs b/src/iroh/server.rs index 1820867f..a40c5c3f 100644 --- a/src/iroh/server.rs +++ b/src/iroh/server.rs @@ -1,5 +1,4 @@ use std::collections::BTreeMap; -use std::ffi::CString; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -12,33 +11,14 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetResponse, PutResponse, Request, Response}; -use crate::lean::ffi::{CResult, to_raw}; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; // Maximum number of characters to read from the client. 
Connection automatically closed if this is exceeded const READ_SIZE_LIMIT: usize = 100_000_000; -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> *const CResult { - // Create a Tokio runtime to block on the async function - let rt = - tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - - // Run the async function and block until we get the result - let c_result = match rt.block_on(serve()) { - Ok(()) => CResult { is_ok: true, data: std::ptr::null() }, - Err(err) => { - let msg = CString::new(err.to_string()).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - - to_raw(c_result) -} - // Largely taken from https://github.com/n0-computer/iroh/blob/main/iroh/examples/listen.rs -async fn serve() -> n0_snafu::Result<()> { +pub async fn serve() -> n0_snafu::Result<()> { // Initialize the subscriber with `RUST_LOG=info` to preserve some server logging tracing_subscriber::registry() .with(fmt::layer()) diff --git a/src/ix/compile.rs b/src/ix/compile.rs index 5c2a8269..0f176002 100644 --- a/src/ix/compile.rs +++ b/src/ix/compile.rs @@ -17,6 +17,8 @@ use std::{ thread, }; +use lean_ffi::nat::Nat; + use crate::{ ix::address::Address, ix::condense::compute_sccs, @@ -44,7 +46,6 @@ use crate::{ }, ix::mutual::{Def, Ind, MutConst, MutCtx, Rec, ctx_to_all}, ix::strong_ordering::SOrd, - lean::nat::Nat, }; /// Whether to track hash-consed sizes during compilation. 
diff --git a/src/ix/decompile.rs b/src/ix/decompile.rs index 88082135..eb63f31b 100644 --- a/src/ix/decompile.rs +++ b/src/ix/decompile.rs @@ -9,6 +9,8 @@ #![allow(clippy::map_err_ignore)] #![allow(clippy::match_same_arms)] +use lean_ffi::nat::Nat; + use crate::{ ix::address::Address, ix::compile::CompileState, @@ -32,7 +34,6 @@ use crate::{ univ::Univ, }, ix::mutual::{MutCtx, all_to_ctx}, - lean::nat::Nat, }; use dashmap::DashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; diff --git a/src/ix/env.rs b/src/ix/env.rs index 73749f98..c57dc2ff 100644 --- a/src/ix/env.rs +++ b/src/ix/env.rs @@ -14,7 +14,7 @@ use std::{ sync::Arc, }; -use crate::lean::nat::Nat; +use lean_ffi::nat::Nat; use rustc_hash::FxHashMap; // -- Name tags ---------------------------------------------------------------- diff --git a/src/ix/graph.rs b/src/ix/graph.rs index 86d211fc..74f4d961 100644 --- a/src/ix/graph.rs +++ b/src/ix/graph.rs @@ -177,7 +177,7 @@ fn get_expr_references<'a>( mod tests { use super::*; use crate::ix::env::*; - use crate::lean::nat::Nat; + use lean_ffi::nat::Nat; fn n(s: &str) -> Name { Name::str(Name::anon(), s.to_string()) diff --git a/src/ix/ground.rs b/src/ix/ground.rs index 008d00fd..4be05110 100644 --- a/src/ix/ground.rs +++ b/src/ix/ground.rs @@ -9,12 +9,13 @@ use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use rustc_hash::{FxHashMap, FxHashSet}; use std::collections::hash_map::Entry; +use lean_ffi::nat::Nat; + use crate::{ ix::env::{ ConstantInfo, Env, Expr, ExprData, InductiveVal, Level, LevelData, Name, }, ix::graph::RefMap, - lean::nat::Nat, }; /// Reason a constant failed groundedness checking. 
diff --git a/src/ix/ixon/serialize.rs b/src/ix/ixon/serialize.rs index c0572160..78e05580 100644 --- a/src/ix/ixon/serialize.rs +++ b/src/ix/ixon/serialize.rs @@ -871,7 +871,7 @@ impl Constant { // ============================================================================ use crate::ix::env::{Name, NameData}; -use crate::lean::nat::Nat; +use lean_ffi::nat::Nat; use rustc_hash::FxHashMap; /// Serialize a Name to bytes (full recursive serialization, for standalone use). diff --git a/src/ix/mutual.rs b/src/ix/mutual.rs index 3e0e5dde..b3bf8122 100644 --- a/src/ix/mutual.rs +++ b/src/ix/mutual.rs @@ -5,13 +5,14 @@ //! [`ctx_to_all`] / [`all_to_ctx`] functions convert between ordered name //! vectors and index maps. +use lean_ffi::nat::Nat; + use crate::{ ix::env::{ ConstructorVal, DefinitionSafety, DefinitionVal, Expr, InductiveVal, Name, OpaqueVal, RecursorVal, ReducibilityHints, TheoremVal, }, ix::ixon::constant::DefKind, - lean::nat::Nat, }; use rustc_hash::FxHashMap; diff --git a/src/lean.rs b/src/lean.rs index 676fb0a8..a9f72353 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -1,315 +1,164 @@ -//! Rust bindings for Lean, implemented by mimicking the memory layout of Lean's -//! low-level C objects. +//! Ix-specific Lean domain type definitions. //! -//! This crate must be kept in sync with `lean/lean.h`. Pay close attention to -//! definitions containing C code in their docstrings. 
- -pub mod array; -pub mod boxed; -pub mod ctor; -pub mod external; -pub mod ffi; -pub mod nat; -pub mod object; -pub mod sarray; -pub mod string; - -use std::ffi::{CString, c_void}; - -use crate::lean::{ - boxed::{BoxedU64, BoxedUSize}, - ctor::LeanCtorObject, -}; - -#[inline] -#[allow(clippy::not_unsafe_ptr_arg_deref)] -pub fn as_ref_unsafe<'a, T>(ptr: *const T) -> &'a T { - let t_ref = unsafe { ptr.as_ref() }; - t_ref.expect("Null pointer dereference") -} - -#[inline] -#[allow(clippy::not_unsafe_ptr_arg_deref)] -pub fn as_mut_unsafe<'a, T>(ptr: *mut T) -> &'a mut T { - let t_ref = unsafe { ptr.as_mut() }; - t_ref.expect("Null pointer dereference") -} - -/// ```c -/// bool lean_is_scalar(lean_object * o) { return ((size_t)(o) & 1) == 1; } -/// ``` -#[inline] -pub fn lean_is_scalar(ptr: *const T) -> bool { - ptr as usize & 1 == 1 -} - -/// Create a CString from a str, stripping any interior null bytes. -/// Lean strings are length-prefixed and can contain null bytes, but the -/// `lean_mk_string` FFI requires a null-terminated C string. This function -/// ensures conversion always succeeds by filtering out interior nulls. -pub fn safe_cstring(s: &str) -> CString { - CString::new(s).unwrap_or_else(|_| { - let bytes: Vec = s.bytes().filter(|&b| b != 0).collect(); - CString::new(bytes).expect("filtered string should have no nulls") - }) -} - -#[macro_export] -/// ```c -/// lean_object * lean_box(size_t n) { return (lean_object*)(((size_t)(n) << 1) | 1); } -/// ``` -macro_rules! lean_box { - ($e:expr) => { - (($e << 1) | 1) as *const std::ffi::c_void - }; -} - -/// ```c -/// size_t lean_unbox(lean_object * o) { return (size_t)(o) >> 1; } -/// ``` -#[macro_export] -macro_rules! 
lean_unbox { - ($t:ident, $e:expr) => { - $t::try_from(($e as usize) >> 1).expect("Unintended truncation") - }; -} - -/// ```c -/// unsigned lean_unbox_uint32(b_lean_obj_arg o) { -/// if (sizeof(void*) == 4) { -/// /* 32-bit implementation */ -/// return lean_ctor_get_uint32(o, 0); -/// } else { -/// /* 64-bit implementation */ -/// return lean_unbox(o); -/// } -/// } -/// ``` -#[inline] -pub fn lean_unbox_u32(ptr: *const c_void) -> u32 { - if cfg!(target_pointer_width = "32") { - let boxed_usize: &BoxedUSize = as_ref_unsafe(ptr.cast()); - u32::try_from(boxed_usize.value).expect("Cannot convert from usize") - } else { - lean_unbox!(u32, ptr) - } -} - -/// ```c -/// uint64_t lean_unbox_uint64(b_lean_obj_arg o) { -/// return lean_ctor_get_uint64(o, 0); -/// } -/// ``` -#[inline] -pub fn lean_unbox_u64(ptr: *const c_void) -> u64 { - let boxed_usize: &BoxedU64 = as_ref_unsafe(ptr.cast()); - boxed_usize.value -} - -/// ```c -/// lean_object * lean_box_uint64(uint64_t v) { -/// lean_object * r = lean_alloc_ctor(0, 0, sizeof(uint64_t)); -/// lean_ctor_set_uint64(r, 0, v); -/// return r; -/// } -/// ``` -#[inline] -pub fn lean_box_u64(v: u64) -> *mut c_void { - unsafe { - let obj = lean_alloc_ctor(0, 0, 8); - lean_ctor_set_uint64(obj, 0, v); - obj - } -} - -pub fn boxed_usize_ptr_to_usize(ptr: *const c_void) -> usize { - let boxed_usize_ptr = ptr.cast::(); - let boxed_usize = as_ref_unsafe(boxed_usize_ptr); - boxed_usize.value -} - -/// Emulates arrays of flexible size from C. -#[repr(C)] -pub struct CArray([T; 0]); - -impl CArray { +//! Generic Lean FFI wrappers live in the `lean_ffi` crate. This module defines +//! typed newtypes for ix-specific Lean types using `lean_ffi::lean_domain_type!`. + +lean_ffi::lean_domain_type! { + // Ix core types + /// Lean `Ix.Name` object. + LeanIxName; + /// Lean `Ix.Level` object. + LeanIxLevel; + /// Lean `Ix.Expr` object. + LeanIxExpr; + /// Lean `Ix.ConstantInfo` object. + LeanIxConstantInfo; + /// Lean `Ix.ConstantVal` object. 
+ LeanIxConstantVal; + /// Lean `Ix.ReducibilityHints` object. + LeanIxReducibilityHints; + /// Lean `Ix.Literal` object. + LeanIxLiteral; + /// Lean `Ix.BinderInfo` object. + LeanIxBinderInfo; + /// Lean `Ix.RecursorRule` object. + LeanIxRecursorRule; + /// Lean `Ix.RawEnvironment` object. + LeanIxRawEnvironment; + /// Lean `Ix.Environment` object. + LeanIxEnvironment; + /// Lean `Ix.RustCondensedBlocks` object. + LeanIxCondensedBlocks; + /// Lean `Ix.CompileM.RustCompilePhases` object. + LeanIxCompilePhases; + + // Ix data types + /// Lean `Ix.Int` object. + LeanIxInt; + /// Lean `Ix.Substring` object. + LeanIxSubstring; + /// Lean `Ix.SourceInfo` object. + LeanIxSourceInfo; + /// Lean `Ix.SyntaxPreresolved` object. + LeanIxSyntaxPreresolved; + /// Lean `Ix.Syntax` object. + LeanIxSyntax; + /// Lean `Ix.DataValue` object. + LeanIxDataValue; + + // Ixon types + /// Lean `Ixon.DefKind` object. + LeanIxonDefKind; + /// Lean `Ixon.DefinitionSafety` object. + LeanIxonDefinitionSafety; + /// Lean `Ixon.QuotKind` object. + LeanIxonQuotKind; + /// Lean `Ixon.Univ` object. + LeanIxonUniv; + /// Lean `Ixon.Expr` object. + LeanIxonExpr; + /// Lean `Ixon.Definition` object. + LeanIxonDefinition; + /// Lean `Ixon.RecursorRule` object. + LeanIxonRecursorRule; + /// Lean `Ixon.Recursor` object. + LeanIxonRecursor; + /// Lean `Ixon.Axiom` object. + LeanIxonAxiom; + /// Lean `Ixon.Quotient` object. + LeanIxonQuotient; + /// Lean `Ixon.Constructor` object. + LeanIxonConstructor; + /// Lean `Ixon.Inductive` object. + LeanIxonInductive; + /// Lean `Ixon.InductiveProj` object. + LeanIxonInductiveProj; + /// Lean `Ixon.ConstructorProj` object. + LeanIxonConstructorProj; + /// Lean `Ixon.RecursorProj` object. + LeanIxonRecursorProj; + /// Lean `Ixon.DefinitionProj` object. + LeanIxonDefinitionProj; + /// Lean `Ixon.MutConst` object. + LeanIxonMutConst; + /// Lean `Ixon.ConstantInfo` object. + LeanIxonConstantInfo; + /// Lean `Ixon.Constant` object. 
+ LeanIxonConstant; + /// Lean `Ixon.DataValue` object. + LeanIxonDataValue; + /// Lean `Ixon.ExprMetaData` object. + LeanIxonExprMetaData; + /// Lean `Ixon.ExprMetaArena` object. + LeanIxonExprMetaArena; + /// Lean `Ixon.ConstantMeta` object. + LeanIxonConstantMeta; + /// Lean `Ixon.Named` object. + LeanIxonNamed; + /// Lean `Ixon.Comm` object. + LeanIxonComm; + /// Lean `Ixon.RawEnv` object. + LeanIxonRawEnv; + /// Lean `Ixon.RawConst` object. + LeanIxonRawConst; + /// Lean `Ixon.RawNamed` object. + LeanIxonRawNamed; + /// Lean `Ixon.RawBlob` object. + LeanIxonRawBlob; + /// Lean `Ixon.RawComm` object. + LeanIxonRawComm; + /// Lean `Ixon.RawNameEntry` object. + LeanIxonRawNameEntry; + + // Aiur types + /// Lean `Aiur.Bytecode.Toplevel` object. + LeanAiurToplevel; + /// Lean `Aiur.FriParameters` object. + LeanAiurFriParameters; + + // Error types + /// Lean `Ixon.SerializeError` object. + LeanIxSerializeError; + /// Lean `Ix.DecompileM.DecompileError` object. + LeanIxDecompileError; + /// Lean `Ix.CompileM.CompileError` object. + LeanIxCompileError; + /// Lean `BlockCompareResult` object. + LeanIxBlockCompareResult; + /// Lean `BlockCompareDetail` object. + LeanIxBlockCompareDetail; +} + +/// Lean `Address` object — newtype over `LeanByteArray`. 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanIxAddress(lean_ffi::object::LeanByteArray); + +impl std::ops::Deref for LeanIxAddress { + type Target = lean_ffi::object::LeanByteArray; #[inline] - pub fn slice(&self, len: usize) -> &[T] { - unsafe { std::slice::from_raw_parts(self.0.as_ptr(), len) } - } - - #[inline] - pub fn slice_mut(&mut self, len: usize) -> &mut [T] { - unsafe { std::slice::from_raw_parts_mut(self.0.as_mut_ptr(), len) } - } - - #[inline] - pub fn copy_from_slice(&mut self, src: &[T]) { - unsafe { - std::ptr::copy_nonoverlapping( - src.as_ptr(), - self.0.as_ptr() as *mut _, - src.len(), - ); - } + fn deref(&self) -> &lean_ffi::object::LeanByteArray { + &self.0 } } -pub struct ListIterator(*const c_void); - -impl Iterator for ListIterator { - type Item = *const c_void; - fn next(&mut self) -> Option { - let ptr = self.0; - if lean_is_scalar(ptr) { - return None; - } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); - self.0 = tail_ptr; - Some(head_ptr) +impl From for lean_ffi::object::LeanObject { + #[inline] + fn from(x: LeanIxAddress) -> Self { + x.0.into() } } -pub fn collect_list( - mut ptr: *const c_void, - map_fn: fn(*const c_void) -> T, -) -> Vec { - let mut vec = Vec::new(); - while !lean_is_scalar(ptr) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); - vec.push(map_fn(head_ptr)); - ptr = tail_ptr; +impl From for LeanIxAddress { + #[inline] + fn from(x: lean_ffi::object::LeanByteArray) -> Self { + Self(x) } - vec } -pub fn collect_list_with( - mut ptr: *const c_void, - map_fn: fn(*const c_void, &mut C) -> T, - c: &mut C, -) -> Vec { - let mut vec = Vec::new(); - while !lean_is_scalar(ptr) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); - vec.push(map_fn(head_ptr, c)); - ptr = tail_ptr; +impl LeanIxAddress { + #[inline] + pub fn new(obj: lean_ffi::object::LeanObject) -> Self 
{ + Self(obj.as_byte_array()) } - vec -} - -// ============================================================================= -// Lean C API extern declarations for object construction -// ============================================================================= - -use std::ffi::c_uint; - -// Lean C API wrappers (defined in c/ixon_ffi.c) -// These wrap Lean's allocation functions so they can be linked from Rust -unsafe extern "C" { - // Object allocation - /// Allocate a constructor object with the given tag, number of object fields, - /// and scalar size in bytes. - #[link_name = "c_lean_alloc_ctor"] - pub fn lean_alloc_ctor( - tag: c_uint, - num_objs: c_uint, - scalar_sz: c_uint, - ) -> *mut c_void; - - /// Set the i-th object field of a constructor. - #[link_name = "c_lean_ctor_set"] - pub fn lean_ctor_set(o: *mut c_void, i: c_uint, v: *mut c_void); - - /// Get the i-th object field of a constructor. - #[link_name = "c_lean_ctor_get"] - pub fn lean_ctor_get(o: *mut c_void, i: c_uint) -> *const c_void; - - /// Get the tag of a Lean object. - #[link_name = "c_lean_obj_tag"] - pub fn lean_obj_tag(o: *mut c_void) -> c_uint; - - /// Set a uint8 scalar field at the given byte offset (after object fields). - #[link_name = "c_lean_ctor_set_uint8"] - pub fn lean_ctor_set_uint8(o: *mut c_void, offset: usize, v: u8); - - /// Set a uint64 scalar field at the given byte offset (after object fields). - #[link_name = "c_lean_ctor_set_uint64"] - pub fn lean_ctor_set_uint64(o: *mut c_void, offset: usize, v: u64); - - // String allocation - /// Create a Lean string from a null-terminated C string. - #[link_name = "c_lean_mk_string"] - pub fn lean_mk_string(s: *const std::ffi::c_char) -> *mut c_void; - - // Scalar array (ByteArray) allocation - /// Allocate a scalar array with the given element size, initial size, and capacity. 
- #[link_name = "c_lean_alloc_sarray"] - pub fn lean_alloc_sarray( - elem_size: c_uint, - size: usize, - capacity: usize, - ) -> *mut c_void; - - /// Get a pointer to the data area of a scalar array. - #[link_name = "c_lean_sarray_cptr"] - pub fn lean_sarray_cptr(o: *mut c_void) -> *mut u8; - - // Array allocation - /// Allocate an array with the given initial size and capacity. - #[link_name = "c_lean_alloc_array"] - pub fn lean_alloc_array(size: usize, capacity: usize) -> *mut c_void; - - /// Set the i-th element of an array (does not update size). - #[link_name = "c_lean_array_set_core"] - pub fn lean_array_set_core(o: *mut c_void, i: usize, v: *mut c_void); - - /// Get the i-th element of an array. - #[link_name = "c_lean_array_get_core"] - pub fn lean_array_get_core(o: *mut c_void, i: usize) -> *const c_void; - - // Reference counting - /// Increment the reference count of a Lean object. - #[link_name = "c_lean_inc"] - pub fn lean_inc(o: *mut c_void); - - /// Increment the reference count by n. - #[link_name = "c_lean_inc_n"] - pub fn lean_inc_n(o: *mut c_void, n: usize); - - // IO result construction - /// Wrap a value in a successful IO result. - #[link_name = "c_lean_io_result_mk_ok"] - pub fn lean_io_result_mk_ok(v: *mut c_void) -> *mut c_void; - - /// Wrap an error in an IO error result. - #[link_name = "c_lean_io_result_mk_error"] - pub fn lean_io_result_mk_error(err: *mut c_void) -> *mut c_void; - - /// Create an IO.Error.userError from a String. - #[link_name = "c_lean_mk_io_user_error"] - pub fn lean_mk_io_user_error(msg: *mut c_void) -> *mut c_void; - - // Nat allocation for large values - /// Create a Nat from a uint64. For values > max boxed, allocates on heap. - #[link_name = "c_lean_uint64_to_nat"] - pub fn lean_uint64_to_nat(n: u64) -> *mut c_void; - - /// Create a Nat from limbs (little-endian u64 array). Uses GMP internally. 
- #[link_name = "c_lean_nat_from_limbs"] - pub fn lean_nat_from_limbs( - num_limbs: usize, - limbs: *const u64, - ) -> *mut c_void; -} - -/// Box a scalar value into a Lean object pointer. -/// ```c -/// lean_object * lean_box(size_t n) { return (lean_object*)(((size_t)(n) << 1) | 1); } -/// ``` -#[inline] -pub fn lean_box_fn(n: usize) -> *mut c_void { - ((n << 1) | 1) as *mut c_void } diff --git a/src/lean/array.rs b/src/lean/array.rs deleted file mode 100644 index 0bb468fb..00000000 --- a/src/lean/array.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::ffi::c_void; - -use super::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// size_t m_size; -/// size_t m_capacity; -/// lean_object * m_data[]; -/// } lean_array_object; -/// ``` -#[repr(C)] -pub struct LeanArrayObject { - m_header: LeanObject, - m_size: usize, - m_capacity: usize, - m_data: CArray<*const c_void>, -} - -impl LeanArrayObject { - #[inline] - pub fn data(&self) -> &[*const c_void] { - self.m_data.slice(self.m_size) - } - - #[inline] - pub fn to_vec(&self, map_fn: fn(*const c_void) -> T) -> Vec { - self.data().iter().map(|ptr| map_fn(*ptr)).collect() - } - - #[inline] - pub fn to_vec_with( - &self, - map_fn: fn(*const c_void, &mut C) -> T, - c: &mut C, - ) -> Vec { - self.data().iter().map(|ptr| map_fn(*ptr, c)).collect() - } - - pub fn set_data(&mut self, data: &[*const c_void]) { - assert!(self.m_capacity >= data.len()); - self.m_data.copy_from_slice(data); - self.m_size = data.len(); - } -} diff --git a/src/lean/boxed.rs b/src/lean/boxed.rs deleted file mode 100644 index f7e709e9..00000000 --- a/src/lean/boxed.rs +++ /dev/null @@ -1,16 +0,0 @@ -use super::object::LeanObject; - -/// This is equivalent to a `lean_ctor_object` with `m_objs` of size 1. -#[repr(C)] -pub struct BoxedUSize { - m_header: LeanObject, - pub value: usize, -} - -/// This is equivalent to a `lean_ctor_object` with `m_objs` of size 1 on x64 -/// and 2 on x32. 
-#[repr(C)] -pub struct BoxedU64 { - m_header: LeanObject, - pub value: u64, -} diff --git a/src/lean/ctor.rs b/src/lean/ctor.rs deleted file mode 100644 index 4e17f439..00000000 --- a/src/lean/ctor.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! Lean constructor object layout and field access. - -use std::{ffi::c_void, ptr}; - -use super::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// lean_object * m_objs[]; -/// } lean_ctor_object; -/// ``` -#[repr(C)] -pub struct LeanCtorObject { - m_header: LeanObject, - m_objs: CArray<*const c_void>, -} - -impl LeanCtorObject { - #[inline] - pub fn tag(&self) -> u8 { - self.m_header.m_tag() - } - - /// The number of objects must be known at compile time, given the context - /// in which the data is being read. - #[inline] - pub fn objs(&self) -> [*const c_void; N] { - let mut ptrs = [ptr::null(); N]; - ptrs.copy_from_slice(self.m_objs.slice(N)); - ptrs - } - - #[inline] - pub fn set_objs(&mut self, data: &[*const c_void]) { - self.m_objs.copy_from_slice(data); - } - - /// Read a u64 scalar field from the constructor. - /// `num_objs` is the number of object fields (pointers) in this constructor. - /// `scalar_offset` is the byte offset within the scalar area. - /// Scalar fields are stored after the object fields in memory. - #[inline] - pub fn get_scalar_u64(&self, num_objs: usize, scalar_offset: usize) -> u64 { - // Scalar area starts after: header (8 bytes) + object pointers (8 bytes each) - let base_ptr = (self as *const Self).cast::(); - let scalar_area = unsafe { base_ptr.add(8 + num_objs * 8 + scalar_offset) }; - unsafe { ptr::read_unaligned(scalar_area.cast::()) } - } - - /// Read a u8 scalar field from the constructor. 
- #[inline] - pub fn get_scalar_u8(&self, num_objs: usize, scalar_offset: usize) -> u8 { - let base_ptr = (self as *const Self).cast::(); - let scalar_area = unsafe { base_ptr.add(8 + num_objs * 8 + scalar_offset) }; - unsafe { *scalar_area } - } - - /// Read a bool scalar field from the constructor. - #[inline] - pub fn get_scalar_bool(&self, num_objs: usize, scalar_offset: usize) -> bool { - self.get_scalar_u8(num_objs, scalar_offset) != 0 - } -} diff --git a/src/lean/external.rs b/src/lean/external.rs deleted file mode 100644 index a16437b8..00000000 --- a/src/lean/external.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::ffi::c_void; - -use super::object::LeanObject; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// lean_external_class * m_class; -/// void * m_data; -/// } lean_external_object; -/// ``` -#[repr(C)] -pub struct LeanExternalObject { - m_header: LeanObject, - m_class: *const c_void, - m_data: *const c_void, -} - -impl LeanExternalObject { - #[inline] - pub fn cast_data(&self) -> *const T { - self.m_data.cast() - } -} diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs deleted file mode 100644 index 07003a57..00000000 --- a/src/lean/ffi.rs +++ /dev/null @@ -1,136 +0,0 @@ -pub mod aiur; -pub mod byte_array; -pub mod iroh; -pub mod keccak; -pub mod lean_env; - -// Modular FFI structure -pub mod builder; // IxEnvBuilder struct -pub mod compile; // Compilation: rs_compile_env_full, rs_compile_phases, etc. -pub mod graph; // Graph/SCC: rs_build_ref_graph, rs_compute_sccs -pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment -pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata -pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. 
- -use std::ffi::{CStr, CString, c_char, c_void}; - -use crate::lean::{ - array::LeanArrayObject, as_ref_unsafe, lean_io_result_mk_error, - lean_mk_io_user_error, lean_mk_string, lean_unbox_u32, - sarray::LeanSArrayObject, -}; - -/// Guard an FFI function that returns a Lean IO result against panics. -/// On panic, returns a Lean IO error with the panic message instead of -/// unwinding across the `extern "C"` boundary (which is undefined behavior). -pub(crate) fn ffi_io_guard(f: F) -> *mut c_void -where - F: FnOnce() -> *mut c_void + std::panic::UnwindSafe, -{ - match std::panic::catch_unwind(f) { - Ok(result) => result, - Err(panic_info) => { - let msg = if let Some(s) = panic_info.downcast_ref::<&str>() { - format!("FFI panic: {s}") - } else if let Some(s) = panic_info.downcast_ref::() { - format!("FFI panic: {s}") - } else { - "FFI panic: unknown".to_string() - }; - let c_msg = CString::new(msg).unwrap_or_else(|_| { - CString::new("FFI panic: (invalid message)").unwrap() - }); - unsafe { - let lean_msg = lean_mk_string(c_msg.as_ptr()); - let lean_err = lean_mk_io_user_error(lean_msg); - lean_io_result_mk_error(lean_err) - } - }, - } -} - -/// ```c -/// typedef struct { -/// bool is_ok; -/// void *data; -/// } c_result; -/// ``` -#[repr(C)] -pub struct CResult { - pub is_ok: bool, - pub data: *const c_void, -} - -// Free a `CResult` object that corresponds to the Rust type `Result<(), String>` -#[unsafe(no_mangle)] -extern "C" fn rs__c_result_unit_string_free(ptr: *mut CResult) { - let c_result = as_ref_unsafe(ptr); - // Free the string error message - if !c_result.is_ok { - let char_ptr = c_result.data as *mut c_char; - let c_string = unsafe { CString::from_raw(char_ptr) }; - drop(c_string); - } - drop_raw(ptr); -} - -#[inline] -pub(crate) fn to_raw(t: T) -> *const T { - Box::into_raw(Box::new(t)) -} - -#[inline] -pub(super) fn drop_raw(ptr: *mut T) { - assert!(!ptr.is_null(), "Null pointer free attempt"); - let t = unsafe { Box::from_raw(ptr) }; - drop(t); -} 
- -// Only used in the Iroh client for the moment -#[inline] -#[cfg_attr( - any(not(feature = "net"), all(target_os = "macos", target_arch = "aarch64")), - allow(dead_code) -)] -pub(crate) fn raw_to_str<'a>(ptr: *const c_char) -> &'a str { - let c_str = unsafe { CStr::from_ptr(ptr) }; - c_str.to_str().expect("Invalid UTF-8 string") -} - -#[unsafe(no_mangle)] -extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( - u32s: &LeanArrayObject, - bytes: &LeanSArrayObject, -) -> bool { - let u32s = u32s - .to_vec(lean_unbox_u32) - .into_iter() - .flat_map(u32::to_le_bytes) - .collect::>(); - u32s == bytes.data() -} - -#[repr(C)] -pub struct BytesData { - size: usize, - bytes_vec: *const Vec, -} - -impl BytesData { - #[inline] - pub(super) fn from_vec(vec: Vec) -> Self { - Self { size: vec.len(), bytes_vec: to_raw(vec) } - } -} - -#[unsafe(no_mangle)] -extern "C" fn rs_move_bytes( - bytes_data: *mut BytesData, - byte_array: &mut LeanSArrayObject, -) { - let bytes_data = unsafe { Box::from_raw(bytes_data) }; - let bytes_vec = unsafe { Box::from_raw(bytes_data.bytes_vec as *mut Vec<_>) }; - byte_array.set_data(&bytes_vec); - drop(bytes_vec); - drop(bytes_data); -} diff --git a/src/lean/ffi/aiur.rs b/src/lean/ffi/aiur.rs deleted file mode 100644 index 24927018..00000000 --- a/src/lean/ffi/aiur.rs +++ /dev/null @@ -1,23 +0,0 @@ -use multi_stark::p3_field::integers::QuotientMap; -use std::ffi::c_void; - -pub mod protocol; -pub mod toplevel; - -use crate::{ - aiur::G, - lean::{lean_is_scalar, lean_unbox_u64}, - lean_unbox, -}; - -#[inline] -pub(super) fn lean_unbox_nat_as_usize(ptr: *const c_void) -> usize { - assert!(lean_is_scalar(ptr)); - lean_unbox!(usize, ptr) -} - -#[inline] -pub(super) fn lean_unbox_g(ptr: *const c_void) -> G { - let u64 = lean_unbox_u64(ptr); - unsafe { G::from_canonical_unchecked(u64) } -} diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs deleted file mode 100644 index be3afef6..00000000 --- a/src/lean/ffi/aiur/protocol.rs +++ 
/dev/null @@ -1,245 +0,0 @@ -use multi_stark::{ - p3_field::PrimeField64, - prover::Proof, - types::{CommitmentParameters, FriParameters}, -}; -use rustc_hash::{FxBuildHasher, FxHashMap}; -use std::{ - ffi::{CString, c_void}, - slice, -}; - -use crate::{ - aiur::{ - G, - execute::{IOBuffer, IOKeyInfo}, - synthesis::AiurSystem, - }, - lean::{ - array::LeanArrayObject, - as_mut_unsafe, as_ref_unsafe, - boxed::BoxedU64, - ctor::LeanCtorObject, - ffi::{ - BytesData, CResult, - aiur::{ - lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ctor_to_toplevel, - }, - drop_raw, to_raw, - }, - sarray::LeanSArrayObject, - }, - lean_box, -}; - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_to_bytes(proof: &Proof) -> *const BytesData { - let bytes = proof.to_bytes().expect("Serialization error"); - let bytes_data = BytesData::from_vec(bytes); - to_raw(bytes_data) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_of_bytes( - byte_array: &LeanSArrayObject, -) -> *const Proof { - let proof = - Proof::from_bytes(byte_array.data()).expect("Deserialization error"); - to_raw(proof) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_free(ptr: *mut AiurSystem) { - drop_raw(ptr); -} - -fn lean_ptr_to_commitment_parameters( - commitment_parameters_ptr: *const c_void, -) -> CommitmentParameters { - // Single-attribute structure in Lean. 
- CommitmentParameters { - log_blowup: lean_unbox_nat_as_usize(commitment_parameters_ptr), - } -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_build( - toplevel: &LeanCtorObject, - commitment_parameters: *const c_void, -) -> *const AiurSystem { - to_raw(AiurSystem::build( - lean_ctor_to_toplevel(toplevel), - lean_ptr_to_commitment_parameters(commitment_parameters), - )) -} - -fn lean_ctor_to_fri_parameters(ctor: &LeanCtorObject) -> FriParameters { - let [ - log_final_poly_len_ptr, - num_queries_ptr, - commit_proof_of_work_bits, - query_proof_of_work_bits, - ] = ctor.objs(); - FriParameters { - log_final_poly_len: lean_unbox_nat_as_usize(log_final_poly_len_ptr), - num_queries: lean_unbox_nat_as_usize(num_queries_ptr), - commit_proof_of_work_bits: lean_unbox_nat_as_usize( - commit_proof_of_work_bits, - ), - query_proof_of_work_bits: lean_unbox_nat_as_usize(query_proof_of_work_bits), - } -} - -#[repr(C)] -struct ProveData { - claim_size: usize, - claim: *const Vec, - proof: *const Proof, - io_buffer: *const IOBuffer, - io_data_size: usize, - io_map_size: usize, - io_keys_sizes: *const usize, -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_claim_free(ptr: *mut Vec) { - drop_raw(ptr); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_free(ptr: *mut Proof) { - drop_raw(ptr); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_prove_data_io_buffer_free(prove_data: &ProveData) { - let boxed_io_keys_sizes = unsafe { - let slice = slice::from_raw_parts_mut( - prove_data.io_keys_sizes as *mut usize, - prove_data.io_map_size, - ); - Box::from_raw(slice) - }; - drop(boxed_io_keys_sizes); - drop_raw(prove_data.io_buffer as *mut ProveData); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_prove_data_free(ptr: *mut ProveData) { - drop_raw(ptr); -} - -fn lean_array_to_io_buffer_map( - array: &LeanArrayObject, -) -> FxHashMap, IOKeyInfo> { - let array_data = array.data(); - let mut map = - FxHashMap::with_capacity_and_hasher(array_data.len(), FxBuildHasher); - for 
ptr in array_data { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [key_ptr, info_ptr] = ctor.objs(); - let key_array: &LeanArrayObject = as_ref_unsafe(key_ptr.cast()); - let key = key_array.to_vec(lean_unbox_g); - let info_ctor: &LeanCtorObject = as_ref_unsafe(info_ptr.cast()); - let [idx_ptr, len_ptr] = info_ctor.objs(); - let info = IOKeyInfo { - idx: lean_unbox_nat_as_usize(idx_ptr), - len: lean_unbox_nat_as_usize(len_ptr), - }; - map.insert(key, info); - } - map -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_prove( - aiur_system: &AiurSystem, - fri_parameters: &LeanCtorObject, - fun_idx: *const c_void, - args: &LeanArrayObject, - io_data: &LeanArrayObject, - io_map: &LeanArrayObject, -) -> *const ProveData { - let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let fun_idx = lean_unbox_nat_as_usize(fun_idx); - let args = args.to_vec(lean_unbox_g); - let io_data = io_data.to_vec(lean_unbox_g); - let io_map = lean_array_to_io_buffer_map(io_map); - let mut io_buffer = IOBuffer { data: io_data, map: io_map }; - let (claim, proof) = - aiur_system.prove(fri_parameters, fun_idx, &args, &mut io_buffer); - let claim_size = claim.len(); - let io_keys_sizes_boxed: Box<[usize]> = - io_buffer.map.keys().map(Vec::len).collect(); - let io_keys_sizes = io_keys_sizes_boxed.as_ptr(); - std::mem::forget(io_keys_sizes_boxed); - let io_data_size = io_buffer.data.len(); - let io_map_size = io_buffer.map.len(); - let prove_data = ProveData { - claim_size, - claim: to_raw(claim), - proof: to_raw(proof), - io_buffer: to_raw(io_buffer), - io_data_size, - io_map_size, - io_keys_sizes, - }; - to_raw(prove_data) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_set_array_g_values(array: &LeanArrayObject, values: &Vec) { - let array_values = array.data(); - assert_eq!(array_values.len(), values.len()); - array_values.iter().zip(values).for_each(|(ptr, g)| { - let boxed_u64 = as_mut_unsafe(*ptr as *mut BoxedU64); - boxed_u64.value = g.as_canonical_u64(); 
- }); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_set_aiur_io_data_values( - io_data_array: &LeanArrayObject, - io_buffer: &IOBuffer, -) { - rs_set_array_g_values(io_data_array, &io_buffer.data); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_set_aiur_io_map_values( - io_map_array: &LeanArrayObject, - io_buffer: &IOBuffer, -) { - let io_map_values = io_map_array.data(); - assert_eq!(io_map_values.len(), io_buffer.map.len()); - io_map_values.iter().zip(&io_buffer.map).for_each(|(ptr, (key, info))| { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [key_array, key_info] = ctor.objs(); - rs_set_array_g_values( - as_mut_unsafe(key_array as *mut LeanArrayObject), - key, - ); - - let key_info_ctor: &mut LeanCtorObject = as_mut_unsafe(key_info as *mut _); - key_info_ctor.set_objs(&[lean_box!(info.idx), lean_box!(info.len)]); - }); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_verify( - aiur_system: &AiurSystem, - fri_parameters: &LeanCtorObject, - claim: &LeanArrayObject, - proof: &Proof, -) -> *const CResult { - let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let claim = claim.to_vec(lean_unbox_g); - let c_result = match aiur_system.verify(fri_parameters, &claim, proof) { - Ok(()) => CResult { is_ok: true, data: std::ptr::null() }, - Err(err) => { - let msg = CString::new(format!("{err:?}")).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - to_raw(c_result) -} diff --git a/src/lean/ffi/aiur/toplevel.rs b/src/lean/ffi/aiur/toplevel.rs deleted file mode 100644 index 96c8d27b..00000000 --- a/src/lean/ffi/aiur/toplevel.rs +++ /dev/null @@ -1,228 +0,0 @@ -use std::ffi::c_void; - -use multi_stark::p3_field::PrimeCharacteristicRing; - -use crate::{ - FxIndexMap, - aiur::{ - G, - bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, - }, - lean::{ - array::LeanArrayObject, - ctor::LeanCtorObject, - ffi::{ - aiur::{lean_unbox_g, lean_unbox_nat_as_usize}, - 
as_ref_unsafe, - }, - lean_is_scalar, - string::LeanStringObject, - }, -}; - -fn lean_ptr_to_vec_val_idx(ptr: *const c_void) -> Vec { - let array: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - array.to_vec(lean_unbox_nat_as_usize) -} - -fn lean_ptr_to_op(ptr: *const c_void) -> Op { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { - 0 => { - let [const_val_ptr] = ctor.objs(); - Op::Const(G::from_u64(const_val_ptr as u64)) - }, - 1 => { - let [a_ptr, b_ptr] = ctor.objs(); - Op::Add(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) - }, - 2 => { - let [a_ptr, b_ptr] = ctor.objs(); - Op::Sub(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) - }, - 3 => { - let [a_ptr, b_ptr] = ctor.objs(); - Op::Mul(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) - }, - 4 => { - let [a_ptr] = ctor.objs(); - Op::EqZero(lean_unbox_nat_as_usize(a_ptr)) - }, - 5 => { - let [fun_idx_ptr, val_idxs_ptr, output_size_ptr] = ctor.objs(); - let fun_idx = lean_unbox_nat_as_usize(fun_idx_ptr); - let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_ptr); - let output_size = lean_unbox_nat_as_usize(output_size_ptr); - Op::Call(fun_idx, val_idxs, output_size) - }, - 6 => { - let [val_idxs_ptr] = ctor.objs(); - Op::Store(lean_ptr_to_vec_val_idx(val_idxs_ptr)) - }, - 7 => { - let [width_ptr, val_idx_ptr] = ctor.objs(); - Op::Load( - lean_unbox_nat_as_usize(width_ptr), - lean_unbox_nat_as_usize(val_idx_ptr), - ) - }, - 8 => { - let [as_ptr, bs_ptr] = ctor.objs(); - Op::AssertEq( - lean_ptr_to_vec_val_idx(as_ptr), - lean_ptr_to_vec_val_idx(bs_ptr), - ) - }, - 9 => { - let [key_ptr] = ctor.objs(); - Op::IOGetInfo(lean_ptr_to_vec_val_idx(key_ptr)) - }, - 10 => { - let [key_ptr, idx_ptr, len_ptr] = ctor.objs(); - Op::IOSetInfo( - lean_ptr_to_vec_val_idx(key_ptr), - lean_unbox_nat_as_usize(idx_ptr), - lean_unbox_nat_as_usize(len_ptr), - ) - }, - 11 => { - let [idx_ptr, len_ptr] = ctor.objs(); - Op::IORead( - 
lean_unbox_nat_as_usize(idx_ptr), - lean_unbox_nat_as_usize(len_ptr), - ) - }, - 12 => { - let [data_ptr] = ctor.objs(); - Op::IOWrite(lean_ptr_to_vec_val_idx(data_ptr)) - }, - 13 => { - let [byte_ptr] = ctor.objs(); - Op::U8BitDecomposition(lean_unbox_nat_as_usize(byte_ptr)) - }, - 14 => { - let [byte_ptr] = ctor.objs(); - Op::U8ShiftLeft(lean_unbox_nat_as_usize(byte_ptr)) - }, - 15 => { - let [byte_ptr] = ctor.objs(); - Op::U8ShiftRight(lean_unbox_nat_as_usize(byte_ptr)) - }, - 16 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); - Op::U8Xor(i, j) - }, - 17 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); - Op::U8Add(i, j) - }, - 18 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); - Op::U8Sub(i, j) - }, - 19 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); - Op::U8And(i, j) - }, - 20 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); - Op::U8Or(i, j) - }, - 21 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); - Op::U8LessThan(i, j) - }, - 22 => { - let [label_ptr, idxs_ptr] = ctor.objs(); - let label_str: &LeanStringObject = as_ref_unsafe(label_ptr.cast()); - let label = label_str.as_string(); - let idxs = if lean_is_scalar(idxs_ptr) { - None - } else { - let option_ctor: &LeanCtorObject = as_ref_unsafe(idxs_ptr.cast()); - let [idxs_ptr] = option_ctor.objs(); - let idxs: &LeanArrayObject = as_ref_unsafe(idxs_ptr.cast()); - Some(idxs.to_vec(lean_unbox_nat_as_usize)) - }; - Op::Debug(label, idxs) - }, - _ => unreachable!(), - } -} - -fn lean_ptr_to_g_block_pair(ptr: *const c_void) -> (G, Block) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [g_ptr, block_ptr] = ctor.objs(); - let g = lean_unbox_g(g_ptr); - let block = lean_ctor_to_block(as_ref_unsafe(block_ptr.cast())); - (g, block) -} - -fn lean_ctor_to_ctrl(ctor: &LeanCtorObject) -> Ctrl { - match ctor.tag() { - 0 => { - let [val_idx_ptr, cases_ptr, default_ptr] = ctor.objs(); - let val_idx = 
lean_unbox_nat_as_usize(val_idx_ptr); - let cases_array: &LeanArrayObject = as_ref_unsafe(cases_ptr.cast()); - let vec_cases = cases_array.to_vec(lean_ptr_to_g_block_pair); - let cases = FxIndexMap::from_iter(vec_cases); - let default = if lean_is_scalar(default_ptr) { - None - } else { - let default_ctor: &LeanCtorObject = as_ref_unsafe(default_ptr.cast()); - let [block_ptr] = default_ctor.objs(); - let block = lean_ctor_to_block(as_ref_unsafe(block_ptr.cast())); - Some(Box::new(block)) - }; - Ctrl::Match(val_idx, cases, default) - }, - 1 => { - let [sel_idx_ptr, val_idxs_ptr] = ctor.objs(); - let sel_idx = lean_unbox_nat_as_usize(sel_idx_ptr); - let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_ptr); - Ctrl::Return(sel_idx, val_idxs) - }, - _ => unreachable!(), - } -} - -fn lean_ctor_to_block(ctor: &LeanCtorObject) -> Block { - let [ops_ptr, ctrl_ptr, min_sel_included_ptr, max_sel_excluded_ptr] = - ctor.objs(); - let ops_array: &LeanArrayObject = as_ref_unsafe(ops_ptr.cast()); - let ops = ops_array.to_vec(lean_ptr_to_op); - let ctrl = lean_ctor_to_ctrl(as_ref_unsafe(ctrl_ptr.cast())); - let min_sel_included = lean_unbox_nat_as_usize(min_sel_included_ptr); - let max_sel_excluded = lean_unbox_nat_as_usize(max_sel_excluded_ptr); - Block { ops, ctrl, min_sel_included, max_sel_excluded } -} - -fn lean_ctor_to_function_layout(ctor: &LeanCtorObject) -> FunctionLayout { - let [input_size_ptr, selectors_ptr, auxiliaries_ptr, lookups_ptr] = - ctor.objs(); - FunctionLayout { - input_size: lean_unbox_nat_as_usize(input_size_ptr), - selectors: lean_unbox_nat_as_usize(selectors_ptr), - auxiliaries: lean_unbox_nat_as_usize(auxiliaries_ptr), - lookups: lean_unbox_nat_as_usize(lookups_ptr), - } -} - -fn lean_ptr_to_function(ptr: *const c_void) -> Function { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [body_ptr, layout_ptr, unconstrained_ptr] = ctor.objs(); - let body = lean_ctor_to_block(as_ref_unsafe(body_ptr.cast())); - let layout = 
lean_ctor_to_function_layout(as_ref_unsafe(layout_ptr.cast())); - let unconstrained = unconstrained_ptr as usize != 0; - Function { body, layout, unconstrained } -} - -pub(crate) fn lean_ctor_to_toplevel(ctor: &LeanCtorObject) -> Toplevel { - let [functions_ptr, memory_sizes_ptr] = ctor.objs(); - let functions_array: &LeanArrayObject = as_ref_unsafe(functions_ptr.cast()); - let functions = functions_array.to_vec(lean_ptr_to_function); - let memory_sizes_array: &LeanArrayObject = - as_ref_unsafe(memory_sizes_ptr.cast()); - let memory_sizes = memory_sizes_array.to_vec(lean_unbox_nat_as_usize); - Toplevel { functions, memory_sizes } -} diff --git a/src/lean/ffi/byte_array.rs b/src/lean/ffi/byte_array.rs deleted file mode 100644 index 86bc01cf..00000000 --- a/src/lean/ffi/byte_array.rs +++ /dev/null @@ -1,11 +0,0 @@ -use crate::lean::sarray::LeanSArrayObject; - -/// `@& ByteArray → @& ByteArray → Bool` -/// Efficient implementation for `BEq ByteArray` -#[unsafe(no_mangle)] -extern "C" fn rs_byte_array_beq( - a: &LeanSArrayObject, - b: &LeanSArrayObject, -) -> bool { - a.data() == b.data() -} diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs deleted file mode 100644 index 41c0a7a2..00000000 --- a/src/lean/ffi/compile.rs +++ /dev/null @@ -1,1599 +0,0 @@ -//! FFI bridge between Lean and Rust for the Ixon compilation/decompilation pipeline. -//! -//! Provides `extern "C"` functions callable from Lean via `@[extern]`: -//! - `rs_compile_env_full` / `rs_compile_env`: compile a Lean environment to Ixon -//! - `rs_compile_phases`: run individual pipeline phases (canon, condense, graph, compile) -//! - `rs_decompile_env`: decompile Ixon back to Lean environment -//! - `rs_roundtrip_*`: roundtrip FFI tests for Lean↔Rust type conversions -//! - `build_*` / `decode_*`: convert between Lean constructor layouts and Rust types -//! -//! ## Lean object layout conventions -//! -//! Lean constructors are allocated via `lean_alloc_ctor(tag, num_objs, scalar_size)`: -//! 
- Object fields are accessed with `lean_ctor_get(obj, i)` (0-indexed) -//! - Scalar fields follow objects at byte offset `8 + num_objs * 8` -//! - Scalar fields are accessed via pointer arithmetic on the object base - -use std::collections::HashMap; -use std::ffi::{CString, c_void}; -use std::sync::Arc; - -use super::ffi_io_guard; -use crate::ix::address::Address; -use crate::ix::compile::{CompileState, compile_env}; -use crate::ix::condense::compute_sccs; -use crate::ix::decompile::decompile_env; -use crate::ix::env::Name; -use crate::ix::graph::build_ref_graph; -use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; -use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::ix::ixon::serialize::put_expr; -use crate::ix::ixon::{Comm, ConstantMeta}; -use crate::lean::nat::Nat; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, - lean_ctor_set_uint64, lean_inc, lean_io_result_mk_error, - lean_io_result_mk_ok, lean_mk_io_user_error, lean_mk_string, lean_obj_tag, - lean_sarray_cptr, lean_uint64_to_nat, -}; - -use dashmap::DashMap; -use dashmap::DashSet; - -use super::builder::LeanBuildCache; -use super::graph::build_condensed_blocks; -use super::ix::constant::build_constant_info; -use super::ix::env::build_raw_environment; -use super::ix::name::build_name; -use super::ixon::constant::{ - build_address_from_ixon, build_ixon_constant, decode_ixon_address, -}; -use super::ixon::env::{ - build_raw_env, build_raw_name_entry, decode_raw_env, decoded_to_ixon_env, -}; -use super::ixon::meta::{build_constant_meta, build_ixon_comm}; -use super::lean_env::{GlobalCache, lean_ptr_to_env, lean_ptr_to_name}; - -// ============================================================================= -// Raw* Builder Functions for Compile FFI -// 
============================================================================= - -/// Build RawConst: { addr : Address, const : Ixon.Constant } -pub fn build_raw_const(addr: &Address, constant: &IxonConstant) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let const_obj = build_ixon_constant(constant); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, const_obj); - obj - } -} - -/// Build RawNamed: { name : Ix.Name, addr : Address, constMeta : Ixon.ConstantMeta } -pub fn build_raw_named( - cache: &mut LeanBuildCache, - name: &Name, - addr: &Address, - meta: &ConstantMeta, -) -> *mut c_void { - unsafe { - let name_obj = build_name(cache, name); - let addr_obj = build_address_from_ixon(addr); - let meta_obj = build_constant_meta(meta); - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, addr_obj); - lean_ctor_set(obj, 2, meta_obj); - obj - } -} - -/// Build RawBlob: { addr : Address, bytes : ByteArray } -pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let ba_data = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), ba_data, bytes.len()); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, ba); - obj - } -} - -/// Build RawComm: { addr : Address, comm : Ixon.Comm } -pub fn build_raw_comm(addr: &Address, comm: &Comm) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let comm_obj = build_ixon_comm(comm); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, comm_obj); - obj - } -} - -// ============================================================================= -// RustCondensedBlocks roundtrip FFI -// ============================================================================= 
- -/// Round-trip a RustCondensedBlocks structure. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_rust_condensed_blocks( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let low_links = lean_ctor_get(ptr as *mut _, 0) as *mut c_void; - let blocks = lean_ctor_get(ptr as *mut _, 1) as *mut c_void; - let block_refs = lean_ctor_get(ptr as *mut _, 2) as *mut c_void; - - lean_inc(low_links); - lean_inc(blocks); - lean_inc(block_refs); - - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, low_links); - lean_ctor_set(result, 1, blocks); - lean_ctor_set(result, 2, block_refs); - result - } -} - -/// Round-trip a RustCompilePhases structure. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_rust_compile_phases( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let raw_env = lean_ctor_get(ptr as *mut _, 0) as *mut c_void; - let condensed = lean_ctor_get(ptr as *mut _, 1) as *mut c_void; - let compile_env = lean_ctor_get(ptr as *mut _, 2) as *mut c_void; - - lean_inc(raw_env); - lean_inc(condensed); - lean_inc(compile_env); - - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env); - lean_ctor_set(result, 1, condensed); - lean_ctor_set(result, 2, compile_env); - result - } -} - -// ============================================================================= -// BlockCompareResult and BlockCompareDetail roundtrip FFI -// ============================================================================= - -/// Round-trip a BlockCompareResult. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_block_compare_result( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => lean_alloc_ctor(0, 0, 0), - 1 => { - let base = ptr.cast::(); - let lean_size = *base.add(8).cast::(); - let rust_size = *base.add(16).cast::(); - let first_diff = *base.add(24).cast::(); - - let obj = lean_alloc_ctor(1, 0, 24); - let out_base = obj.cast::(); - *out_base.add(8).cast::() = lean_size; - *out_base.add(16).cast::() = rust_size; - *out_base.add(24).cast::() = first_diff; - obj - }, - 2 => lean_alloc_ctor(2, 0, 0), - _ => unreachable!("Invalid BlockCompareResult tag: {}", tag), - } - } -} - -/// Round-trip a BlockCompareDetail. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_block_compare_detail( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let result_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let lean_sharing_len = *base.add(16).cast::(); - let rust_sharing_len = *base.add(24).cast::(); - - let result_obj = rs_roundtrip_block_compare_result(result_ptr); - - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, result_obj); - let out_base = obj.cast::(); - *out_base.add(16).cast::() = lean_sharing_len; - *out_base.add(24).cast::() = rust_sharing_len; - obj - } -} - -// ============================================================================= -// Full Compilation FFI -// ============================================================================= - -/// Create a Lean IO error result from a Rust error message. -unsafe fn make_compile_io_error(msg: &str) -> *mut c_void { - unsafe { - let c_msg = CString::new(msg) - .unwrap_or_else(|_| CString::new("compilation error").unwrap()); - let lean_msg = lean_mk_string(c_msg.as_ptr()); - let lean_err = lean_mk_io_user_error(lean_msg); - lean_io_result_mk_error(lean_err) - } -} - -/// FFI function to run the complete compilation pipeline and return all data. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env_full( - env_consts_ptr: *const c_void, -) -> *mut c_void { - ffi_io_guard(std::panic::AssertUnwindSafe(|| { - // Phase 1: Decode Lean environment - let rust_env = lean_ptr_to_env(env_consts_ptr); - let env_len = rust_env.len(); - let rust_env = Arc::new(rust_env); - - // Phase 2: Build ref graph and compute SCCs - let ref_graph = build_ref_graph(&rust_env); - let condensed = compute_sccs(&ref_graph.out_refs); - - // Phase 3: Compile - let compile_stt = match compile_env(&rust_env) { - Ok(stt) => stt, - Err(e) => { - let msg = - format!("rs_compile_env_full: Rust compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; - }, - }; - - // Phase 4: Build Lean structures - let mut cache = LeanBuildCache::with_capacity(env_len); - - unsafe { - let raw_env = build_raw_environment(&mut cache, &rust_env); - let condensed_obj = build_condensed_blocks(&mut cache, &condensed); - - // Collect blocks - let mut blocks_data: Vec<(Name, Vec, usize)> = Vec::new(); - let mut seen_addrs: std::collections::HashSet
= - std::collections::HashSet::new(); - - for entry in compile_stt.name_to_addr.iter() { - let name = entry.key().clone(); - let addr = entry.value().clone(); - - if seen_addrs.contains(&addr) { - continue; - } - seen_addrs.insert(addr.clone()); - - if let Some(constant) = compile_stt.env.get_const(&addr) { - let mut bytes = Vec::new(); - constant.put(&mut bytes); - let sharing_len = constant.sharing.len(); - blocks_data.push((name, bytes, sharing_len)); - } - } - - // Build blocks array - let blocks_arr = lean_alloc_array(blocks_data.len(), blocks_data.len()); - for (i, (name, bytes, sharing_len)) in blocks_data.iter().enumerate() { - let name_obj = build_name(&mut cache, name); - - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let ba_data = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), ba_data, bytes.len()); - - let block = lean_alloc_ctor(0, 2, 8); - lean_ctor_set(block, 0, name_obj); - lean_ctor_set(block, 1, ba); - let base = block.cast::(); - *base.add(8 + 16).cast::() = *sharing_len as u64; - - lean_array_set_core(blocks_arr, i, block); - } - - // Build nameToAddr array - let name_to_addr_len = compile_stt.name_to_addr.len(); - let name_to_addr_arr = - lean_alloc_array(name_to_addr_len, name_to_addr_len); - for (i, entry) in compile_stt.name_to_addr.iter().enumerate() { - let name = entry.key(); - let addr = entry.value(); - - let name_obj = build_name(&mut cache, name); - - let addr_bytes = addr.as_bytes(); - let addr_ba = lean_alloc_sarray(1, 32, 32); - let addr_data = lean_sarray_cptr(addr_ba); - std::ptr::copy_nonoverlapping(addr_bytes.as_ptr(), addr_data, 32); - - let entry_obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(entry_obj, 0, name_obj); - lean_ctor_set(entry_obj, 1, addr_ba); - - lean_array_set_core(name_to_addr_arr, i, entry_obj); - } - - // Build RawCompiledEnv - let compiled_obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(compiled_obj, 0, blocks_arr); - lean_ctor_set(compiled_obj, 1, name_to_addr_arr); - 
- // Build RustCompilationResult - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env); - lean_ctor_set(result, 1, condensed_obj); - lean_ctor_set(result, 2, compiled_obj); - - lean_io_result_mk_ok(result) - } - })) -} - -/// FFI function to compile a Lean environment to serialized Ixon.Env bytes. -#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env(env_consts_ptr: *const c_void) -> *mut c_void { - ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); - let rust_env = Arc::new(rust_env); - - let compile_stt = match compile_env(&rust_env) { - Ok(stt) => stt, - Err(e) => { - let msg = format!("rs_compile_env: Rust compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; - }, - }; - - // Serialize the compiled Env to bytes - let mut buf = Vec::new(); - if let Err(e) = compile_stt.env.put(&mut buf) { - let msg = format!("rs_compile_env: Env serialization failed: {}", e); - return unsafe { make_compile_io_error(&msg) }; - } - - // Build Lean ByteArray - unsafe { - let ba = lean_alloc_sarray(1, buf.len(), buf.len()); - let ba_data = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(buf.as_ptr(), ba_data, buf.len()); - lean_io_result_mk_ok(ba) - } - })) -} - -/// Round-trip a RawEnv: decode from Lean, re-encode via builder. -/// This performs a full decode/build cycle to verify FFI correctness. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_raw_env( - raw_env_ptr: *const c_void, -) -> *mut c_void { - let env = decode_raw_env(raw_env_ptr); - build_raw_env(&env) -} - -/// FFI function to run all compilation phases and return combined results. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_phases( - env_consts_ptr: *const c_void, -) -> *mut c_void { - ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); - let env_len = rust_env.len(); - let rust_env = Arc::new(rust_env); - - let mut cache = LeanBuildCache::with_capacity(env_len); - let raw_env = build_raw_environment(&mut cache, &rust_env); - - let ref_graph = build_ref_graph(&rust_env); - - let condensed = compute_sccs(&ref_graph.out_refs); - - let condensed_obj = build_condensed_blocks(&mut cache, &condensed); - - let compile_stt = match compile_env(&rust_env) { - Ok(stt) => stt, - Err(e) => { - let msg = format!("rs_compile_phases: compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; - }, - }; - // Build Lean objects from compile results - unsafe { - let consts: Vec<_> = compile_stt - .env - .consts - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (addr, constant)) in consts.iter().enumerate() { - let raw_const = build_raw_const(addr, constant); - lean_array_set_core(consts_arr, i, raw_const); - } - - let named: Vec<_> = compile_stt - .env - .named - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let named_arr = lean_alloc_array(named.len(), named.len()); - for (i, (name, n)) in named.iter().enumerate() { - let raw_named = build_raw_named(&mut cache, name, &n.addr, &n.meta); - lean_array_set_core(named_arr, i, raw_named); - } - - let blobs: Vec<_> = compile_stt - .env - .blobs - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let blobs_arr = lean_alloc_array(blobs.len(), blobs.len()); - for (i, (addr, bytes)) in blobs.iter().enumerate() { - let raw_blob = build_raw_blob(addr, bytes); - lean_array_set_core(blobs_arr, i, raw_blob); - } - - let comms: Vec<_> = compile_stt - .env - .comms - .iter() - .map(|e| (e.key().clone(), 
e.value().clone())) - .collect(); - let comms_arr = lean_alloc_array(comms.len(), comms.len()); - for (i, (addr, comm)) in comms.iter().enumerate() { - let raw_comm = build_raw_comm(addr, comm); - lean_array_set_core(comms_arr, i, raw_comm); - } - - // Build names array (Address → Ix.Name) - let names: Vec<_> = compile_stt - .env - .names - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let names_arr = lean_alloc_array(names.len(), names.len()); - for (i, (addr, name)) in names.iter().enumerate() { - let obj = build_raw_name_entry(&mut cache, addr, name); - lean_array_set_core(names_arr, i, obj); - } - - let raw_ixon_env = lean_alloc_ctor(0, 5, 0); - lean_ctor_set(raw_ixon_env, 0, consts_arr); - lean_ctor_set(raw_ixon_env, 1, named_arr); - lean_ctor_set(raw_ixon_env, 2, blobs_arr); - lean_ctor_set(raw_ixon_env, 3, comms_arr); - lean_ctor_set(raw_ixon_env, 4, names_arr); - - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env); - lean_ctor_set(result, 1, condensed_obj); - lean_ctor_set(result, 2, raw_ixon_env); - - lean_io_result_mk_ok(result) - } - })) -} - -/// FFI function to compile a Lean environment to a RawEnv. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env_to_ixon( - env_consts_ptr: *const c_void, -) -> *mut c_void { - ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); - let rust_env = Arc::new(rust_env); - - let compile_stt = match compile_env(&rust_env) { - Ok(stt) => stt, - Err(e) => { - let msg = - format!("rs_compile_env_to_ixon: compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; - }, - }; - - let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - - unsafe { - let consts: Vec<_> = compile_stt - .env - .consts - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (addr, constant)) in consts.iter().enumerate() { - let raw_const = build_raw_const(addr, constant); - lean_array_set_core(consts_arr, i, raw_const); - } - - let named: Vec<_> = compile_stt - .env - .named - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let named_arr = lean_alloc_array(named.len(), named.len()); - for (i, (name, n)) in named.iter().enumerate() { - let raw_named = build_raw_named(&mut cache, name, &n.addr, &n.meta); - lean_array_set_core(named_arr, i, raw_named); - } - - let blobs: Vec<_> = compile_stt - .env - .blobs - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let blobs_arr = lean_alloc_array(blobs.len(), blobs.len()); - for (i, (addr, bytes)) in blobs.iter().enumerate() { - let raw_blob = build_raw_blob(addr, bytes); - lean_array_set_core(blobs_arr, i, raw_blob); - } - - let comms: Vec<_> = compile_stt - .env - .comms - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let comms_arr = lean_alloc_array(comms.len(), comms.len()); - for (i, (addr, comm)) in comms.iter().enumerate() { - let raw_comm = build_raw_comm(addr, comm); - lean_array_set_core(comms_arr, i, raw_comm); - } - - // Build names array (Address → Ix.Name) - let 
names: Vec<_> = compile_stt - .env - .names - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let names_arr = lean_alloc_array(names.len(), names.len()); - for (i, (addr, name)) in names.iter().enumerate() { - let obj = build_raw_name_entry(&mut cache, addr, name); - lean_array_set_core(names_arr, i, obj); - } - - let result = lean_alloc_ctor(0, 5, 0); - lean_ctor_set(result, 0, consts_arr); - lean_ctor_set(result, 1, named_arr); - lean_ctor_set(result, 2, blobs_arr); - lean_ctor_set(result, 3, comms_arr); - lean_ctor_set(result, 4, names_arr); - lean_io_result_mk_ok(result) - } - })) -} - -/// FFI function to canonicalize environment to Ix.RawEnvironment. -#[unsafe(no_mangle)] -pub extern "C" fn rs_canonicalize_env_to_ix( - env_consts_ptr: *const c_void, -) -> *mut c_void { - ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); - let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - let raw_env = build_raw_environment(&mut cache, &rust_env); - unsafe { lean_io_result_mk_ok(raw_env) } - })) -} - -// ============================================================================= -// RustCompiledEnv - Holds Rust compilation results for comparison -// ============================================================================= - -/// Rust-compiled environment holding blocks indexed by low-link name. -/// Each block is stored as serialized bytes for comparison with Lean output. -pub struct RustCompiledEnv { - /// Map from low-link name to (serialized constant bytes, sharing vector length) - blocks: HashMap, usize)>, - /// The full compile state for accessing pre-sharing expressions - compile_state: CompileState, -} - -// ============================================================================= -// Block-by-block comparison FFI -// ============================================================================= - -/// FFI: Simple test to verify FFI round-trip works. 
-/// Takes a Lean.Name and returns a magic number to verify the call succeeded. -#[unsafe(no_mangle)] -extern "C" fn rs_test_ffi_roundtrip(name_ptr: *const c_void) -> u64 { - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr, &global_cache); - - // Return a magic number plus the hash of the name to verify it worked - let hash = name.get_hash(); - let hash_bytes = hash.as_bytes(); - let hash_prefix = - u64::from_le_bytes(hash_bytes[0..8].try_into().unwrap_or([0u8; 8])); - - // Magic number 0xDEADBEEF plus hash prefix - 0xDEAD_BEEF_0000_0000 | (hash_prefix & 0x0000_0000_FFFF_FFFF) -} - -/// FFI: Compile entire environment with Rust, returning a handle to RustCompiledEnv. -/// Takes: -/// - env_consts_ptr: pointer to List (Name x ConstantInfo) from Lean environment -/// -/// Returns: pointer to RustCompiledEnv (or null on failure) -#[unsafe(no_mangle)] -extern "C" fn rs_compile_env_rust_first( - env_consts_ptr: *const c_void, -) -> *mut RustCompiledEnv { - // Decode Lean environment - let lean_env = lean_ptr_to_env(env_consts_ptr); - let lean_env = Arc::new(lean_env); - - // Compile with Rust - let rust_stt = match compile_env(&lean_env) { - Ok(stt) => stt, - Err(_e) => { - return std::ptr::null_mut(); - }, - }; - - // Build block map: lowlink name -> (serialized bytes, sharing len) - let mut blocks: HashMap, usize)> = HashMap::new(); - - // Iterate over all names and their addresses - for entry in rust_stt.name_to_addr.iter() { - let name = entry.key().clone(); - let addr = entry.value().clone(); - - // Skip if we already have this block (multiple names map to same block) - if blocks.contains_key(&name) { - continue; - } - - // Get the compiled constant - if let Some(constant) = rust_stt.env.get_const(&addr) { - let mut bytes = Vec::new(); - constant.put(&mut bytes); - let sharing_len = constant.sharing.len(); - blocks.insert(name, (bytes, sharing_len)); - } - } - - // Return boxed RustCompiledEnv with full compile state for 
pre-sharing access - Box::into_raw(Box::new(RustCompiledEnv { blocks, compile_state: rust_stt })) -} - -/// FFI: Compare a single block and return packed result. -/// Returns a packed u64: high 32 bits = matches (1) or error code (0 = mismatch, 2 = not found) -/// low 32 bits = first diff offset (if mismatch) -#[unsafe(no_mangle)] -extern "C" fn rs_compare_block( - rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, - lean_bytes: &LeanSArrayObject, -) -> u64 { - if rust_env.is_null() { - return 2u64 << 32; // not found - } - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); - - let rust_env = unsafe { &*rust_env }; - let lean_data = lean_bytes.data(); - - // Look up Rust's compiled block - let rust_bytes = match rust_env.blocks.get(&name) { - Some((bytes, _)) => bytes, - None => { - // Block not found in Rust compilation: code 2 - return 2u64 << 32; - }, - }; - - // Compare bytes - if rust_bytes == lean_data { - // Match: code 1 - return 1u64 << 32; - } - - // Mismatch: find first differing byte - rust_bytes.iter().zip(lean_data.iter()).position(|(a, b)| a != b).map_or_else( - || { - // One is a prefix of the other - rust_bytes.len().min(lean_data.len()) as u64 - }, - |i| i as u64, - ) -} - -/// FFI: Free a RustCompiledEnv. -#[unsafe(no_mangle)] -extern "C" fn rs_free_rust_env(rust_env: *mut RustCompiledEnv) { - if !rust_env.is_null() { - unsafe { - drop(Box::from_raw(rust_env)); - } - } -} - -/// FFI: Get the number of blocks in a RustCompiledEnv. -#[unsafe(no_mangle)] -extern "C" fn rs_get_rust_env_block_count( - rust_env: *const RustCompiledEnv, -) -> u64 { - if rust_env.is_null() { - return 0; - } - let rust_env = unsafe { &*rust_env }; - rust_env.blocks.len() as u64 -} - -/// FFI: Get Rust's compiled bytes length for a block. 
-#[unsafe(no_mangle)] -extern "C" fn rs_get_block_bytes_len( - rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, -) -> u64 { - if rust_env.is_null() { - return 0; - } - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); - - let rust_env = unsafe { &*rust_env }; - - match rust_env.blocks.get(&name) { - Some((bytes, _)) => bytes.len() as u64, - None => 0, - } -} - -/// FFI: Copy Rust's compiled bytes into a pre-allocated Lean ByteArray. -#[unsafe(no_mangle)] -extern "C" fn rs_copy_block_bytes( - rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, - dest: *mut c_void, -) { - if rust_env.is_null() { - return; - } - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); - - let rust_env = unsafe { &*rust_env }; - - let bytes = match rust_env.blocks.get(&name) { - Some((bytes, _)) => bytes, - None => return, - }; - - // Copy into the Lean ByteArray - let dest_arr: &mut LeanSArrayObject = unsafe { &mut *dest.cast() }; - dest_arr.set_data(bytes); -} - -/// FFI: Get Rust's sharing vector length for a block. -#[unsafe(no_mangle)] -extern "C" fn rs_get_block_sharing_len( - rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, -) -> u64 { - if rust_env.is_null() { - return 0; - } - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); - - let rust_env = unsafe { &*rust_env }; - - match rust_env.blocks.get(&name) { - Some((_, sharing_len)) => *sharing_len as u64, - None => 0, - } -} - -// ============================================================================= -// Pre-sharing expression extraction FFI -// ============================================================================= - -/// Frame for iterative unshare traversal. 
-enum UnshareFrame<'a> { - Visit(&'a Arc), - BuildApp, - BuildLam, - BuildAll, - BuildLet(bool), - BuildPrj(u64, u64), -} - -/// Expand Share(idx) references in an expression using the sharing vector. -/// This reconstructs the "pre-sharing" expression from the post-sharing -/// representation. Uses iterative traversal to avoid stack overflow on deep -/// expressions. -#[allow(clippy::cast_possible_truncation)] -fn unshare_expr( - expr: &Arc, - sharing: &[Arc], -) -> Arc { - let mut stack: Vec> = vec![UnshareFrame::Visit(expr)]; - let mut results: Vec> = Vec::new(); - - while let Some(frame) = stack.pop() { - match frame { - UnshareFrame::Visit(e) => match e.as_ref() { - IxonExpr::Share(idx) => { - if (*idx as usize) < sharing.len() { - stack.push(UnshareFrame::Visit(&sharing[*idx as usize])); - } else { - results.push(e.clone()); - } - }, - IxonExpr::App(f, a) => { - stack.push(UnshareFrame::BuildApp); - stack.push(UnshareFrame::Visit(a)); - stack.push(UnshareFrame::Visit(f)); - }, - IxonExpr::Lam(t, b) => { - stack.push(UnshareFrame::BuildLam); - stack.push(UnshareFrame::Visit(b)); - stack.push(UnshareFrame::Visit(t)); - }, - IxonExpr::All(t, b) => { - stack.push(UnshareFrame::BuildAll); - stack.push(UnshareFrame::Visit(b)); - stack.push(UnshareFrame::Visit(t)); - }, - IxonExpr::Let(nd, t, v, b) => { - stack.push(UnshareFrame::BuildLet(*nd)); - stack.push(UnshareFrame::Visit(b)); - stack.push(UnshareFrame::Visit(v)); - stack.push(UnshareFrame::Visit(t)); - }, - IxonExpr::Prj(ti, fi, v) => { - stack.push(UnshareFrame::BuildPrj(*ti, *fi)); - stack.push(UnshareFrame::Visit(v)); - }, - // Leaf nodes - no children to unshare - _ => results.push(e.clone()), - }, - UnshareFrame::BuildApp => { - let a = results.pop().unwrap(); - let f = results.pop().unwrap(); - results.push(Arc::new(IxonExpr::App(f, a))); - }, - UnshareFrame::BuildLam => { - let b = results.pop().unwrap(); - let t = results.pop().unwrap(); - results.push(Arc::new(IxonExpr::Lam(t, b))); - }, - 
UnshareFrame::BuildAll => { - let b = results.pop().unwrap(); - let t = results.pop().unwrap(); - results.push(Arc::new(IxonExpr::All(t, b))); - }, - UnshareFrame::BuildLet(nd) => { - let b = results.pop().unwrap(); - let v = results.pop().unwrap(); - let t = results.pop().unwrap(); - results.push(Arc::new(IxonExpr::Let(nd, t, v, b))); - }, - UnshareFrame::BuildPrj(ti, fi) => { - let v = results.pop().unwrap(); - results.push(Arc::new(IxonExpr::Prj(ti, fi, v))); - }, - } - } - - results.pop().unwrap() -} - -/// FFI: Get the pre-sharing root expressions for a constant. -/// Returns the number of root expressions, and writes serialized expressions to the output buffer. -/// Each expression is serialized without sharing (Share nodes are expanded). -/// -/// Output format: [n_exprs:u64, len1:u64, expr1_bytes..., len2:u64, expr2_bytes..., ...] -#[unsafe(no_mangle)] -extern "C" fn rs_get_pre_sharing_exprs( - rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, - out_buf: *mut c_void, -) -> u64 { - if rust_env.is_null() { - return 0; - } - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); - - let rust_env = unsafe { &*rust_env }; - - // Look up the address for this name - let addr = match rust_env.compile_state.name_to_addr.get(&name) { - Some(a) => a.clone(), - None => { - return 0; - }, - }; - - // Get the constant (note: contains post-sharing expressions) - let constant = match rust_env.compile_state.env.get_const(&addr) { - Some(c) => c, - None => { - return 0; - }, - }; - - // Extract root expressions from the constant info - let root_exprs: Vec> = match &constant.info { - ConstantInfo::Defn(def) => vec![def.typ.clone(), def.value.clone()], - ConstantInfo::Axio(ax) => vec![ax.typ.clone()], - ConstantInfo::Quot(q) => vec![q.typ.clone()], - ConstantInfo::Recr(rec) => { - let mut exprs = vec![rec.typ.clone()]; - for rule in &rec.rules { - exprs.push(rule.rhs.clone()); - } - exprs - }, - // Projections 
don't contain expressions directly - ConstantInfo::CPrj(_) - | ConstantInfo::RPrj(_) - | ConstantInfo::IPrj(_) - | ConstantInfo::DPrj(_) => { - vec![] - }, - ConstantInfo::Muts(muts) => { - let mut exprs = Vec::new(); - for mc in muts { - match mc { - crate::ix::ixon::constant::MutConst::Defn(def) => { - exprs.push(def.typ.clone()); - exprs.push(def.value.clone()); - }, - crate::ix::ixon::constant::MutConst::Indc(ind) => { - exprs.push(ind.typ.clone()); - for ctor in &ind.ctors { - exprs.push(ctor.typ.clone()); - } - }, - crate::ix::ixon::constant::MutConst::Recr(rec) => { - exprs.push(rec.typ.clone()); - for rule in &rec.rules { - exprs.push(rule.rhs.clone()); - } - }, - } - } - exprs - }, - }; - - // Unshare and serialize each root expression - let mut output_bytes: Vec = Vec::new(); - let n_exprs = root_exprs.len() as u64; - - // Write number of expressions - output_bytes.extend_from_slice(&n_exprs.to_le_bytes()); - - for expr in &root_exprs { - // Unshare the expression - let unshared = unshare_expr(expr, &constant.sharing); - - // Serialize to bytes - let mut expr_bytes: Vec = Vec::new(); - put_expr(&unshared, &mut expr_bytes); - - // Write length and bytes - output_bytes.extend_from_slice(&(expr_bytes.len() as u64).to_le_bytes()); - output_bytes.extend(expr_bytes); - } - - // Write to output buffer - let out_arr: &mut LeanSArrayObject = unsafe { &mut *out_buf.cast() }; - out_arr.set_data(&output_bytes); - - n_exprs -} - -/// FFI: Get the buffer length needed for pre-sharing expressions. 
-#[unsafe(no_mangle)] -extern "C" fn rs_get_pre_sharing_exprs_len( - rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, -) -> u64 { - if rust_env.is_null() { - return 0; - } - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); - - let rust_env = unsafe { &*rust_env }; - - // Look up the address for this name - let addr = match rust_env.compile_state.name_to_addr.get(&name) { - Some(a) => a.clone(), - None => return 0, - }; - - // Get the constant - let constant = match rust_env.compile_state.env.get_const(&addr) { - Some(c) => c, - None => return 0, - }; - - // Count root expressions - let n_exprs = match &constant.info { - ConstantInfo::Defn(_) => 2, - ConstantInfo::Axio(_) | ConstantInfo::Quot(_) => 1, - ConstantInfo::Recr(rec) => 1 + rec.rules.len(), - // Projections don't contain expressions directly - ConstantInfo::CPrj(_) - | ConstantInfo::RPrj(_) - | ConstantInfo::IPrj(_) - | ConstantInfo::DPrj(_) => 0, - ConstantInfo::Muts(muts) => { - let mut count = 0; - for mc in muts { - match mc { - crate::ix::ixon::constant::MutConst::Defn(_) => count += 2, - crate::ix::ixon::constant::MutConst::Indc(ind) => { - count += 1 + ind.ctors.len() - }, - crate::ix::ixon::constant::MutConst::Recr(rec) => { - count += 1 + rec.rules.len() - }, - } - } - count - }, - }; - - // Estimate: 8 bytes per header + some for expression data - // This is an upper bound estimate - (8 + n_exprs * 1024) as u64 -} - -/// FFI: Look up a constant's compiled address from RustCompiledEnv. -/// Copies the 32-byte blake3 hash into the provided ByteArray. -/// Returns 1 on success, 0 if name not found. 
-#[unsafe(no_mangle)] -extern "C" fn rs_lookup_const_addr( - rust_env: *const RustCompiledEnv, - name_ptr: *const c_void, - out_addr: *mut c_void, -) -> u64 { - if rust_env.is_null() { - return 0; - } - let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr, &global_cache); - - let rust_env = unsafe { &*rust_env }; - - // Look up the address for this name - match rust_env.compile_state.name_to_addr.get(&name) { - Some(addr_ref) => { - // Copy the 32-byte address into the output ByteArray - let out_arr: &mut LeanSArrayObject = unsafe { &mut *out_addr.cast() }; - out_arr.set_data(addr_ref.as_bytes()); - 1 - }, - None => 0, - } -} - -/// FFI: Get the total number of compiled constants in RustCompiledEnv. -#[unsafe(no_mangle)] -extern "C" fn rs_get_compiled_const_count( - rust_env: *const RustCompiledEnv, -) -> u64 { - if rust_env.is_null() { - return 0; - } - let rust_env = unsafe { &*rust_env }; - rust_env.compile_state.name_to_addr.len() as u64 -} - -// ============================================================================= -// Error type FFI builders -// ============================================================================= - -use crate::ix::ixon::error::{CompileError, DecompileError, SerializeError}; - -/// Build a Lean String from a Rust &str. -fn build_lean_string(s: &str) -> *mut c_void { - let cstr = CString::new(s) - .unwrap_or_else(|_| CString::new("(invalid string)").unwrap()); - unsafe { lean_mk_string(cstr.as_ptr()) } -} - -/// Build a Lean Nat from a usize. -fn build_lean_nat_usize(n: usize) -> *mut c_void { - unsafe { lean_uint64_to_nat(n as u64) } -} - -/// Build a Lean Ixon.SerializeError from a Rust SerializeError. 
-/// -/// Tags 0–6: -/// 0: unexpectedEof (expected : String) → 1 obj -/// 1: invalidTag (tag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8) -/// 2: invalidFlag (flag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8) -/// 3: invalidVariant (variant : UInt64) (context : String) → 1 obj + 8 scalar (UInt64) -/// 4: invalidBool (value : UInt8) → 0 obj + 1 scalar (UInt8) -/// 5: addressError → 0 obj + 0 scalar -/// 6: invalidShareIndex (idx : UInt64) (max : Nat) → 1 obj (Nat) + 8 scalar (UInt64) -pub fn build_serialize_error(se: &SerializeError) -> *mut c_void { - unsafe { - match se { - SerializeError::UnexpectedEof { expected } => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(expected)); - obj - }, - SerializeError::InvalidTag { tag, context } => { - // 1 obj (String) + 1 scalar byte (UInt8) - let obj = lean_alloc_ctor(1, 1, 1); - lean_ctor_set(obj, 0, build_lean_string(context)); - lean_ctor_set_uint8(obj, 8, *tag); - obj - }, - SerializeError::InvalidFlag { flag, context } => { - let obj = lean_alloc_ctor(2, 1, 1); - lean_ctor_set(obj, 0, build_lean_string(context)); - lean_ctor_set_uint8(obj, 8, *flag); - obj - }, - SerializeError::InvalidVariant { variant, context } => { - let obj = lean_alloc_ctor(3, 1, 8); - lean_ctor_set(obj, 0, build_lean_string(context)); - lean_ctor_set_uint64(obj, 8, *variant); - obj - }, - SerializeError::InvalidBool { value } => { - let obj = lean_alloc_ctor(4, 0, 1); - lean_ctor_set_uint8(obj, 0, *value); - obj - }, - SerializeError::AddressError => lean_alloc_ctor(5, 0, 0), - SerializeError::InvalidShareIndex { idx, max } => { - let obj = lean_alloc_ctor(6, 1, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max)); - lean_ctor_set_uint64(obj, 8, *idx); - obj - }, - } - } -} - -/// Decode a Lean Ixon.SerializeError to a Rust SerializeError. 
-pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let expected = - as_ref_unsafe::(str_ptr.cast()).as_string(); - SerializeError::UnexpectedEof { expected } - }, - 1 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let tag_val = *base.add(8 + 8); - let context = - as_ref_unsafe::(str_ptr.cast()).as_string(); - SerializeError::InvalidTag { tag: tag_val, context } - }, - 2 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let flag = *base.add(8 + 8); - let context = - as_ref_unsafe::(str_ptr.cast()).as_string(); - SerializeError::InvalidFlag { flag, context } - }, - 3 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let variant = *base.add(8 + 8).cast::(); - let context = - as_ref_unsafe::(str_ptr.cast()).as_string(); - SerializeError::InvalidVariant { variant, context } - }, - 4 => { - let base = ptr.cast::(); - let value = *base.add(8); - SerializeError::InvalidBool { value } - }, - 5 => SerializeError::AddressError, - 6 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - let max = Nat::from_ptr(nat_ptr) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - SerializeError::InvalidShareIndex { idx, max } - }, - _ => unreachable!("Invalid SerializeError tag: {}", tag), - } - } -} - -/// Build a Lean DecompileError from a Rust DecompileError. 
-/// -/// Layout for index variants (tags 0–4): -/// `(idx : UInt64) (len/max : Nat) (constant : String)` -/// → 2 object fields (Nat, String) + 8 scalar bytes (UInt64) -/// → `lean_alloc_ctor(tag, 2, 8)` -/// → obj[0] = Nat, obj[1] = String, scalar[0] = UInt64 -pub fn build_decompile_error(err: &DecompileError) -> *mut c_void { - unsafe { - match err { - DecompileError::InvalidRefIndex { idx, refs_len, constant } => { - let obj = lean_alloc_ctor(0, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*refs_len)); - lean_ctor_set(obj, 1, build_lean_string(constant)); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj - }, - DecompileError::InvalidUnivIndex { idx, univs_len, constant } => { - let obj = lean_alloc_ctor(1, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*univs_len)); - lean_ctor_set(obj, 1, build_lean_string(constant)); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj - }, - DecompileError::InvalidShareIndex { idx, max, constant } => { - let obj = lean_alloc_ctor(2, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max)); - lean_ctor_set(obj, 1, build_lean_string(constant)); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj - }, - DecompileError::InvalidRecIndex { idx, ctx_size, constant } => { - let obj = lean_alloc_ctor(3, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*ctx_size)); - lean_ctor_set(obj, 1, build_lean_string(constant)); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj - }, - DecompileError::InvalidUnivVarIndex { idx, max, constant } => { - let obj = lean_alloc_ctor(4, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max)); - lean_ctor_set(obj, 1, build_lean_string(constant)); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj - }, - DecompileError::MissingAddress(addr) => { - // tag 5, 1 object (Address = ByteArray) - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj - }, - DecompileError::MissingMetadata(addr) => { - // tag 6, 1 object (Address = ByteArray) - let obj = 
lean_alloc_ctor(6, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj - }, - DecompileError::BlobNotFound(addr) => { - // tag 7, 1 object (Address = ByteArray) - let obj = lean_alloc_ctor(7, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj - }, - DecompileError::BadBlobFormat { addr, expected } => { - // tag 8, 2 objects (Address, String) - let obj = lean_alloc_ctor(8, 2, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - lean_ctor_set(obj, 1, build_lean_string(expected)); - obj - }, - DecompileError::BadConstantFormat { msg } => { - // tag 9, 1 object (String) - let obj = lean_alloc_ctor(9, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(msg)); - obj - }, - DecompileError::Serialize(se) => { - // tag 10, 1 object (SerializeError) - let obj = lean_alloc_ctor(10, 1, 0); - lean_ctor_set(obj, 0, build_serialize_error(se)); - obj - }, - } - } -} - -/// Decode a Lean DecompileError to a Rust DecompileError. -pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let refs_len = Nat::from_ptr(nat_ptr) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - DecompileError::InvalidRefIndex { idx, refs_len, constant } - }, - 1 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let univs_len = Nat::from_ptr(nat_ptr) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - DecompileError::InvalidUnivIndex { idx, univs_len, constant } - }, - 2 => { - let nat_ptr = 
lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let max = Nat::from_ptr(nat_ptr) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - DecompileError::InvalidShareIndex { idx, max, constant } - }, - 3 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let ctx_size = Nat::from_ptr(nat_ptr) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - DecompileError::InvalidRecIndex { idx, ctx_size, constant } - }, - 4 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let max = Nat::from_ptr(nat_ptr) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - DecompileError::InvalidUnivVarIndex { idx, max, constant } - }, - 5 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::MissingAddress(decode_ixon_address(addr_ptr)) - }, - 6 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::MissingMetadata(decode_ixon_address(addr_ptr)) - }, - 7 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::BlobNotFound(decode_ixon_address(addr_ptr)) - }, - 8 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let addr = decode_ixon_address(addr_ptr); - let expected = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - DecompileError::BadBlobFormat { addr, expected } - }, - 9 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let msg = - 
as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - DecompileError::BadConstantFormat { msg } - }, - 10 => { - let se_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::Serialize(decode_serialize_error(se_ptr)) - }, - _ => unreachable!("Invalid DecompileError tag: {}", tag), - } - } -} - -/// Build a Lean CompileError from a Rust CompileError. -/// -/// Tags 0–5: -/// 0: missingConstant (name : String) → 1 obj -/// 1: missingAddress (addr : Address) → 1 obj -/// 2: invalidMutualBlock (reason : String) → 1 obj -/// 3: unsupportedExpr (desc : String) → 1 obj -/// 4: unknownUnivParam (curr param : String) → 2 obj -/// 5: serializeError (msg : String) → 1 obj -pub fn build_compile_error(err: &CompileError) -> *mut c_void { - unsafe { - match err { - CompileError::MissingConstant { name } => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(name)); - obj - }, - CompileError::MissingAddress(addr) => { - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj - }, - CompileError::InvalidMutualBlock { reason } => { - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(reason)); - obj - }, - CompileError::UnsupportedExpr { desc } => { - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(desc)); - obj - }, - CompileError::UnknownUnivParam { curr, param } => { - let obj = lean_alloc_ctor(4, 2, 0); - lean_ctor_set(obj, 0, build_lean_string(curr)); - lean_ctor_set(obj, 1, build_lean_string(param)); - obj - }, - CompileError::Serialize(se) => { - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_serialize_error(se)); - obj - }, - } - } -} - -/// Decode a Lean CompileError to a Rust CompileError. 
-pub fn decode_compile_error(ptr: *const c_void) -> CompileError { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - CompileError::MissingConstant { name } - }, - 1 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - CompileError::MissingAddress(decode_ixon_address(addr_ptr)) - }, - 2 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let reason = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - CompileError::InvalidMutualBlock { reason } - }, - 3 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let desc = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); - CompileError::UnsupportedExpr { desc } - }, - 4 => { - let str0 = lean_ctor_get(ptr as *mut _, 0); - let str1 = lean_ctor_get(ptr as *mut _, 1); - let curr = - as_ref_unsafe::(str0.cast()).as_string().clone(); - let param = - as_ref_unsafe::(str1.cast()).as_string().clone(); - CompileError::UnknownUnivParam { curr, param } - }, - 5 => { - let se_ptr = lean_ctor_get(ptr as *mut _, 0); - CompileError::Serialize(decode_serialize_error(se_ptr)) - }, - _ => unreachable!("Invalid CompileError tag: {}", tag), - } - } -} - -/// FFI: Round-trip a DecompileError: Lean → Rust → Lean. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_decompile_error( - ptr: *const c_void, -) -> *mut c_void { - let err = decode_decompile_error(ptr); - build_decompile_error(&err) -} - -/// FFI: Round-trip a CompileError: Lean → Rust → Lean. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_compile_error( - ptr: *const c_void, -) -> *mut c_void { - let err = decode_compile_error(ptr); - build_compile_error(&err) -} - -/// FFI: Round-trip a SerializeError: Lean → Rust → Lean. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_serialize_error( - ptr: *const c_void, -) -> *mut c_void { - let err = decode_serialize_error(ptr); - build_serialize_error(&err) -} - -// ============================================================================= -// Decompilation FFI -// ============================================================================= - -/// FFI: Decompile an Ixon.RawEnv → Except DecompileError (Array (Ix.Name × Ix.ConstantInfo)). Pure. -#[unsafe(no_mangle)] -pub extern "C" fn rs_decompile_env(raw_env_ptr: *const c_void) -> *mut c_void { - let decoded = decode_raw_env(raw_env_ptr); - let env = decoded_to_ixon_env(&decoded); - - // Wrap in CompileState (decompile_env only uses .env) - let stt = CompileState { - env, - name_to_addr: DashMap::new(), - blocks: DashSet::new(), - block_stats: DashMap::new(), - }; - - match decompile_env(&stt) { - Ok(dstt) => { - let entries: Vec<_> = dstt.env.into_iter().collect(); - let mut cache = LeanBuildCache::with_capacity(entries.len()); - unsafe { - let arr = lean_alloc_array(entries.len(), entries.len()); - for (i, (name, info)) in entries.iter().enumerate() { - let name_obj = build_name(&mut cache, name); - let info_obj = build_constant_info(&mut cache, info); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, info_obj); - lean_array_set_core(arr, i, pair); - } - // Except.ok (tag 1) - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, arr); - obj - } - }, - Err(e) => { - // Except.error (tag 0) — build DecompileError directly - unsafe { - let err_obj = build_decompile_error(&e); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, err_obj); - obj - } - }, - } -} diff --git a/src/lean/ffi/graph.rs b/src/lean/ffi/graph.rs deleted file mode 100644 index 5258c35d..00000000 --- a/src/lean/ffi/graph.rs +++ /dev/null @@ -1,136 +0,0 @@ -//! Graph and SCC FFI functions. 
- -use std::ffi::c_void; -use std::sync::Arc; - -use super::ffi_io_guard; -use crate::ix::condense::compute_sccs; -use crate::ix::graph::build_ref_graph; -use crate::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_set, - lean_io_result_mk_ok, -}; - -use super::builder::LeanBuildCache; -use super::ix::name::build_name; -use super::lean_env::lean_ptr_to_env; - -/// Build an Array (Ix.Name × Array Ix.Name) from a RefMap. -pub fn build_ref_graph_array( - cache: &mut LeanBuildCache, - refs: &crate::ix::graph::RefMap, -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(refs.len(), refs.len()); - for (i, (name, ref_set)) in refs.iter().enumerate() { - let name_obj = build_name(cache, name); - - let refs_arr = lean_alloc_array(ref_set.len(), ref_set.len()); - for (j, ref_name) in ref_set.iter().enumerate() { - let ref_name_obj = build_name(cache, ref_name); - lean_array_set_core(refs_arr, j, ref_name_obj); - } - - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, refs_arr); - - lean_array_set_core(arr, i, pair); - } - arr - } -} - -/// Build a RustCondensedBlocks structure. 
-pub fn build_condensed_blocks( - cache: &mut LeanBuildCache, - condensed: &crate::ix::condense::CondensedBlocks, -) -> *mut c_void { - unsafe { - // Build lowLinks: Array (Ix.Name × Ix.Name) - let low_links_arr = - lean_alloc_array(condensed.low_links.len(), condensed.low_links.len()); - for (i, (name, low_link)) in condensed.low_links.iter().enumerate() { - let name_obj = build_name(cache, name); - let low_link_obj = build_name(cache, low_link); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, low_link_obj); - lean_array_set_core(low_links_arr, i, pair); - } - - // Build blocks: Array (Ix.Name × Array Ix.Name) - let blocks_arr = - lean_alloc_array(condensed.blocks.len(), condensed.blocks.len()); - for (i, (name, block_set)) in condensed.blocks.iter().enumerate() { - let name_obj = build_name(cache, name); - let block_names_arr = lean_alloc_array(block_set.len(), block_set.len()); - for (j, block_name) in block_set.iter().enumerate() { - let block_name_obj = build_name(cache, block_name); - lean_array_set_core(block_names_arr, j, block_name_obj); - } - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, block_names_arr); - lean_array_set_core(blocks_arr, i, pair); - } - - // Build blockRefs: Array (Ix.Name × Array Ix.Name) - let block_refs_arr = - lean_alloc_array(condensed.block_refs.len(), condensed.block_refs.len()); - for (i, (name, ref_set)) in condensed.block_refs.iter().enumerate() { - let name_obj = build_name(cache, name); - let refs_arr = lean_alloc_array(ref_set.len(), ref_set.len()); - for (j, ref_name) in ref_set.iter().enumerate() { - let ref_name_obj = build_name(cache, ref_name); - lean_array_set_core(refs_arr, j, ref_name_obj); - } - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, refs_arr); - lean_array_set_core(block_refs_arr, i, pair); - } - - // Build RustCondensedBlocks structure (3 fields) - let 
result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, low_links_arr); - lean_ctor_set(result, 1, blocks_arr); - lean_ctor_set(result, 2, block_refs_arr); - result - } -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// FFI function to build a reference graph from a Lean environment. -#[unsafe(no_mangle)] -pub extern "C" fn rs_build_ref_graph( - env_consts_ptr: *const c_void, -) -> *mut c_void { - ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); - let rust_env = Arc::new(rust_env); - let ref_graph = build_ref_graph(&rust_env); - let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - let result = build_ref_graph_array(&mut cache, &ref_graph.out_refs); - unsafe { lean_io_result_mk_ok(result) } - })) -} - -/// FFI function to compute SCCs from a Lean environment. -#[unsafe(no_mangle)] -pub extern "C" fn rs_compute_sccs( - env_consts_ptr: *const c_void, -) -> *mut c_void { - ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); - let rust_env = Arc::new(rust_env); - let ref_graph = build_ref_graph(&rust_env); - let condensed = compute_sccs(&ref_graph.out_refs); - let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - let result = build_condensed_blocks(&mut cache, &condensed); - unsafe { lean_io_result_mk_ok(result) } - })) -} diff --git a/src/lean/ffi/iroh.rs b/src/lean/ffi/iroh.rs deleted file mode 100644 index 91b4f7cd..00000000 --- a/src/lean/ffi/iroh.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::lean::ffi::{BytesData, CResult, to_raw}; -use crate::lean::{as_ref_unsafe, ffi::drop_raw}; - -use std::ffi::{CString, c_char}; - -#[repr(C)] -pub struct PutResponseFFI { - pub message: *mut c_char, - pub hash: *mut c_char, -} - -impl PutResponseFFI { - pub fn new(message: &str, hash: &str) -> Self { - let message = 
CString::new(message).unwrap().into_raw(); - let hash = CString::new(hash).unwrap().into_raw(); - PutResponseFFI { message, hash } - } -} - -#[repr(C)] -pub struct GetResponseFFI { - pub message: *mut c_char, - pub hash: *mut c_char, - pub bytes: *const BytesData, -} - -impl GetResponseFFI { - pub fn new(message: &str, hash: &str, bytes: &[u8]) -> Self { - let message = CString::new(message).unwrap().into_raw(); - let hash = CString::new(hash).unwrap().into_raw(); - let bytes = to_raw(BytesData::from_vec(bytes.to_vec())); - GetResponseFFI { message, hash, bytes } - } -} - -// Frees a `CResult` object that corresponds to the Rust type `Result` -#[unsafe(no_mangle)] -extern "C" fn rs__c_result_iroh_put_response_string_free(ptr: *mut CResult) { - let c_result = as_ref_unsafe(ptr); - // Frees the `PutResponseFFI` struct and inner fields - if c_result.is_ok { - let put_response_ptr = c_result.data as *mut PutResponseFFI; - let put_response = as_ref_unsafe(put_response_ptr); - let message = unsafe { CString::from_raw(put_response.message) }; - let hash = unsafe { CString::from_raw(put_response.hash) }; - drop(message); - drop(hash); - drop_raw(put_response_ptr); - } - // Or free the String error message - else { - let char_ptr = c_result.data as *mut c_char; - let c_string = unsafe { CString::from_raw(char_ptr) }; - drop(c_string); - } - drop_raw(ptr); -} - -// Frees a `CResult` object that corresponds to the Rust type `Result` -#[unsafe(no_mangle)] -extern "C" fn rs__c_result_iroh_get_response_string_free(ptr: *mut CResult) { - let c_result = as_ref_unsafe(ptr); - // Frees the `GetResponseFFI` struct and inner fields - // `Bytes` is already freed by `rs_move_bytes` - if c_result.is_ok { - let get_response_ptr = c_result.data as *mut GetResponseFFI; - let get_response = as_ref_unsafe(get_response_ptr); - let message = unsafe { CString::from_raw(get_response.message) }; - let hash = unsafe { CString::from_raw(get_response.hash) }; - drop(message); - drop(hash); - 
drop_raw(get_response_ptr); - } - // Or free the String error message - else { - let char_ptr = c_result.data as *mut c_char; - let c_string = unsafe { CString::from_raw(char_ptr) }; - drop(c_string); - } - drop_raw(ptr); -} diff --git a/src/lean/ffi/ix/address.rs b/src/lean/ffi/ix/address.rs deleted file mode 100644 index 9b35abf8..00000000 --- a/src/lean/ffi/ix/address.rs +++ /dev/null @@ -1,41 +0,0 @@ -//! Ix.Address build/decode/roundtrip FFI. -//! -//! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash - -use std::ffi::c_void; - -use crate::lean::{ - as_ref_unsafe, lean_alloc_sarray, lean_sarray_cptr, sarray::LeanSArrayObject, -}; - -/// Build a Ix.Address from a blake3::Hash. -/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray -pub fn build_address(hash: &blake3::Hash) -> *mut c_void { - unsafe { - let bytes = hash.as_bytes(); - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let data_ptr = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - ba // Due to unboxing, ByteArray IS the Address - } -} - -/// Round-trip an Ix.Address: decode ByteArray, re-encode. 
-/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray directly -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_address( - addr_ptr: *const c_void, -) -> *mut c_void { - unsafe { - // Address is a single-field struct { hash : ByteArray } - // Due to unboxing, addr_ptr IS the ByteArray directly - let ba: &LeanSArrayObject = as_ref_unsafe(addr_ptr.cast()); - let bytes = ba.data(); - - // Rebuild ByteArray - this IS the Address due to unboxing - let new_ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let data_ptr = lean_sarray_cptr(new_ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - new_ba - } -} diff --git a/src/lean/ffi/ix/constant.rs b/src/lean/ffi/ix/constant.rs deleted file mode 100644 index 19ebc7cf..00000000 --- a/src/lean/ffi/ix/constant.rs +++ /dev/null @@ -1,549 +0,0 @@ -//! Ix.ConstantInfo build/decode/roundtrip FFI. -//! -//! ConstantInfo variants: -//! - Tag 0: axiomInfo (v : AxiomVal) -//! - Tag 1: defnInfo (v : DefinitionVal) -//! - Tag 2: thmInfo (v : TheoremVal) -//! - Tag 3: opaqueInfo (v : OpaqueVal) -//! - Tag 4: quotInfo (v : QuotVal) -//! - Tag 5: inductInfo (v : InductiveVal) -//! - Tag 6: ctorInfo (v : ConstructorVal) -//! 
- Tag 7: recInfo (v : RecursorVal) - -use std::ffi::c_void; - -use crate::ix::env::{ - AxiomVal, ConstantInfo, ConstantVal, ConstructorVal, DefinitionSafety, - DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, - RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, -}; -use crate::lean::array::LeanArrayObject; -use crate::lean::nat::Nat; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, - lean_is_scalar, lean_obj_tag, -}; - -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::expr::{build_expr, decode_ix_expr}; -use super::name::{ - build_name, build_name_array, decode_ix_name, decode_name_array, -}; - -/// Build a Ix.ConstantVal structure. -pub fn build_constant_val( - cache: &mut LeanBuildCache, - cv: &ConstantVal, -) -> *mut c_void { - unsafe { - // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } - let name_obj = build_name(cache, &cv.name); - let level_params_obj = build_name_array(cache, &cv.level_params); - let type_obj = build_expr(cache, &cv.typ); - - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, level_params_obj); - lean_ctor_set(obj, 2, type_obj); - obj - } -} - -/// Build ReducibilityHints. -/// NOTE: In Lean 4, 0-field constructors are boxed scalars when the inductive has -/// other constructors with fields. So opaque and abbrev use lean_box_fn. 
-pub fn build_reducibility_hints(hints: &ReducibilityHints) -> *mut c_void { - unsafe { - match hints { - // | opaque -- tag 0, boxed as scalar - ReducibilityHints::Opaque => lean_box_fn(0), - // | abbrev -- tag 1, boxed as scalar - ReducibilityHints::Abbrev => lean_box_fn(1), - // | regular (h : UInt32) -- tag 2, object constructor - ReducibilityHints::Regular(h) => { - // UInt32 is a scalar, stored inline - let obj = lean_alloc_ctor(2, 0, 4); - // Set the uint32 at offset 0 in the scalar area - let ptr = obj.cast::(); - *(ptr.add(8).cast::()) = *h; - obj - }, - } - } -} - -/// Build a Ix.ConstantInfo from a Rust ConstantInfo. -pub fn build_constant_info( - cache: &mut LeanBuildCache, - info: &ConstantInfo, -) -> *mut c_void { - unsafe { - match info { - // | axiomInfo (v : AxiomVal) -- tag 0 - ConstantInfo::AxiomInfo(v) => { - // AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } - let cnst_obj = build_constant_val(cache, &v.cnst); - let axiom_val = lean_alloc_ctor(0, 1, 1); - lean_ctor_set(axiom_val, 0, cnst_obj); - lean_ctor_set_uint8(axiom_val, 8, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, axiom_val); - obj - }, - // | defnInfo (v : DefinitionVal) -- tag 1 - ConstantInfo::DefnInfo(v) => { - // DefinitionVal = { cnst, value, hints, safety, all } - // NOTE: safety (DefinitionSafety) is a small enum stored as SCALAR - // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety) - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let hints_obj = build_reducibility_hints(&v.hints); - let all_obj = build_name_array(cache, &v.all); - let safety_byte = match v.safety { - DefinitionSafety::Unsafe => 0u8, - DefinitionSafety::Safe => 1u8, - DefinitionSafety::Partial => 2u8, - }; - - let defn_val = lean_alloc_ctor(0, 4, 1); // 4 obj fields, 1 scalar byte - lean_ctor_set(defn_val, 0, cnst_obj); - lean_ctor_set(defn_val, 1, value_obj); - lean_ctor_set(defn_val, 2, 
hints_obj); - lean_ctor_set(defn_val, 3, all_obj); - lean_ctor_set_uint8(defn_val, 4 * 8, safety_byte); - - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, defn_val); - obj - }, - // | thmInfo (v : TheoremVal) -- tag 2 - ConstantInfo::ThmInfo(v) => { - // TheoremVal = { cnst, value, all } - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let all_obj = build_name_array(cache, &v.all); - - let thm_val = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(thm_val, 0, cnst_obj); - lean_ctor_set(thm_val, 1, value_obj); - lean_ctor_set(thm_val, 2, all_obj); - - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, thm_val); - obj - }, - // | opaqueInfo (v : OpaqueVal) -- tag 3 - ConstantInfo::OpaqueInfo(v) => { - // OpaqueVal = { cnst, value, isUnsafe, all } - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let all_obj = build_name_array(cache, &v.all); - - let opaque_val = lean_alloc_ctor(0, 3, 1); - lean_ctor_set(opaque_val, 0, cnst_obj); - lean_ctor_set(opaque_val, 1, value_obj); - lean_ctor_set(opaque_val, 2, all_obj); - lean_ctor_set_uint8(opaque_val, 3 * 8, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, opaque_val); - obj - }, - // | quotInfo (v : QuotVal) -- tag 4 - ConstantInfo::QuotInfo(v) => { - // QuotVal = { cnst, kind } - // NOTE: QuotKind is a small enum stored as SCALAR - // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) - let cnst_obj = build_constant_val(cache, &v.cnst); - let kind_byte = match v.kind { - QuotKind::Type => 0u8, - QuotKind::Ctor => 1u8, - QuotKind::Lift => 2u8, - QuotKind::Ind => 3u8, - }; - - let quot_val = lean_alloc_ctor(0, 1, 1); // 1 obj field, 1 scalar byte - lean_ctor_set(quot_val, 0, cnst_obj); - lean_ctor_set_uint8(quot_val, 8, kind_byte); - - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, quot_val); - obj - }, - // | inductInfo (v : InductiveVal) -- tag 5 - 
ConstantInfo::InductInfo(v) => { - // InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } - let cnst_obj = build_constant_val(cache, &v.cnst); - let num_params_obj = build_nat(&v.num_params); - let num_indices_obj = build_nat(&v.num_indices); - let all_obj = build_name_array(cache, &v.all); - let ctors_obj = build_name_array(cache, &v.ctors); - let num_nested_obj = build_nat(&v.num_nested); - - // 6 object fields, 3 scalar bytes for bools - let induct_val = lean_alloc_ctor(0, 6, 3); - lean_ctor_set(induct_val, 0, cnst_obj); - lean_ctor_set(induct_val, 1, num_params_obj); - lean_ctor_set(induct_val, 2, num_indices_obj); - lean_ctor_set(induct_val, 3, all_obj); - lean_ctor_set(induct_val, 4, ctors_obj); - lean_ctor_set(induct_val, 5, num_nested_obj); - lean_ctor_set_uint8(induct_val, 6 * 8, v.is_rec as u8); - lean_ctor_set_uint8(induct_val, 6 * 8 + 1, v.is_unsafe as u8); - lean_ctor_set_uint8(induct_val, 6 * 8 + 2, v.is_reflexive as u8); - - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, induct_val); - obj - }, - // | ctorInfo (v : ConstructorVal) -- tag 6 - ConstantInfo::CtorInfo(v) => { - // ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } - let cnst_obj = build_constant_val(cache, &v.cnst); - let induct_obj = build_name(cache, &v.induct); - let cidx_obj = build_nat(&v.cidx); - let num_params_obj = build_nat(&v.num_params); - let num_fields_obj = build_nat(&v.num_fields); - - // 5 object fields, 1 scalar byte for bool - let ctor_val = lean_alloc_ctor(0, 5, 1); - lean_ctor_set(ctor_val, 0, cnst_obj); - lean_ctor_set(ctor_val, 1, induct_obj); - lean_ctor_set(ctor_val, 2, cidx_obj); - lean_ctor_set(ctor_val, 3, num_params_obj); - lean_ctor_set(ctor_val, 4, num_fields_obj); - lean_ctor_set_uint8(ctor_val, 5 * 8, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(6, 1, 0); - lean_ctor_set(obj, 0, ctor_val); - obj - }, - // | recInfo (v : RecursorVal) -- tag 7 - ConstantInfo::RecInfo(v) => 
{ - // RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } - let cnst_obj = build_constant_val(cache, &v.cnst); - let all_obj = build_name_array(cache, &v.all); - let num_params_obj = build_nat(&v.num_params); - let num_indices_obj = build_nat(&v.num_indices); - let num_motives_obj = build_nat(&v.num_motives); - let num_minors_obj = build_nat(&v.num_minors); - let rules_obj = build_recursor_rules(cache, &v.rules); - - // 7 object fields, 2 scalar bytes for bools - let rec_val = lean_alloc_ctor(0, 7, 2); - lean_ctor_set(rec_val, 0, cnst_obj); - lean_ctor_set(rec_val, 1, all_obj); - lean_ctor_set(rec_val, 2, num_params_obj); - lean_ctor_set(rec_val, 3, num_indices_obj); - lean_ctor_set(rec_val, 4, num_motives_obj); - lean_ctor_set(rec_val, 5, num_minors_obj); - lean_ctor_set(rec_val, 6, rules_obj); - lean_ctor_set_uint8(rec_val, 7 * 8, v.k as u8); - lean_ctor_set_uint8(rec_val, 7 * 8 + 1, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(7, 1, 0); - lean_ctor_set(obj, 0, rec_val); - obj - }, - } - } -} - -/// Build an Array of RecursorRule. -fn build_recursor_rules( - cache: &mut LeanBuildCache, - rules: &[RecursorRule], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(rules.len(), rules.len()); - for (i, rule) in rules.iter().enumerate() { - // RecursorRule = { ctor : Name, nFields : Nat, rhs : Expr } - let ctor_obj = build_name(cache, &rule.ctor); - let n_fields_obj = build_nat(&rule.n_fields); - let rhs_obj = build_expr(cache, &rule.rhs); - - let rule_obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(rule_obj, 0, ctor_obj); - lean_ctor_set(rule_obj, 1, n_fields_obj); - lean_ctor_set(rule_obj, 2, rhs_obj); - - lean_array_set_core(arr, i, rule_obj); - } - arr - } -} - -// ============================================================================= -// ConstantInfo Decoders -// ============================================================================= - -/// Decode Ix.ConstantVal from Lean pointer. 
-/// ConstantVal = { name : Name, levelParams : Array Name, type : Expr } -pub fn decode_constant_val(ptr: *const c_void) -> ConstantVal { - unsafe { - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let level_params_ptr = lean_ctor_get(ptr as *mut _, 1); - let type_ptr = lean_ctor_get(ptr as *mut _, 2); - - let name = decode_ix_name(name_ptr); - - let level_params_obj: &LeanArrayObject = - as_ref_unsafe(level_params_ptr.cast()); - let level_params: Vec = - level_params_obj.data().iter().map(|&p| decode_ix_name(p)).collect(); - - let typ = decode_ix_expr(type_ptr); - - ConstantVal { name, level_params, typ } - } -} - -/// Decode Lean.ReducibilityHints from Lean pointer. -/// | opaque -- tag 0 -/// | abbrev -- tag 1 -/// | regular (h : UInt32) -- tag 2 -/// -/// NOTE: In Lean 4, boxed scalars are `(tag << 1) | 1`: -/// - opaque (tag 0) → scalar value 1 -/// - abbrev (tag 1) → scalar value 3 -pub fn decode_reducibility_hints(ptr: *const c_void) -> ReducibilityHints { - unsafe { - if lean_is_scalar(ptr) { - // Unbox the scalar: tag = (ptr >> 1) - let tag = (ptr as usize) >> 1; - match tag { - 0 => return ReducibilityHints::Opaque, - 1 => return ReducibilityHints::Abbrev, - _ => panic!("Invalid ReducibilityHints scalar tag: {}", tag), - } - } - - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => ReducibilityHints::Opaque, - 1 => ReducibilityHints::Abbrev, - 2 => { - // regular: 0 obj fields, 4 scalar bytes (UInt32) - let ctor_ptr = ptr.cast::(); - let h = *(ctor_ptr.add(8).cast::()); - ReducibilityHints::Regular(h) - }, - _ => panic!("Invalid ReducibilityHints tag: {}", tag), - } - } -} - -/// Decode Ix.RecursorRule from Lean pointer. 
-/// RecursorRule = { ctor : Name, nfields : Nat, rhs : Expr } -fn decode_recursor_rule(ptr: *const c_void) -> RecursorRule { - unsafe { - let ctor_ptr = lean_ctor_get(ptr as *mut _, 0); - let n_fields_ptr = lean_ctor_get(ptr as *mut _, 1); - let rhs_ptr = lean_ctor_get(ptr as *mut _, 2); - - RecursorRule { - ctor: decode_ix_name(ctor_ptr), - n_fields: Nat::from_ptr(n_fields_ptr), - rhs: decode_ix_expr(rhs_ptr), - } - } -} - -/// Decode Ix.ConstantInfo from Lean pointer. -pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - - match tag { - 0 => { - // axiomInfo: AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } - // Structure: 1 obj field (cnst), 1 scalar byte (isUnsafe) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_unsafe = ctor.get_scalar_u8(1, 0) != 0; - - ConstantInfo::AxiomInfo(AxiomVal { - cnst: decode_constant_val(cnst_ptr), - is_unsafe, - }) - }, - 1 => { - // defnInfo: DefinitionVal = { cnst, value, hints, safety, all } - // NOTE: safety (DefinitionSafety) is a small enum and is stored as a SCALAR field - // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let value_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let hints_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 3); // all is at index 3, not 4! 
- - // safety is a scalar at offset 4*8 = 32 bytes from start of object fields - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let safety_byte = ctor.get_scalar_u8(4, 0); // 4 obj fields, offset 0 in scalar area - let safety = match safety_byte { - 0 => DefinitionSafety::Unsafe, - 1 => DefinitionSafety::Safe, - 2 => DefinitionSafety::Partial, - _ => panic!("Invalid DefinitionSafety: {}", safety_byte), - }; - - ConstantInfo::DefnInfo(DefinitionVal { - cnst: decode_constant_val(cnst_ptr), - value: decode_ix_expr(value_ptr), - hints: decode_reducibility_hints(hints_ptr), - safety, - all: decode_name_array(all_ptr), - }) - }, - 2 => { - // thmInfo: TheoremVal = { cnst, value, all } - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let value_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - - ConstantInfo::ThmInfo(TheoremVal { - cnst: decode_constant_val(cnst_ptr), - value: decode_ix_expr(value_ptr), - all: decode_name_array(all_ptr), - }) - }, - 3 => { - // opaqueInfo: OpaqueVal = { cnst, value, isUnsafe, all } - // Structure: 3 obj fields (cnst, value, all), 1 scalar byte (isUnsafe) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let value_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_unsafe = ctor.get_scalar_u8(3, 0) != 0; - - ConstantInfo::OpaqueInfo(OpaqueVal { - cnst: decode_constant_val(cnst_ptr), - value: decode_ix_expr(value_ptr), - is_unsafe, - all: decode_name_array(all_ptr), - }) - }, - 4 => { - // quotInfo: QuotVal = { cnst, kind } - // NOTE: QuotKind is a small enum (4 0-field ctors), stored as SCALAR - // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let 
kind_byte = ctor.get_scalar_u8(1, 0); // 1 obj field, offset 0 in scalar area - let kind = match kind_byte { - 0 => QuotKind::Type, - 1 => QuotKind::Ctor, - 2 => QuotKind::Lift, - 3 => QuotKind::Ind, - _ => panic!("Invalid QuotKind: {}", kind_byte), - }; - - ConstantInfo::QuotInfo(QuotVal { - cnst: decode_constant_val(cnst_ptr), - kind, - }) - }, - 5 => { - // inductInfo: InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } - // 6 obj fields, 3 scalar bytes - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let num_params_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let num_indices_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 3); - let ctors_ptr = lean_ctor_get(inner_ptr as *mut _, 4); - let num_nested_ptr = lean_ctor_get(inner_ptr as *mut _, 5); - - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_rec = ctor.get_scalar_u8(6, 0) != 0; - let is_unsafe = ctor.get_scalar_u8(6, 1) != 0; - let is_reflexive = ctor.get_scalar_u8(6, 2) != 0; - - ConstantInfo::InductInfo(InductiveVal { - cnst: decode_constant_val(cnst_ptr), - num_params: Nat::from_ptr(num_params_ptr), - num_indices: Nat::from_ptr(num_indices_ptr), - all: decode_name_array(all_ptr), - ctors: decode_name_array(ctors_ptr), - num_nested: Nat::from_ptr(num_nested_ptr), - is_rec, - is_unsafe, - is_reflexive, - }) - }, - 6 => { - // ctorInfo: ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } - // 5 obj fields, 1 scalar byte - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let induct_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let cidx_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let num_params_ptr = lean_ctor_get(inner_ptr as *mut _, 3); - let num_fields_ptr = lean_ctor_get(inner_ptr as *mut _, 4); - - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_unsafe = ctor.get_scalar_u8(5, 0) != 0; - 
- ConstantInfo::CtorInfo(ConstructorVal { - cnst: decode_constant_val(cnst_ptr), - induct: decode_ix_name(induct_ptr), - cidx: Nat::from_ptr(cidx_ptr), - num_params: Nat::from_ptr(num_params_ptr), - num_fields: Nat::from_ptr(num_fields_ptr), - is_unsafe, - }) - }, - 7 => { - // recInfo: RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } - // 7 obj fields, 2 scalar bytes - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let num_params_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let num_indices_ptr = lean_ctor_get(inner_ptr as *mut _, 3); - let num_motives_ptr = lean_ctor_get(inner_ptr as *mut _, 4); - let num_minors_ptr = lean_ctor_get(inner_ptr as *mut _, 5); - let rules_ptr = lean_ctor_get(inner_ptr as *mut _, 6); - - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let k = ctor.get_scalar_u8(7, 0) != 0; - let is_unsafe = ctor.get_scalar_u8(7, 1) != 0; - - let rules_obj: &LeanArrayObject = as_ref_unsafe(rules_ptr.cast()); - let rules: Vec = - rules_obj.data().iter().map(|&p| decode_recursor_rule(p)).collect(); - - ConstantInfo::RecInfo(RecursorVal { - cnst: decode_constant_val(cnst_ptr), - all: decode_name_array(all_ptr), - num_params: Nat::from_ptr(num_params_ptr), - num_indices: Nat::from_ptr(num_indices_ptr), - num_motives: Nat::from_ptr(num_motives_ptr), - num_minors: Nat::from_ptr(num_minors_ptr), - rules, - k, - is_unsafe, - }) - }, - _ => panic!("Invalid ConstantInfo tag: {}", tag), - } - } -} - -/// Round-trip an Ix.ConstantInfo: decode from Lean, re-encode via LeanBuildCache. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_constant_info( - info_ptr: *const c_void, -) -> *mut c_void { - let info = decode_constant_info(info_ptr); - let mut cache = LeanBuildCache::new(); - build_constant_info(&mut cache, &info) -} diff --git a/src/lean/ffi/ix/data.rs b/src/lean/ffi/ix/data.rs deleted file mode 100644 index e195c74e..00000000 --- a/src/lean/ffi/ix/data.rs +++ /dev/null @@ -1,530 +0,0 @@ -//! Ix.DataValue, Ix.Syntax, Ix.SourceInfo build/decode/roundtrip FFI. - -use std::ffi::c_void; - -use crate::ix::env::{ - DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, -}; -use crate::lean::array::LeanArrayObject; -use crate::lean::nat::Nat; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_is_scalar, - lean_mk_string, lean_obj_tag, -}; - -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::name::{build_name, decode_ix_name}; - -/// Build a Ix.Int (ofNat or negSucc). -pub fn build_int(int: &Int) -> *mut c_void { - unsafe { - match int { - Int::OfNat(n) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj - }, - Int::NegSucc(n) => { - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj - }, - } - } -} - -/// Build a Ix.Substring. -pub fn build_substring(ss: &Substring) -> *mut c_void { - unsafe { - let s_cstr = crate::lean::safe_cstring(ss.str.as_str()); - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - lean_ctor_set(obj, 1, build_nat(&ss.start_pos)); - lean_ctor_set(obj, 2, build_nat(&ss.stop_pos)); - obj - } -} - -/// Build a Ix.SourceInfo. 
-pub fn build_source_info(si: &SourceInfo) -> *mut c_void { - unsafe { - match si { - // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0 - SourceInfo::Original(leading, pos, trailing, end_pos) => { - let obj = lean_alloc_ctor(0, 4, 0); - lean_ctor_set(obj, 0, build_substring(leading)); - lean_ctor_set(obj, 1, build_nat(pos)); - lean_ctor_set(obj, 2, build_substring(trailing)); - lean_ctor_set(obj, 3, build_nat(end_pos)); - obj - }, - // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1 - SourceInfo::Synthetic(pos, end_pos, canonical) => { - let obj = lean_alloc_ctor(1, 2, 1); - lean_ctor_set(obj, 0, build_nat(pos)); - lean_ctor_set(obj, 1, build_nat(end_pos)); - lean_ctor_set_uint8(obj, 2 * 8, *canonical as u8); - obj - }, - // | none -- tag 2 - SourceInfo::None => lean_alloc_ctor(2, 0, 0), - } - } -} - -/// Build a Ix.SyntaxPreresolved. -pub fn build_syntax_preresolved( - cache: &mut LeanBuildCache, - sp: &SyntaxPreresolved, -) -> *mut c_void { - unsafe { - match sp { - // | namespace (name : Name) -- tag 0 - SyntaxPreresolved::Namespace(name) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_name(cache, name)); - obj - }, - // | decl (name : Name) (aliases : Array String) -- tag 1 - SyntaxPreresolved::Decl(name, aliases) => { - let name_obj = build_name(cache, name); - let aliases_obj = build_string_array(aliases); - let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, aliases_obj); - obj - }, - } - } -} - -/// Build an Array of Strings. -pub fn build_string_array(strings: &[String]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(strings.len(), strings.len()); - for (i, s) in strings.iter().enumerate() { - let s_cstr = crate::lean::safe_cstring(s.as_str()); - lean_array_set_core(arr, i, lean_mk_string(s_cstr.as_ptr())); - } - arr - } -} - -/// Build a Ix.Syntax. 
-pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> *mut c_void { - unsafe { - match syn { - // | missing -- tag 0 - Syntax::Missing => lean_alloc_ctor(0, 0, 0), - // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 - Syntax::Node(info, kind, args) => { - let info_obj = build_source_info(info); - let kind_obj = build_name(cache, kind); - let args_obj = build_syntax_array(cache, args); - let obj = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(obj, 0, info_obj); - lean_ctor_set(obj, 1, kind_obj); - lean_ctor_set(obj, 2, args_obj); - obj - }, - // | atom (info : SourceInfo) (val : String) -- tag 2 - Syntax::Atom(info, val) => { - let info_obj = build_source_info(info); - let val_cstr = crate::lean::safe_cstring(val.as_str()); - let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, info_obj); - lean_ctor_set(obj, 1, lean_mk_string(val_cstr.as_ptr())); - obj - }, - // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 - Syntax::Ident(info, raw_val, val, preresolved) => { - let info_obj = build_source_info(info); - let raw_val_obj = build_substring(raw_val); - let val_obj = build_name(cache, val); - let preresolved_obj = - build_syntax_preresolved_array(cache, preresolved); - let obj = lean_alloc_ctor(3, 4, 0); - lean_ctor_set(obj, 0, info_obj); - lean_ctor_set(obj, 1, raw_val_obj); - lean_ctor_set(obj, 2, val_obj); - lean_ctor_set(obj, 3, preresolved_obj); - obj - }, - } - } -} - -/// Build an Array of Syntax. -pub fn build_syntax_array( - cache: &mut LeanBuildCache, - items: &[Syntax], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(items.len(), items.len()); - for (i, item) in items.iter().enumerate() { - let item_obj = build_syntax(cache, item); - lean_array_set_core(arr, i, item_obj); - } - arr - } -} - -/// Build an Array of SyntaxPreresolved. 
-pub fn build_syntax_preresolved_array( - cache: &mut LeanBuildCache, - items: &[SyntaxPreresolved], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(items.len(), items.len()); - for (i, item) in items.iter().enumerate() { - let item_obj = build_syntax_preresolved(cache, item); - lean_array_set_core(arr, i, item_obj); - } - arr - } -} - -/// Build Ix.DataValue. -pub fn build_data_value( - cache: &mut LeanBuildCache, - dv: &DataValue, -) -> *mut c_void { - unsafe { - match dv { - DataValue::OfString(s) => { - let s_cstr = crate::lean::safe_cstring(s.as_str()); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - obj - }, - DataValue::OfBool(b) => { - // 0 object fields, 1 scalar byte - let obj = lean_alloc_ctor(1, 0, 1); - lean_ctor_set_uint8(obj, 0, *b as u8); - obj - }, - DataValue::OfName(n) => { - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, build_name(cache, n)); - obj - }, - DataValue::OfNat(n) => { - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj - }, - DataValue::OfInt(i) => { - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, build_int(i)); - obj - }, - DataValue::OfSyntax(syn) => { - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_syntax(cache, syn)); - obj - }, - } - } -} - -/// Build an Array of (Name × DataValue) for mdata. 
-pub fn build_kvmap( - cache: &mut LeanBuildCache, - data: &[(Name, DataValue)], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(data.len(), data.len()); - for (i, (name, dv)) in data.iter().enumerate() { - let name_obj = build_name(cache, name); - let dv_obj = build_data_value(cache, dv); - // Prod (Name × DataValue) - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, dv_obj); - lean_array_set_core(arr, i, pair); - } - arr - } -} - -// ============================================================================= -// Decode Functions -// ============================================================================= - -/// Decode Ix.Int from Lean pointer. -/// Ix.Int: ofNat (tag 0, 1 field) | negSucc (tag 1, 1 field) -pub fn decode_ix_int(ptr: *const c_void) -> Int { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); - match tag { - 0 => Int::OfNat(nat), - 1 => Int::NegSucc(nat), - _ => panic!("Invalid Ix.Int tag: {}", tag), - } - } -} - -/// Decode Ix.DataValue from a Lean pointer. 
-pub fn decode_data_value(ptr: *const c_void) -> DataValue { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - - match tag { - 0 => { - // ofString: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_obj: &LeanStringObject = as_ref_unsafe(inner_ptr.cast()); - DataValue::OfString(str_obj.as_string()) - }, - 1 => { - // ofBool: 0 object fields, 1 scalar byte - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let b = ctor.get_scalar_u8(0, 0) != 0; - DataValue::OfBool(b) - }, - 2 => { - // ofName: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfName(decode_ix_name(inner_ptr)) - }, - 3 => { - // ofNat: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfNat(Nat::from_ptr(inner_ptr)) - }, - 4 => { - // ofInt: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - let int_tag = lean_obj_tag(inner_ptr as *mut _); - let nat_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); - match int_tag { - 0 => DataValue::OfInt(Int::OfNat(nat)), - 1 => DataValue::OfInt(Int::NegSucc(nat)), - _ => panic!("Invalid Int tag: {}", int_tag), - } - }, - 5 => { - // ofSyntax: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfSyntax(decode_ix_syntax(inner_ptr).into()) - }, - _ => panic!("Invalid DataValue tag: {}", tag), - } - } -} - -/// Decode Ix.Syntax from a Lean pointer. 
-pub fn decode_ix_syntax(ptr: *const c_void) -> Syntax { - unsafe { - if lean_is_scalar(ptr) { - return Syntax::Missing; - } - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => Syntax::Missing, - 1 => { - // node: info, kind, args - let info_ptr = lean_ctor_get(ptr as *mut _, 0); - let kind_ptr = lean_ctor_get(ptr as *mut _, 1); - let args_ptr = lean_ctor_get(ptr as *mut _, 2); - - let info = decode_ix_source_info(info_ptr); - let kind = decode_ix_name(kind_ptr); - let args_obj: &LeanArrayObject = as_ref_unsafe(args_ptr.cast()); - let args: Vec = - args_obj.data().iter().map(|&p| decode_ix_syntax(p)).collect(); - - Syntax::Node(info, kind, args) - }, - 2 => { - // atom: info, val - let info_ptr = lean_ctor_get(ptr as *mut _, 0); - let val_ptr = lean_ctor_get(ptr as *mut _, 1); - - let info = decode_ix_source_info(info_ptr); - let val_obj: &LeanStringObject = as_ref_unsafe(val_ptr.cast()); - - Syntax::Atom(info, val_obj.as_string()) - }, - 3 => { - // ident: info, rawVal, val, preresolved - let info_ptr = lean_ctor_get(ptr as *mut _, 0); - let raw_val_ptr = lean_ctor_get(ptr as *mut _, 1); - let val_ptr = lean_ctor_get(ptr as *mut _, 2); - let preresolved_ptr = lean_ctor_get(ptr as *mut _, 3); - - let info = decode_ix_source_info(info_ptr); - let raw_val = decode_substring(raw_val_ptr); - let val = decode_ix_name(val_ptr); - let preresolved_obj: &LeanArrayObject = - as_ref_unsafe(preresolved_ptr.cast()); - let preresolved: Vec = preresolved_obj - .data() - .iter() - .map(|&p| decode_syntax_preresolved(p)) - .collect(); - - Syntax::Ident(info, raw_val, val, preresolved) - }, - _ => panic!("Invalid Syntax tag: {}", tag), - } - } -} - -/// Decode Ix.SourceInfo. 
-pub fn decode_ix_source_info(ptr: *const c_void) -> SourceInfo { - unsafe { - if lean_is_scalar(ptr) { - return SourceInfo::None; - } - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // original - let leading_ptr = lean_ctor_get(ptr as *mut _, 0); - let pos_ptr = lean_ctor_get(ptr as *mut _, 1); - let trailing_ptr = lean_ctor_get(ptr as *mut _, 2); - let end_pos_ptr = lean_ctor_get(ptr as *mut _, 3); - - SourceInfo::Original( - decode_substring(leading_ptr), - Nat::from_ptr(pos_ptr), - decode_substring(trailing_ptr), - Nat::from_ptr(end_pos_ptr), - ) - }, - 1 => { - // synthetic: 2 obj fields (pos, end_pos), 1 scalar byte (canonical) - let pos_ptr = lean_ctor_get(ptr as *mut _, 0); - let end_pos_ptr = lean_ctor_get(ptr as *mut _, 1); - - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let canonical = ctor.get_scalar_u8(2, 0) != 0; - - SourceInfo::Synthetic( - Nat::from_ptr(pos_ptr), - Nat::from_ptr(end_pos_ptr), - canonical, - ) - }, - 2 => SourceInfo::None, - _ => panic!("Invalid SourceInfo tag: {}", tag), - } - } -} - -/// Decode Ix.Substring. -pub fn decode_substring(ptr: *const c_void) -> Substring { - unsafe { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let start_ptr = lean_ctor_get(ptr as *mut _, 1); - let stop_ptr = lean_ctor_get(ptr as *mut _, 2); - - let str_obj: &LeanStringObject = as_ref_unsafe(str_ptr.cast()); - Substring { - str: str_obj.as_string(), - start_pos: Nat::from_ptr(start_ptr), - stop_pos: Nat::from_ptr(stop_ptr), - } - } -} - -/// Decode Ix.SyntaxPreresolved. 
-pub fn decode_syntax_preresolved(ptr: *const c_void) -> SyntaxPreresolved { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // namespace - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - SyntaxPreresolved::Namespace(decode_ix_name(name_ptr)) - }, - 1 => { - // decl - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let aliases_ptr = lean_ctor_get(ptr as *mut _, 1); - - let name = decode_ix_name(name_ptr); - let aliases_obj: &LeanArrayObject = as_ref_unsafe(aliases_ptr.cast()); - let aliases: Vec = aliases_obj - .data() - .iter() - .map(|&p| { - let s: &LeanStringObject = as_ref_unsafe(p.cast()); - s.as_string() - }) - .collect(); - - SyntaxPreresolved::Decl(name, aliases) - }, - _ => panic!("Invalid SyntaxPreresolved tag: {}", tag), - } - } -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// Round-trip an Ix.Int: decode from Lean, re-encode. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_int(int_ptr: *const c_void) -> *mut c_void { - let int_val = decode_ix_int(int_ptr); - build_int(&int_val) -} - -/// Round-trip an Ix.Substring: decode from Lean, re-encode. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_substring( - sub_ptr: *const c_void, -) -> *mut c_void { - let sub = decode_substring(sub_ptr); - build_substring(&sub) -} - -/// Round-trip an Ix.SourceInfo: decode from Lean, re-encode. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_source_info( - si_ptr: *const c_void, -) -> *mut c_void { - let si = decode_ix_source_info(si_ptr); - build_source_info(&si) -} - -/// Round-trip an Ix.SyntaxPreresolved: decode from Lean, re-encode. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( - sp_ptr: *const c_void, -) -> *mut c_void { - let sp = decode_syntax_preresolved(sp_ptr); - let mut cache = LeanBuildCache::new(); - build_syntax_preresolved(&mut cache, &sp) -} - -/// Round-trip an Ix.Syntax: decode from Lean, re-encode. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_syntax( - syn_ptr: *const c_void, -) -> *mut c_void { - let syn = decode_ix_syntax(syn_ptr); - let mut cache = LeanBuildCache::new(); - build_syntax(&mut cache, &syn) -} - -/// Round-trip an Ix.DataValue: decode from Lean, re-encode. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_data_value( - dv_ptr: *const c_void, -) -> *mut c_void { - let dv = decode_data_value(dv_ptr); - let mut cache = LeanBuildCache::new(); - build_data_value(&mut cache, &dv) -} diff --git a/src/lean/ffi/ix/env.rs b/src/lean/ffi/ix/env.rs deleted file mode 100644 index 38776728..00000000 --- a/src/lean/ffi/ix/env.rs +++ /dev/null @@ -1,294 +0,0 @@ -//! Ix.Environment build/decode/roundtrip FFI. - -use std::ffi::c_void; - -use rustc_hash::FxHashMap; - -use crate::ix::env::{ConstantInfo, Name}; -use crate::lean::array::LeanArrayObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_is_scalar, lean_obj_tag, -}; - -use super::super::builder::LeanBuildCache; -use super::constant::{build_constant_info, decode_constant_info}; -use super::name::{build_name, decode_ix_name}; - -// ============================================================================= -// HashMap Building -// ============================================================================= - -/// Build a Lean HashMap from pre-built key-value pairs. 
-/// -/// Lean's Std.HashMap structure (with unboxing): -/// - HashMap α β unboxes through DHashMap to Raw -/// - Raw = { size : Nat, buckets : Array (AssocList α β) } -/// - Field 0 = size (Nat), Field 1 = buckets (Array) -/// -/// AssocList α β = nil | cons (key : α) (value : β) (tail : AssocList α β) -pub fn build_hashmap_from_pairs( - pairs: Vec<(*mut c_void, *mut c_void, u64)>, // (key_obj, val_obj, hash) -) -> *mut c_void { - let size = pairs.len(); - let bucket_count = (size * 4 / 3 + 1).next_power_of_two().max(8); - - unsafe { - // Create array of AssocLists (initially all nil = boxed 0) - let buckets = lean_alloc_array(bucket_count, bucket_count); - for i in 0..bucket_count { - lean_array_set_core(buckets, i, lean_box_fn(0)); // nil - } - - // Insert entries - for (key_obj, val_obj, hash) in pairs { - let bucket_idx = - usize::try_from(hash).expect("hash overflows usize") % bucket_count; - - // Get current bucket (AssocList) - let buckets_arr = buckets.cast::(); - let current_tail = (*buckets_arr).data()[bucket_idx]; - - // cons (key : α) (value : β) (tail : AssocList α β) -- tag 1 - let cons = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(cons, 0, key_obj); - lean_ctor_set(cons, 1, val_obj); - lean_ctor_set(cons, 2, current_tail as *mut c_void); - - lean_array_set_core(buckets, bucket_idx, cons); - } - - // Build Raw { size : Nat, buckets : Array } - // Due to unboxing, this IS the HashMap directly - // Field 0 = size, Field 1 = buckets (2 object fields, no scalars) - let size_obj = if size <= (usize::MAX >> 1) { - lean_box_fn(size) - } else { - crate::lean::lean_uint64_to_nat(size as u64) - }; - - let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, size_obj); - lean_ctor_set(raw, 1, buckets); - raw - } -} - -// ============================================================================= -// Environment Building -// ============================================================================= - -/// Build a Ix.RawEnvironment from collected caches. 
-/// RawEnvironment has arrays that Lean will convert to HashMaps. -/// -/// Ix.RawEnvironment = { -/// consts : Array (Name × ConstantInfo) -/// } -/// -/// NOTE: RawEnvironment with a single field is UNBOXED by Lean, -/// so we return just the array, not a structure containing it. -pub fn build_raw_environment( - cache: &mut LeanBuildCache, - consts: &FxHashMap, -) -> *mut c_void { - unsafe { - // Build consts array: Array (Name × ConstantInfo) - // RawEnvironment is a single-field structure that may be unboxed to just the array - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (name, info)) in consts.iter().enumerate() { - let key_obj = build_name(cache, name); - let val_obj = build_constant_info(cache, info); - // Build pair (Name × ConstantInfo) - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj); - lean_ctor_set(pair, 1, val_obj); - lean_array_set_core(consts_arr, i, pair); - } - - consts_arr - } -} - -// ============================================================================= -// Environment Decoder -// ============================================================================= - -/// Decode a HashMap's AssocList and collect key-value pairs using a custom decoder. 
-fn decode_assoc_list( - list_ptr: *const c_void, - decode_key: FK, - decode_val: FV, -) -> Vec<(K, V)> -where - FK: Fn(*const c_void) -> K, - FV: Fn(*const c_void) -> V, -{ - let mut result = Vec::new(); - let mut current = list_ptr; - - loop { - unsafe { - if lean_is_scalar(current) { - break; - } - - let tag = lean_obj_tag(current as *mut _); - if tag == 0 { - // AssocList.nil - break; - } - - // AssocList.cons: 3 fields (key, value, tail) - let key_ptr = lean_ctor_get(current as *mut _, 0); - let value_ptr = lean_ctor_get(current as *mut _, 1); - let tail_ptr = lean_ctor_get(current as *mut _, 2); - - result.push((decode_key(key_ptr), decode_val(value_ptr))); - current = tail_ptr; - } - } - - result -} - -/// Decode a Lean HashMap into a Vec of key-value pairs. -/// HashMap structure (after unboxing): Raw { size : Nat, buckets : Array (AssocList α β) } -/// -/// Due to single-field struct unboxing: -/// - HashMap { inner : DHashMap } unboxes to DHashMap -/// - DHashMap { inner : Raw, wf : Prop } unboxes to Raw (Prop is erased) -/// - Raw { size : Nat, buckets : Array } - field 0 = size, field 1 = buckets -fn decode_hashmap( - map_ptr: *const c_void, - decode_key: FK, - decode_val: FV, -) -> Vec<(K, V)> -where - FK: Fn(*const c_void) -> K + Copy, - FV: Fn(*const c_void) -> V + Copy, -{ - unsafe { - // Raw layout: field 0 = size (Nat), field 1 = buckets (Array) - let _size_ptr = lean_ctor_get(map_ptr as *mut _, 0); // unused but needed for layout - let buckets_ptr = lean_ctor_get(map_ptr as *mut _, 1); - - let buckets_obj: &LeanArrayObject = as_ref_unsafe(buckets_ptr.cast()); - - let mut pairs = Vec::new(); - for &bucket_ptr in buckets_obj.data() { - let bucket_pairs = decode_assoc_list(bucket_ptr, decode_key, decode_val); - pairs.extend(bucket_pairs); - } - - pairs - } -} - -/// Decode Ix.Environment from Lean pointer. 
-/// -/// Ix.Environment = { -/// consts : HashMap Name ConstantInfo -/// } -/// -/// NOTE: Environment with a single field is UNBOXED by Lean, -/// so the pointer IS the HashMap directly, not a structure containing it. -pub fn decode_ix_environment( - ptr: *const c_void, -) -> FxHashMap { - // Environment is unboxed - ptr IS the HashMap directly - let consts_pairs = decode_hashmap(ptr, decode_ix_name, decode_constant_info); - let mut consts: FxHashMap = FxHashMap::default(); - for (name, info) in consts_pairs { - consts.insert(name, info); - } - consts -} - -/// Decode Ix.RawEnvironment from Lean pointer into HashMap. -/// RawEnvironment = { consts : Array (Name × ConstantInfo) } -/// NOTE: Unboxed to just Array. This version deduplicates by name. -pub fn decode_ix_raw_environment( - ptr: *const c_void, -) -> FxHashMap { - unsafe { - // RawEnvironment is a single-field structure that may be unboxed - // Try treating ptr as the array directly first - let arr_obj: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - let mut consts: FxHashMap = FxHashMap::default(); - - for &pair_ptr in arr_obj.data() { - let name_ptr = lean_ctor_get(pair_ptr as *mut _, 0); - let info_ptr = lean_ctor_get(pair_ptr as *mut _, 1); - let name = decode_ix_name(name_ptr); - let info = decode_constant_info(info_ptr); - consts.insert(name, info); - } - - consts - } -} - -/// Decode Ix.RawEnvironment from Lean pointer preserving array structure. -/// This version preserves all entries including duplicates. 
-pub fn decode_ix_raw_environment_vec( - ptr: *const c_void, -) -> Vec<(Name, ConstantInfo)> { - unsafe { - let arr_obj: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - let mut consts = Vec::with_capacity(arr_obj.data().len()); - - for &pair_ptr in arr_obj.data() { - let name_ptr = lean_ctor_get(pair_ptr as *mut _, 0); - let info_ptr = lean_ctor_get(pair_ptr as *mut _, 1); - let name = decode_ix_name(name_ptr); - let info = decode_constant_info(info_ptr); - consts.push((name, info)); - } - - consts - } -} - -/// Build Ix.RawEnvironment from Vec, preserving order and duplicates. -pub fn build_raw_environment_from_vec( - cache: &mut LeanBuildCache, - consts: &[(Name, ConstantInfo)], -) -> *mut c_void { - unsafe { - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (name, info)) in consts.iter().enumerate() { - let key_obj = build_name(cache, name); - let val_obj = build_constant_info(cache, info); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj); - lean_ctor_set(pair, 1, val_obj); - lean_array_set_core(consts_arr, i, pair); - } - consts_arr - } -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// Round-trip an Ix.Environment: decode from Lean, re-encode. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_environment( - env_ptr: *const c_void, -) -> *mut c_void { - let env = decode_ix_environment(env_ptr); - let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment(&mut cache, &env) -} - -/// Round-trip an Ix.RawEnvironment: decode from Lean, re-encode. -/// Uses Vec-preserving functions to maintain array structure and order. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_raw_environment( - env_ptr: *const c_void, -) -> *mut c_void { - let env = decode_ix_raw_environment_vec(env_ptr); - let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment_from_vec(&mut cache, &env) -} diff --git a/src/lean/ffi/ix/expr.rs b/src/lean/ffi/ix/expr.rs deleted file mode 100644 index 598d5a77..00000000 --- a/src/lean/ffi/ix/expr.rs +++ /dev/null @@ -1,430 +0,0 @@ -//! Ix.Expr build/decode/roundtrip FFI. -//! -//! Ix.Expr layout (12 constructors): -//! - Tag 0: bvar (idx : Nat) (hash : Address) -//! - Tag 1: fvar (name : Name) (hash : Address) -//! - Tag 2: mvar (name : Name) (hash : Address) -//! - Tag 3: sort (level : Level) (hash : Address) -//! - Tag 4: const (name : Name) (levels : Array Level) (hash : Address) -//! - Tag 5: app (fn arg : Expr) (hash : Address) -//! - Tag 6: lam (name : Name) (ty body : Expr) (bi : BinderInfo) (hash : Address) -//! - Tag 7: forallE (name : Name) (ty body : Expr) (bi : BinderInfo) (hash : Address) -//! - Tag 8: letE (name : Name) (ty val body : Expr) (nonDep : Bool) (hash : Address) -//! - Tag 9: lit (l : Literal) (hash : Address) -//! - Tag 10: mdata (data : Array (Name × DataValue)) (expr : Expr) (hash : Address) -//! 
- Tag 11: proj (typeName : Name) (idx : Nat) (struct : Expr) (hash : Address) - -use std::ffi::c_void; - -use crate::ix::env::{ - BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, -}; -use crate::lean::array::LeanArrayObject; -use crate::lean::nat::Nat; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_inc, - lean_mk_string, lean_obj_tag, -}; - -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::address::build_address; -use super::data::{build_data_value, decode_data_value}; -use super::level::{build_level, build_level_array, decode_ix_level}; -use super::name::{build_name, decode_ix_name}; - -/// Build a Lean Ix.Expr with embedded hash. -/// Uses caching to avoid rebuilding the same expression. -pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { - let hash = *expr.get_hash(); - if let Some(&cached) = cache.exprs.get(&hash) { - unsafe { lean_inc(cached) }; - return cached; - } - - let result = unsafe { - match expr.as_data() { - ExprData::Bvar(idx, h) => { - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, build_nat(idx)); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - ExprData::Fvar(name, h) => { - let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, build_name(cache, name)); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - ExprData::Mvar(name, h) => { - let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, build_name(cache, name)); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - ExprData::Sort(level, h) => { - let obj = lean_alloc_ctor(3, 2, 0); - lean_ctor_set(obj, 0, build_level(cache, level)); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - ExprData::Const(name, levels, h) => { - let name_obj = build_name(cache, name); - let levels_obj = build_level_array(cache, 
levels); - let obj = lean_alloc_ctor(4, 3, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, levels_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj - }, - ExprData::App(fn_expr, arg_expr, h) => { - let fn_obj = build_expr(cache, fn_expr); - let arg_obj = build_expr(cache, arg_expr); - let obj = lean_alloc_ctor(5, 3, 0); - lean_ctor_set(obj, 0, fn_obj); - lean_ctor_set(obj, 1, arg_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj - }, - ExprData::Lam(name, ty, body, bi, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - // 4 object fields, 1 scalar byte for BinderInfo - let obj = lean_alloc_ctor(6, 4, 1); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, ty_obj); - lean_ctor_set(obj, 2, body_obj); - lean_ctor_set(obj, 3, hash_obj); - lean_ctor_set_uint8(obj, 4 * 8, binder_info_to_u8(bi)); - obj - }, - ExprData::ForallE(name, ty, body, bi, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - let obj = lean_alloc_ctor(7, 4, 1); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, ty_obj); - lean_ctor_set(obj, 2, body_obj); - lean_ctor_set(obj, 3, hash_obj); - lean_ctor_set_uint8(obj, 4 * 8, binder_info_to_u8(bi)); - obj - }, - ExprData::LetE(name, ty, val, body, non_dep, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let val_obj = build_expr(cache, val); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - // 5 object fields, 1 scalar byte for Bool - let obj = lean_alloc_ctor(8, 5, 1); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, ty_obj); - lean_ctor_set(obj, 2, val_obj); - lean_ctor_set(obj, 3, body_obj); - lean_ctor_set(obj, 4, hash_obj); - lean_ctor_set_uint8(obj, 5 * 8, *non_dep as u8); - obj - }, - ExprData::Lit(lit, 
h) => { - let lit_obj = build_literal(lit); - let obj = lean_alloc_ctor(9, 2, 0); - lean_ctor_set(obj, 0, lit_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - ExprData::Mdata(md, inner, h) => { - let md_obj = build_mdata_array(cache, md); - let inner_obj = build_expr(cache, inner); - let obj = lean_alloc_ctor(10, 3, 0); - lean_ctor_set(obj, 0, md_obj); - lean_ctor_set(obj, 1, inner_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj - }, - ExprData::Proj(type_name, idx, struct_expr, h) => { - let name_obj = build_name(cache, type_name); - let idx_obj = build_nat(idx); - let struct_obj = build_expr(cache, struct_expr); - let obj = lean_alloc_ctor(11, 4, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, idx_obj); - lean_ctor_set(obj, 2, struct_obj); - lean_ctor_set(obj, 3, build_address(h)); - obj - }, - } - }; - - cache.exprs.insert(hash, result); - result -} - -/// Build an Array of (Name × DataValue) for mdata. -fn build_mdata_array( - cache: &mut LeanBuildCache, - md: &[(Name, DataValue)], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(md.len(), md.len()); - for (i, (name, dv)) in md.iter().enumerate() { - let pair = build_name_datavalue_pair(cache, name, dv); - lean_array_set_core(arr, i, pair); - } - arr - } -} - -/// Build a (Name, DataValue) pair (Prod). -fn build_name_datavalue_pair( - cache: &mut LeanBuildCache, - name: &Name, - dv: &DataValue, -) -> *mut c_void { - unsafe { - let name_obj = build_name(cache, name); - let dv_obj = build_data_value(cache, dv); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, dv_obj); - pair - } -} - -/// Build a Literal (natVal or strVal). 
-pub fn build_literal(lit: &Literal) -> *mut c_void { - unsafe { - match lit { - Literal::NatVal(n) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj - }, - Literal::StrVal(s) => { - let s_cstr = crate::lean::safe_cstring(s.as_str()); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - obj - }, - } - } -} - -/// Build Ix.BinderInfo enum. -/// BinderInfo is a 4-constructor enum with no fields, stored as boxed scalar. -pub fn build_binder_info(bi: &BinderInfo) -> *mut c_void { - lean_box_fn(binder_info_to_u8(bi) as usize) -} - -/// Convert BinderInfo to u8 tag. -pub fn binder_info_to_u8(bi: &BinderInfo) -> u8 { - match bi { - BinderInfo::Default => 0, - BinderInfo::Implicit => 1, - BinderInfo::StrictImplicit => 2, - BinderInfo::InstImplicit => 3, - } -} - -/// Decode a Lean Ix.Expr to Rust Expr. -pub fn decode_ix_expr(ptr: *const c_void) -> Expr { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // bvar - let idx_ptr = lean_ctor_get(ptr as *mut _, 0); - let idx = Nat::from_ptr(idx_ptr); - Expr::bvar(idx) - }, - 1 => { - // fvar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = decode_ix_name(name_ptr); - Expr::fvar(name) - }, - 2 => { - // mvar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = decode_ix_name(name_ptr); - Expr::mvar(name) - }, - 3 => { - // sort - let level_ptr = lean_ctor_get(ptr as *mut _, 0); - let level = decode_ix_level(level_ptr); - Expr::sort(level) - }, - 4 => { - // const - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let levels_ptr = lean_ctor_get(ptr as *mut _, 1); - - let name = decode_ix_name(name_ptr); - let levels_obj: &LeanArrayObject = as_ref_unsafe(levels_ptr.cast()); - let levels: Vec = - levels_obj.data().iter().map(|&p| decode_ix_level(p)).collect(); - - Expr::cnst(name, levels) - }, - 5 => { - // app - let fn_ptr = lean_ctor_get(ptr as *mut _, 0); - let arg_ptr = lean_ctor_get(ptr as *mut 
_, 1); - let fn_expr = decode_ix_expr(fn_ptr); - let arg_expr = decode_ix_expr(arg_ptr); - Expr::app(fn_expr, arg_expr) - }, - 6 => { - // lam: name, ty, body, hash, bi (scalar) - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_ptr = lean_ctor_get(ptr as *mut _, 1); - let body_ptr = lean_ctor_get(ptr as *mut _, 2); - // hash at field 3 - // bi is a scalar byte at offset 4*8 - - let name = decode_ix_name(name_ptr); - let ty = decode_ix_expr(ty_ptr); - let body = decode_ix_expr(body_ptr); - - // Read BinderInfo scalar (4 obj fields: name, ty, body, hash) - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let bi_byte = ctor.get_scalar_u8(4, 0); - let bi = decode_binder_info(bi_byte); - - Expr::lam(name, ty, body, bi) - }, - 7 => { - // forallE: same layout as lam - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_ptr = lean_ctor_get(ptr as *mut _, 1); - let body_ptr = lean_ctor_get(ptr as *mut _, 2); - - let name = decode_ix_name(name_ptr); - let ty = decode_ix_expr(ty_ptr); - let body = decode_ix_expr(body_ptr); - - // 4 obj fields: name, ty, body, hash - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let bi_byte = ctor.get_scalar_u8(4, 0); - let bi = decode_binder_info(bi_byte); - - Expr::all(name, ty, body, bi) - }, - 8 => { - // letE: name, ty, val, body, hash, nonDep (scalar) - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_ptr = lean_ctor_get(ptr as *mut _, 1); - let val_ptr = lean_ctor_get(ptr as *mut _, 2); - let body_ptr = lean_ctor_get(ptr as *mut _, 3); - // hash at field 4 - // nonDep is scalar byte after 5 obj fields - - let name = decode_ix_name(name_ptr); - let ty = decode_ix_expr(ty_ptr); - let val = decode_ix_expr(val_ptr); - let body = decode_ix_expr(body_ptr); - - // 5 obj fields: name, ty, val, body, hash - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let non_dep = ctor.get_scalar_u8(5, 0) != 0; - - Expr::letE(name, ty, val, body, 
non_dep) - }, - 9 => { - // lit - let lit_ptr = lean_ctor_get(ptr as *mut _, 0); - let lit = decode_literal(lit_ptr); - Expr::lit(lit) - }, - 10 => { - // mdata: data, expr, hash - let data_ptr = lean_ctor_get(ptr as *mut _, 0); - let expr_ptr = lean_ctor_get(ptr as *mut _, 1); - - let data_obj: &LeanArrayObject = as_ref_unsafe(data_ptr.cast()); - let data: Vec<(Name, DataValue)> = - data_obj.data().iter().map(|&p| decode_name_data_value(p)).collect(); - - let inner = decode_ix_expr(expr_ptr); - Expr::mdata(data, inner) - }, - 11 => { - // proj: typeName, idx, struct, hash - let type_name_ptr = lean_ctor_get(ptr as *mut _, 0); - let idx_ptr = lean_ctor_get(ptr as *mut _, 1); - let struct_ptr = lean_ctor_get(ptr as *mut _, 2); - - let type_name = decode_ix_name(type_name_ptr); - let idx = Nat::from_ptr(idx_ptr); - let struct_expr = decode_ix_expr(struct_ptr); - - Expr::proj(type_name, idx, struct_expr) - }, - _ => panic!("Invalid Ix.Expr tag: {}", tag), - } - } -} - -/// Decode Lean.Literal from a Lean pointer. -pub fn decode_literal(ptr: *const c_void) -> Literal { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // natVal - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); - Literal::NatVal(nat) - }, - 1 => { - // strVal - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_obj: &LeanStringObject = as_ref_unsafe(str_ptr.cast()); - Literal::StrVal(str_obj.as_string()) - }, - _ => panic!("Invalid Literal tag: {}", tag), - } - } -} - -/// Decode a (Name × DataValue) pair for mdata. -fn decode_name_data_value(ptr: *const c_void) -> (Name, DataValue) { - unsafe { - // Prod: ctor 0 with 2 fields - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let dv_ptr = lean_ctor_get(ptr as *mut _, 1); - - let name = decode_ix_name(name_ptr); - let dv = decode_data_value(dv_ptr); - - (name, dv) - } -} - -/// Decode BinderInfo from byte. 
-pub fn decode_binder_info(bi_byte: u8) -> BinderInfo { - match bi_byte { - 0 => BinderInfo::Default, - 1 => BinderInfo::Implicit, - 2 => BinderInfo::StrictImplicit, - 3 => BinderInfo::InstImplicit, - _ => panic!("Invalid BinderInfo: {}", bi_byte), - } -} - -/// Round-trip an Ix.Expr: decode from Lean, re-encode via LeanBuildCache. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: *const c_void) -> *mut c_void { - let expr = decode_ix_expr(expr_ptr); - let mut cache = LeanBuildCache::new(); - build_expr(&mut cache, &expr) -} diff --git a/src/lean/ffi/ix/level.rs b/src/lean/ffi/ix/level.rs deleted file mode 100644 index cc139a78..00000000 --- a/src/lean/ffi/ix/level.rs +++ /dev/null @@ -1,155 +0,0 @@ -//! Ix.Level build/decode/roundtrip FFI. -//! -//! Ix.Level layout: -//! - Tag 0: zero (hash : Address) -//! - Tag 1: succ (x : Level) (hash : Address) -//! - Tag 2: max (x y : Level) (hash : Address) -//! - Tag 3: imax (x y : Level) (hash : Address) -//! - Tag 4: param (n : Name) (hash : Address) -//! - Tag 5: mvar (n : Name) (hash : Address) - -use std::ffi::c_void; - -use crate::ix::env::{Level, LevelData}; -use crate::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_inc, lean_obj_tag, -}; - -use super::super::builder::LeanBuildCache; -use super::address::build_address; -use super::name::{build_name, decode_ix_name}; - -/// Build a Lean Ix.Level with embedded hash. -/// Uses caching to avoid rebuilding the same level. 
-pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> *mut c_void { - let hash = *level.get_hash(); - if let Some(&cached) = cache.levels.get(&hash) { - unsafe { lean_inc(cached) }; - return cached; - } - - let result = unsafe { - match level.as_data() { - LevelData::Zero(h) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_address(h)); - obj - }, - LevelData::Succ(x, h) => { - let x_obj = build_level(cache, x); - let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, x_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - LevelData::Max(x, y, h) => { - let x_obj = build_level(cache, x); - let y_obj = build_level(cache, y); - let obj = lean_alloc_ctor(2, 3, 0); - lean_ctor_set(obj, 0, x_obj); - lean_ctor_set(obj, 1, y_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj - }, - LevelData::Imax(x, y, h) => { - let x_obj = build_level(cache, x); - let y_obj = build_level(cache, y); - let obj = lean_alloc_ctor(3, 3, 0); - lean_ctor_set(obj, 0, x_obj); - lean_ctor_set(obj, 1, y_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj - }, - LevelData::Param(n, h) => { - let n_obj = build_name(cache, n); - let obj = lean_alloc_ctor(4, 2, 0); - lean_ctor_set(obj, 0, n_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - LevelData::Mvar(n, h) => { - let n_obj = build_name(cache, n); - let obj = lean_alloc_ctor(5, 2, 0); - lean_ctor_set(obj, 0, n_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj - }, - } - }; - - cache.levels.insert(hash, result); - result -} - -/// Build an Array of Levels. -pub fn build_level_array( - cache: &mut LeanBuildCache, - levels: &[Level], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(levels.len(), levels.len()); - for (i, level) in levels.iter().enumerate() { - let level_obj = build_level(cache, level); - lean_array_set_core(arr, i, level_obj); - } - arr - } -} - -/// Decode a Lean Ix.Level to Rust Level. 
-pub fn decode_ix_level(ptr: *const c_void) -> Level { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => Level::zero(), - 1 => { - let x_ptr = lean_ctor_get(ptr as *mut _, 0); - let x = decode_ix_level(x_ptr); - Level::succ(x) - }, - 2 => { - let x_ptr = lean_ctor_get(ptr as *mut _, 0); - let y_ptr = lean_ctor_get(ptr as *mut _, 1); - let x = decode_ix_level(x_ptr); - let y = decode_ix_level(y_ptr); - Level::max(x, y) - }, - 3 => { - let x_ptr = lean_ctor_get(ptr as *mut _, 0); - let y_ptr = lean_ctor_get(ptr as *mut _, 1); - let x = decode_ix_level(x_ptr); - let y = decode_ix_level(y_ptr); - Level::imax(x, y) - }, - 4 => { - let n_ptr = lean_ctor_get(ptr as *mut _, 0); - let n = decode_ix_name(n_ptr); - Level::param(n) - }, - 5 => { - let n_ptr = lean_ctor_get(ptr as *mut _, 0); - let n = decode_ix_name(n_ptr); - Level::mvar(n) - }, - _ => panic!("Invalid Ix.Level tag: {}", tag), - } - } -} - -/// Decode Array of Levels from Lean pointer. -pub fn decode_level_array(ptr: *const c_void) -> Vec { - let arr_obj: &crate::lean::array::LeanArrayObject = - crate::lean::as_ref_unsafe(ptr.cast()); - arr_obj.data().iter().map(|&p| decode_ix_level(p)).collect() -} - -/// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_level( - level_ptr: *const c_void, -) -> *mut c_void { - let level = decode_ix_level(level_ptr); - let mut cache = LeanBuildCache::new(); - build_level(&mut cache, &level) -} diff --git a/src/lean/ffi/ix/name.rs b/src/lean/ffi/ix/name.rs deleted file mode 100644 index 052606eb..00000000 --- a/src/lean/ffi/ix/name.rs +++ /dev/null @@ -1,130 +0,0 @@ -//! Ix.Name build/decode/roundtrip FFI. -//! -//! Ix.Name layout: -//! - Tag 0: anonymous (hash : Address) -//! - Tag 1: str (parent : Name) (s : String) (hash : Address) -//! 
- Tag 2: num (parent : Name) (i : Nat) (hash : Address) - -use std::ffi::c_void; - -use crate::ix::env::{Name, NameData}; -use crate::lean::nat::Nat; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_inc, lean_mk_string, lean_obj_tag, -}; - -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::address::build_address; - -/// Build a Lean Ix.Name with embedded hash. -/// Uses caching to avoid rebuilding the same name. -pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> *mut c_void { - let hash = name.get_hash(); - if let Some(&cached) = cache.names.get(hash) { - unsafe { lean_inc(cached) }; - return cached; - } - - let result = unsafe { - match name.as_data() { - NameData::Anonymous(h) => { - // anonymous: (hash : Address) - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_address(h)); - obj - }, - NameData::Str(parent, s, h) => { - // str: (parent : Name) (s : String) (hash : Address) - let parent_obj = build_name(cache, parent); - let s_cstr = crate::lean::safe_cstring(s.as_str()); - let obj = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(obj, 0, parent_obj); - lean_ctor_set(obj, 1, lean_mk_string(s_cstr.as_ptr())); - lean_ctor_set(obj, 2, build_address(h)); - obj - }, - NameData::Num(parent, n, h) => { - // num: (parent : Name) (i : Nat) (hash : Address) - let parent_obj = build_name(cache, parent); - let n_obj = build_nat(n); - let obj = lean_alloc_ctor(2, 3, 0); - lean_ctor_set(obj, 0, parent_obj); - lean_ctor_set(obj, 1, n_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj - }, - } - }; - - cache.names.insert(*hash, result); - result -} - -/// Build an Array of Names. 
-pub fn build_name_array( - cache: &mut LeanBuildCache, - names: &[Name], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(names.len(), names.len()); - for (i, name) in names.iter().enumerate() { - let name_obj = build_name(cache, name); - lean_array_set_core(arr, i, name_obj); - } - arr - } -} - -/// Decode a Lean Ix.Name to Rust Name. -pub fn decode_ix_name(ptr: *const c_void) -> Name { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // anonymous: just has hash, construct anon Name - Name::anon() - }, - 1 => { - // str: parent, s, hash - let parent_ptr = lean_ctor_get(ptr as *mut _, 0); - let s_ptr = lean_ctor_get(ptr as *mut _, 1); - // hash at field 2 is ignored - Rust recomputes it - - let parent = decode_ix_name(parent_ptr); - let s_obj: &LeanStringObject = as_ref_unsafe(s_ptr.cast()); - let s = s_obj.as_string(); - - Name::str(parent, s) - }, - 2 => { - // num: parent, i, hash - let parent_ptr = lean_ctor_get(ptr as *mut _, 0); - let i_ptr = lean_ctor_get(ptr as *mut _, 1); - // hash at field 2 is ignored - - let parent = decode_ix_name(parent_ptr); - let i = Nat::from_ptr(i_ptr); - - Name::num(parent, i) - }, - _ => panic!("Invalid Ix.Name tag: {}", tag), - } - } -} - -/// Decode Array of Names from Lean pointer. -pub fn decode_name_array(ptr: *const c_void) -> Vec { - let arr_obj: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr_obj.data().iter().map(|&p| decode_ix_name(p)).collect() -} - -/// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_name(name_ptr: *const c_void) -> *mut c_void { - let name = decode_ix_name(name_ptr); - let mut cache = LeanBuildCache::new(); - build_name(&mut cache, &name) -} diff --git a/src/lean/ffi/ixon/constant.rs b/src/lean/ffi/ixon/constant.rs deleted file mode 100644 index e7692759..00000000 --- a/src/lean/ffi/ixon/constant.rs +++ /dev/null @@ -1,808 +0,0 @@ -//! 
Ixon constant types build/decode/roundtrip FFI. -//! -//! Includes: Definition, Axiom, Quotient, RecursorRule, Recursor, Constructor, -//! Inductive, InductiveProj, ConstructorProj, RecursorProj, DefinitionProj, -//! MutConst, ConstantInfo, Constant - -use std::ffi::c_void; -use std::sync::Arc; - -use crate::ix::address::Address; -use crate::ix::ixon::constant::{ - Axiom as IxonAxiom, Constant as IxonConstant, - ConstantInfo as IxonConstantInfo, Constructor as IxonConstructor, - ConstructorProj, DefKind, Definition as IxonDefinition, DefinitionProj, - Inductive as IxonInductive, InductiveProj, MutConst, - Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, - RecursorRule as IxonRecursorRule, -}; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_obj_tag, - lean_sarray_cptr, -}; - -use super::expr::{ - build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, - decode_ixon_expr_array, -}; -use super::univ::{build_ixon_univ_array, decode_ixon_univ_array}; - -/// Build Address from Ixon Address type (which is just a [u8; 32]). -pub fn build_address_from_ixon(addr: &Address) -> *mut c_void { - unsafe { - let ba = lean_alloc_sarray(1, 32, 32); - let data_ptr = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(addr.as_bytes().as_ptr(), data_ptr, 32); - ba - } -} - -/// Build an Array of Addresses. -pub fn build_address_array(addrs: &[Address]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(addrs.len(), addrs.len()); - for (i, addr) in addrs.iter().enumerate() { - let addr_obj = build_address_from_ixon(addr); - lean_array_set_core(arr, i, addr_obj); - } - arr - } -} - -/// Build Ixon.Definition -/// Lean stores scalar fields ordered by size (largest first). 
-/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn build_ixon_definition(def: &IxonDefinition) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&def.typ); - let value_obj = build_ixon_expr(&def.value); - // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) - let obj = lean_alloc_ctor(0, 2, 16); - lean_ctor_set(obj, 0, typ_obj); - lean_ctor_set(obj, 1, value_obj); - let base = obj.cast::(); - let scalar_base = base.add(2 * 8 + 8); // offset 24 - - // lvls at offset 0 (8 bytes) - largest scalar first - *scalar_base.cast::() = def.lvls; - // kind at offset 8 (1 byte) - let kind_val: u8 = match def.kind { - DefKind::Definition => 0, - DefKind::Opaque => 1, - DefKind::Theorem => 2, - }; - *scalar_base.add(8) = kind_val; - // safety at offset 9 (1 byte) - let safety_val: u8 = match def.safety { - crate::ix::env::DefinitionSafety::Unsafe => 0, - crate::ix::env::DefinitionSafety::Safe => 1, - crate::ix::env::DefinitionSafety::Partial => 2, - }; - *scalar_base.add(9) = safety_val; - obj - } -} - -/// Build Ixon.RecursorRule -pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> *mut c_void { - unsafe { - let rhs_obj = build_ixon_expr(&rule.rhs); - // 1 obj field, 8 scalar bytes - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, rhs_obj); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = rule.fields; - obj - } -} - -/// Build Ixon.Recursor -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn build_ixon_recursor(rec: &IxonRecursor) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&rec.typ); - // Build rules array - let rules_arr = lean_alloc_array(rec.rules.len(), rec.rules.len()); - for (i, rule) in rec.rules.iter().enumerate() { - let rule_obj = build_ixon_recursor_rule(rule); - lean_array_set_core(rules_arr, i, rule_obj); - } - // 2 obj fields (typ, rules), 48 scalar bytes (5×8 + 1 + 
1 + 6 padding) - let obj = lean_alloc_ctor(0, 2, 48); - lean_ctor_set(obj, 0, typ_obj); - lean_ctor_set(obj, 1, rules_arr); - let base = obj.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - *scalar_base.cast::() = rec.lvls; - *scalar_base.add(8).cast::() = rec.params; - *scalar_base.add(16).cast::() = rec.indices; - *scalar_base.add(24).cast::() = rec.motives; - *scalar_base.add(32).cast::() = rec.minors; - // bool fields last - *scalar_base.add(40) = if rec.k { 1 } else { 0 }; - *scalar_base.add(41) = if rec.is_unsafe { 1 } else { 0 }; - obj - } -} - -/// Build Ixon.Axiom -/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_axiom(ax: &IxonAxiom) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&ax.typ); - // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, typ_obj); - let base = obj.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - *scalar_base.cast::() = ax.lvls; - // isUnsafe at offset 8 - *scalar_base.add(8) = if ax.is_unsafe { 1 } else { 0 }; - obj - } -} - -/// Build Ixon.Quotient -/// QuotKind is a simple enum stored as scalar u8, not object field. 
-/// Scalars ordered by size: lvls(8) + kind(1) + padding(7) -pub fn build_ixon_quotient(quot: &IxonQuotient) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(".typ); - // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7)) - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, typ_obj); - let base = obj.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - *scalar_base.cast::() = quot.lvls; - // kind at offset 8 - let kind_val: u8 = match quot.kind { - crate::ix::env::QuotKind::Type => 0, - crate::ix::env::QuotKind::Ctor => 1, - crate::ix::env::QuotKind::Lift => 2, - crate::ix::env::QuotKind::Ind => 3, - }; - *scalar_base.add(8) = kind_val; - obj - } -} - -/// Build Ixon.Constructor -/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_constructor(ctor: &IxonConstructor) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&ctor.typ); - // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) - let obj = lean_alloc_ctor(0, 1, 40); - lean_ctor_set(obj, 0, typ_obj); - let base = obj.cast::(); - let scalar_base = base.add(8 + 8); - // u64 fields first - *scalar_base.cast::() = ctor.lvls; - *scalar_base.add(8).cast::() = ctor.cidx; - *scalar_base.add(16).cast::() = ctor.params; - *scalar_base.add(24).cast::() = ctor.fields; - // bool field last - *scalar_base.add(32) = if ctor.is_unsafe { 1 } else { 0 }; - obj - } -} - -/// Build Ixon.Inductive -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn build_ixon_inductive(ind: &IxonInductive) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&ind.typ); - // Build ctors array - let ctors_arr = lean_alloc_array(ind.ctors.len(), ind.ctors.len()); - for (i, ctor) in ind.ctors.iter().enumerate() { - let ctor_obj = build_ixon_constructor(ctor); - lean_array_set_core(ctors_arr, i, ctor_obj); - } - // 2 obj fields, 40 scalar 
bytes (4×8 + 3 + 5 padding) - let obj = lean_alloc_ctor(0, 2, 40); - lean_ctor_set(obj, 0, typ_obj); - lean_ctor_set(obj, 1, ctors_arr); - let base = obj.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - *scalar_base.cast::() = ind.lvls; - *scalar_base.add(8).cast::() = ind.params; - *scalar_base.add(16).cast::() = ind.indices; - *scalar_base.add(24).cast::() = ind.nested; - // bool fields last - *scalar_base.add(32) = if ind.recr { 1 } else { 0 }; - *scalar_base.add(33) = if ind.refl { 1 } else { 0 }; - *scalar_base.add(34) = if ind.is_unsafe { 1 } else { 0 }; - obj - } -} - -/// Build Ixon.InductiveProj -pub fn build_inductive_proj(proj: &InductiveProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = proj.idx; - obj - } -} - -/// Build Ixon.ConstructorProj -pub fn build_constructor_proj(proj: &ConstructorProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, block_obj); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = proj.idx; - *base.add(8 + 16).cast::() = proj.cidx; - obj - } -} - -/// Build Ixon.RecursorProj -pub fn build_recursor_proj(proj: &RecursorProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = proj.idx; - obj - } -} - -/// Build Ixon.DefinitionProj -pub fn build_definition_proj(proj: &DefinitionProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = proj.idx; - obj - } -} - -/// Build Ixon.MutConst -pub fn build_mut_const(mc: 
&MutConst) -> *mut c_void { - unsafe { - match mc { - MutConst::Defn(def) => { - let def_obj = build_ixon_definition(def); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, def_obj); - obj - }, - MutConst::Indc(ind) => { - let ind_obj = build_ixon_inductive(ind); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, ind_obj); - obj - }, - MutConst::Recr(rec) => { - let rec_obj = build_ixon_recursor(rec); - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, rec_obj); - obj - }, - } - } -} - -/// Build Ixon.ConstantInfo (9 constructors) -pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> *mut c_void { - unsafe { - match info { - IxonConstantInfo::Defn(def) => { - let def_obj = build_ixon_definition(def); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, def_obj); - obj - }, - IxonConstantInfo::Recr(rec) => { - let rec_obj = build_ixon_recursor(rec); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, rec_obj); - obj - }, - IxonConstantInfo::Axio(ax) => { - let ax_obj = build_ixon_axiom(ax); - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, ax_obj); - obj - }, - IxonConstantInfo::Quot(quot) => { - let quot_obj = build_ixon_quotient(quot); - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, quot_obj); - obj - }, - IxonConstantInfo::CPrj(proj) => { - let proj_obj = build_constructor_proj(proj); - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj - }, - IxonConstantInfo::RPrj(proj) => { - let proj_obj = build_recursor_proj(proj); - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj - }, - IxonConstantInfo::IPrj(proj) => { - let proj_obj = build_inductive_proj(proj); - let obj = lean_alloc_ctor(6, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj - }, - IxonConstantInfo::DPrj(proj) => { - let proj_obj = build_definition_proj(proj); - let obj = lean_alloc_ctor(7, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj - }, - 
IxonConstantInfo::Muts(muts) => { - let arr = lean_alloc_array(muts.len(), muts.len()); - for (i, mc) in muts.iter().enumerate() { - let mc_obj = build_mut_const(mc); - lean_array_set_core(arr, i, mc_obj); - } - let obj = lean_alloc_ctor(8, 1, 0); - lean_ctor_set(obj, 0, arr); - obj - }, - } - } -} - -/// Build Ixon.Constant -pub fn build_ixon_constant(constant: &IxonConstant) -> *mut c_void { - unsafe { - let info_obj = build_ixon_constant_info(&constant.info); - let sharing_obj = build_ixon_expr_array(&constant.sharing); - let refs_obj = build_address_array(&constant.refs); - let univs_obj = build_ixon_univ_array(&constant.univs); - let obj = lean_alloc_ctor(0, 4, 0); - lean_ctor_set(obj, 0, info_obj); - lean_ctor_set(obj, 1, sharing_obj); - lean_ctor_set(obj, 2, refs_obj); - lean_ctor_set(obj, 3, univs_obj); - obj - } -} - -// ============================================================================= -// Decode Functions -// ============================================================================= - -/// Decode a ByteArray (Address) to Address. -pub fn decode_ixon_address(ptr: *const c_void) -> Address { - let ba: &LeanSArrayObject = as_ref_unsafe(ptr.cast()); - let bytes = ba.data(); - Address::from_slice(&bytes[..32]).expect("Address should be 32 bytes") -} - -/// Decode Array Address. -pub fn decode_ixon_address_array(ptr: *const c_void) -> Vec
{ - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(decode_ixon_address) -} - -/// Decode Ixon.Definition. -/// Lean stores scalar fields ordered by size (largest first). -/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn decode_ixon_definition(ptr: *const c_void) -> IxonDefinition { - unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let value_ptr = lean_ctor_get(ptr.cast_mut(), 1); - - let base = ptr.cast::(); - // Scalars start after header (8) + 2 obj fields (16) = offset 24 - let scalar_base = base.add(24); - - // lvls at offset 0 (8 bytes) - largest scalar first - let lvls = *scalar_base.cast::(); - // kind at offset 8 (1 byte) - let kind_val = *scalar_base.add(8); - let kind = match kind_val { - 0 => DefKind::Definition, - 1 => DefKind::Opaque, - 2 => DefKind::Theorem, - _ => panic!("Invalid DefKind: {}", kind_val), - }; - // safety at offset 9 (1 byte) - let safety_val = *scalar_base.add(9); - let safety = match safety_val { - 0 => crate::ix::env::DefinitionSafety::Unsafe, - 1 => crate::ix::env::DefinitionSafety::Safe, - 2 => crate::ix::env::DefinitionSafety::Partial, - _ => panic!("Invalid DefinitionSafety: {}", safety_val), - }; - - IxonDefinition { - kind, - safety, - lvls, - typ: Arc::new(decode_ixon_expr(typ_ptr)), - value: Arc::new(decode_ixon_expr(value_ptr)), - } - } -} - -/// Decode Ixon.RecursorRule. -pub fn decode_ixon_recursor_rule(ptr: *const c_void) -> IxonRecursorRule { - unsafe { - let rhs_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let fields = *base.add(8 + 8).cast::(); - IxonRecursorRule { fields, rhs: Arc::new(decode_ixon_expr(rhs_ptr)) } - } -} - -/// Decode Ixon.Recursor. 
-/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn decode_ixon_recursor(ptr: *const c_void) -> IxonRecursor { - unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let rules_ptr = lean_ctor_get(ptr.cast_mut(), 1); - let base = ptr.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - let lvls = *scalar_base.cast::(); - let params = *scalar_base.add(8).cast::(); - let indices = *scalar_base.add(16).cast::(); - let motives = *scalar_base.add(24).cast::(); - let minors = *scalar_base.add(32).cast::(); - // bool fields last - let k = *scalar_base.add(40) != 0; - let is_unsafe = *scalar_base.add(41) != 0; - - let rules_arr: &crate::lean::array::LeanArrayObject = - as_ref_unsafe(rules_ptr.cast()); - let rules = rules_arr.to_vec(decode_ixon_recursor_rule); - - IxonRecursor { - k, - is_unsafe, - lvls, - params, - indices, - motives, - minors, - typ: Arc::new(decode_ixon_expr(typ_ptr)), - rules, - } - } -} - -/// Decode Ixon.Axiom. -/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_axiom(ptr: *const c_void) -> IxonAxiom { - unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - let lvls = *scalar_base.cast::(); - // isUnsafe at offset 8 - let is_unsafe = *scalar_base.add(8) != 0; - IxonAxiom { is_unsafe, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr)) } - } -} - -/// Decode Ixon.Quotient. -/// QuotKind is a scalar (not object field). 
Scalars: lvls(8) + kind(1) + padding(7) -pub fn decode_ixon_quotient(ptr: *const c_void) -> IxonQuotient { - unsafe { - // typ is the only object field (at index 0) - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - let lvls = *scalar_base.cast::(); - // kind at offset 8 - let kind_val = *scalar_base.add(8); - let kind = match kind_val { - 0 => crate::ix::env::QuotKind::Type, - 1 => crate::ix::env::QuotKind::Ctor, - 2 => crate::ix::env::QuotKind::Lift, - 3 => crate::ix::env::QuotKind::Ind, - _ => panic!("Invalid QuotKind: {}", kind_val), - }; - IxonQuotient { kind, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr)) } - } -} - -/// Decode Ixon.Constructor. -/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_constructor(ptr: *const c_void) -> IxonConstructor { - unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let scalar_base = base.add(8 + 8); - // u64 fields first - let lvls = *scalar_base.cast::(); - let cidx = *scalar_base.add(8).cast::(); - let params = *scalar_base.add(16).cast::(); - let fields = *scalar_base.add(24).cast::(); - // bool field last - let is_unsafe = *scalar_base.add(32) != 0; - IxonConstructor { - is_unsafe, - lvls, - cidx, - params, - fields, - typ: Arc::new(decode_ixon_expr(typ_ptr)), - } - } -} - -/// Decode Ixon.Inductive. 
-/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn decode_ixon_inductive(ptr: *const c_void) -> IxonInductive { - unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let ctors_ptr = lean_ctor_get(ptr.cast_mut(), 1); - let base = ptr.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - let lvls = *scalar_base.cast::(); - let params = *scalar_base.add(8).cast::(); - let indices = *scalar_base.add(16).cast::(); - let nested = *scalar_base.add(24).cast::(); - // bool fields last - let recr = *scalar_base.add(32) != 0; - let refl = *scalar_base.add(33) != 0; - let is_unsafe = *scalar_base.add(34) != 0; - - let ctors_arr: &crate::lean::array::LeanArrayObject = - as_ref_unsafe(ctors_ptr.cast()); - let ctors = ctors_arr.to_vec(decode_ixon_constructor); - - IxonInductive { - recr, - refl, - is_unsafe, - lvls, - params, - indices, - nested, - typ: Arc::new(decode_ixon_expr(typ_ptr)), - ctors, - } - } -} - -/// Decode Ixon.InductiveProj. -pub fn decode_ixon_inductive_proj(ptr: *const c_void) -> InductiveProj { - unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - InductiveProj { idx, block: decode_ixon_address(block_ptr) } - } -} - -/// Decode Ixon.ConstructorProj. -pub fn decode_ixon_constructor_proj(ptr: *const c_void) -> ConstructorProj { - unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - let cidx = *base.add(8 + 16).cast::(); - ConstructorProj { idx, cidx, block: decode_ixon_address(block_ptr) } - } -} - -/// Decode Ixon.RecursorProj. 
-pub fn decode_ixon_recursor_proj(ptr: *const c_void) -> RecursorProj { - unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - RecursorProj { idx, block: decode_ixon_address(block_ptr) } - } -} - -/// Decode Ixon.DefinitionProj. -pub fn decode_ixon_definition_proj(ptr: *const c_void) -> DefinitionProj { - unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - DefinitionProj { idx, block: decode_ixon_address(block_ptr) } - } -} - -/// Decode Ixon.MutConst. -pub fn decode_ixon_mut_const(ptr: *const c_void) -> MutConst { - unsafe { - let tag = lean_obj_tag(ptr.cast_mut()); - let inner_ptr = lean_ctor_get(ptr.cast_mut(), 0); - match tag { - 0 => MutConst::Defn(decode_ixon_definition(inner_ptr)), - 1 => MutConst::Indc(decode_ixon_inductive(inner_ptr)), - 2 => MutConst::Recr(decode_ixon_recursor(inner_ptr)), - _ => panic!("Invalid Ixon.MutConst tag: {}", tag), - } - } -} - -/// Decode Ixon.ConstantInfo. 
-pub fn decode_ixon_constant_info(ptr: *const c_void) -> IxonConstantInfo { - unsafe { - let tag = lean_obj_tag(ptr.cast_mut()); - let inner_ptr = lean_ctor_get(ptr.cast_mut(), 0); - match tag { - 0 => IxonConstantInfo::Defn(decode_ixon_definition(inner_ptr)), - 1 => IxonConstantInfo::Recr(decode_ixon_recursor(inner_ptr)), - 2 => IxonConstantInfo::Axio(decode_ixon_axiom(inner_ptr)), - 3 => IxonConstantInfo::Quot(decode_ixon_quotient(inner_ptr)), - 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner_ptr)), - 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj(inner_ptr)), - 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj(inner_ptr)), - 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner_ptr)), - 8 => { - let muts_arr: &crate::lean::array::LeanArrayObject = - as_ref_unsafe(inner_ptr.cast()); - let muts = muts_arr.to_vec(decode_ixon_mut_const); - IxonConstantInfo::Muts(muts) - }, - _ => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), - } - } -} - -/// Decode Ixon.Constant. -pub fn decode_ixon_constant(ptr: *const c_void) -> IxonConstant { - unsafe { - let info_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let sharing_ptr = lean_ctor_get(ptr.cast_mut(), 1); - let refs_ptr = lean_ctor_get(ptr.cast_mut(), 2); - let univs_ptr = lean_ctor_get(ptr.cast_mut(), 3); - - IxonConstant { - info: decode_ixon_constant_info(info_ptr), - sharing: decode_ixon_expr_array(sharing_ptr), - refs: decode_ixon_address_array(refs_ptr), - univs: decode_ixon_univ_array(univs_ptr), - } - } -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// Round-trip Ixon.Definition. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition( - ptr: *const c_void, -) -> *mut c_void { - let def = decode_ixon_definition(ptr); - build_ixon_definition(&def) -} - -/// Round-trip Ixon.Recursor. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor( - ptr: *const c_void, -) -> *mut c_void { - let rec = decode_ixon_recursor(ptr); - build_ixon_recursor(&rec) -} - -/// Round-trip Ixon.Axiom. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_axiom(ptr: *const c_void) -> *mut c_void { - let ax = decode_ixon_axiom(ptr); - build_ixon_axiom(&ax) -} - -/// Round-trip Ixon.Quotient. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_quotient( - ptr: *const c_void, -) -> *mut c_void { - let quot = decode_ixon_quotient(ptr); - build_ixon_quotient(") -} - -/// Round-trip Ixon.ConstantInfo. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant_info( - ptr: *const c_void, -) -> *mut c_void { - let info = decode_ixon_constant_info(ptr); - build_ixon_constant_info(&info) -} - -/// Round-trip Ixon.Constant. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant( - ptr: *const c_void, -) -> *mut c_void { - let constant = decode_ixon_constant(ptr); - build_ixon_constant(&constant) -} - -/// Round-trip Ixon.RecursorRule. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor_rule( - ptr: *const c_void, -) -> *mut c_void { - let rule = decode_ixon_recursor_rule(ptr); - build_ixon_recursor_rule(&rule) -} - -/// Round-trip Ixon.Constructor. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constructor( - ptr: *const c_void, -) -> *mut c_void { - let ctor = decode_ixon_constructor(ptr); - build_ixon_constructor(&ctor) -} - -/// Round-trip Ixon.Inductive. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_inductive( - ptr: *const c_void, -) -> *mut c_void { - let ind = decode_ixon_inductive(ptr); - build_ixon_inductive(&ind) -} - -/// Round-trip Ixon.InductiveProj. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_inductive_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_inductive_proj(ptr); - build_inductive_proj(&proj) -} - -/// Round-trip Ixon.ConstructorProj. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constructor_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_constructor_proj(ptr); - build_constructor_proj(&proj) -} - -/// Round-trip Ixon.RecursorProj. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_recursor_proj(ptr); - build_recursor_proj(&proj) -} - -/// Round-trip Ixon.DefinitionProj. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_definition_proj(ptr); - build_definition_proj(&proj) -} - -/// Round-trip Ixon.MutConst. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_mut_const( - ptr: *const c_void, -) -> *mut c_void { - let mc = decode_ixon_mut_const(ptr); - build_mut_const(&mc) -} diff --git a/src/lean/ffi/ixon/enums.rs b/src/lean/ffi/ixon/enums.rs deleted file mode 100644 index e8c4b12c..00000000 --- a/src/lean/ffi/ixon/enums.rs +++ /dev/null @@ -1,117 +0,0 @@ -//! Ixon enum types: DefKind, DefinitionSafety, QuotKind build/decode/roundtrip FFI. - -use std::ffi::c_void; - -use crate::ix::env::{DefinitionSafety, QuotKind}; -use crate::ix::ixon::constant::DefKind; - -/// Build Ixon.DefKind -/// | defn -- tag 0 -/// | opaq -- tag 1 -/// | thm -- tag 2 -/// Simple enums are represented as raw tag values (unboxed scalars). 
-pub fn build_def_kind(kind: &DefKind) -> *mut c_void { - let tag = match kind { - DefKind::Definition => 0, - DefKind::Opaque => 1, - DefKind::Theorem => 2, - }; - tag as *mut c_void -} - -/// Build Ixon.DefinitionSafety -/// | unsaf -- tag 0 -/// | safe -- tag 1 -/// | part -- tag 2 -pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> *mut c_void { - let tag = match safety { - DefinitionSafety::Unsafe => 0, - DefinitionSafety::Safe => 1, - DefinitionSafety::Partial => 2, - }; - tag as *mut c_void -} - -/// Build Ixon.QuotKind -/// | type -- tag 0 -/// | ctor -- tag 1 -/// | lift -- tag 2 -/// | ind -- tag 3 -pub fn build_ixon_quot_kind(kind: &QuotKind) -> *mut c_void { - let tag = match kind { - QuotKind::Type => 0, - QuotKind::Ctor => 1, - QuotKind::Lift => 2, - QuotKind::Ind => 3, - }; - tag as *mut c_void -} - -// ============================================================================= -// Decode Functions -// ============================================================================= - -/// Decode Ixon.DefKind (simple enum, raw tag value). -pub fn decode_ixon_def_kind(ptr: *const c_void) -> DefKind { - let tag = ptr as usize; - match tag { - 0 => DefKind::Definition, - 1 => DefKind::Opaque, - 2 => DefKind::Theorem, - _ => panic!("Invalid Ixon.DefKind tag: {}", tag), - } -} - -/// Decode Ixon.DefinitionSafety (simple enum, raw tag value). -pub fn decode_ixon_definition_safety(ptr: *const c_void) -> DefinitionSafety { - let tag = ptr as usize; - match tag { - 0 => DefinitionSafety::Unsafe, - 1 => DefinitionSafety::Safe, - 2 => DefinitionSafety::Partial, - _ => panic!("Invalid Ixon.DefinitionSafety tag: {}", tag), - } -} - -/// Decode Ixon.QuotKind (simple enum, raw tag value). 
-pub fn decode_ixon_quot_kind(ptr: *const c_void) -> QuotKind { - let tag = ptr as usize; - match tag { - 0 => QuotKind::Type, - 1 => QuotKind::Ctor, - 2 => QuotKind::Lift, - 3 => QuotKind::Ind, - _ => panic!("Invalid Ixon.QuotKind tag: {}", tag), - } -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// Round-trip Ixon.DefKind. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_def_kind( - ptr: *const c_void, -) -> *mut c_void { - let kind = decode_ixon_def_kind(ptr); - build_def_kind(&kind) -} - -/// Round-trip Ixon.DefinitionSafety. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition_safety( - ptr: *const c_void, -) -> *mut c_void { - let safety = decode_ixon_definition_safety(ptr); - build_ixon_definition_safety(&safety) -} - -/// Round-trip Ixon.QuotKind. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_quot_kind( - ptr: *const c_void, -) -> *mut c_void { - let kind = decode_ixon_quot_kind(ptr); - build_ixon_quot_kind(&kind) -} diff --git a/src/lean/ffi/ixon/env.rs b/src/lean/ffi/ixon/env.rs deleted file mode 100644 index 68781735..00000000 --- a/src/lean/ffi/ixon/env.rs +++ /dev/null @@ -1,474 +0,0 @@ -//! Ixon.RawEnv FFI build/decode/roundtrip functions. -//! -//! Provides full decode/build cycle for RawEnv and its component types: -//! RawConst, RawNamed, RawBlob, RawComm. 
- -use std::ffi::c_void; - -use crate::ix::address::Address; -use crate::ix::env::Name; -use crate::ix::ixon::comm::Comm; -use crate::ix::ixon::constant::Constant as IxonConstant; -use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; -use crate::ix::ixon::metadata::ConstantMeta; -use crate::lean::array::LeanArrayObject; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_mk_string, - lean_sarray_cptr, -}; - -use super::constant::{ - build_address_from_ixon, build_ixon_constant, decode_ixon_address, - decode_ixon_constant, -}; -use super::meta::{build_constant_meta, decode_constant_meta}; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; - -// ============================================================================= -// Comm Type (secret: Address, payload: Address) -// ============================================================================= - -/// Decoded Ixon.Comm -pub struct DecodedComm { - pub secret: Address, - pub payload: Address, -} - -/// Decode Ixon.Comm from Lean pointer. -/// Comm = { secret : Address, payload : Address } -pub fn decode_comm(ptr: *const c_void) -> DecodedComm { - unsafe { - let secret_ptr = lean_ctor_get(ptr as *mut _, 0); - let payload_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedComm { - secret: decode_ixon_address(secret_ptr), - payload: decode_ixon_address(payload_ptr), - } - } -} - -/// Build Ixon.Comm Lean object. 
-pub fn build_comm(comm: &DecodedComm) -> *mut c_void { - unsafe { - let secret_obj = build_address_from_ixon(&comm.secret); - let payload_obj = build_address_from_ixon(&comm.payload); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, secret_obj); - lean_ctor_set(obj, 1, payload_obj); - obj - } -} - -// ============================================================================= -// RawConst (addr: Address, const: Constant) -// ============================================================================= - -/// Decoded Ixon.RawConst -pub struct DecodedRawConst { - pub addr: Address, - pub constant: IxonConstant, -} - -/// Decode Ixon.RawConst from Lean pointer. -pub fn decode_raw_const(ptr: *const c_void) -> DecodedRawConst { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let const_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedRawConst { - addr: decode_ixon_address(addr_ptr), - constant: decode_ixon_constant(const_ptr), - } - } -} - -/// Build Ixon.RawConst Lean object. -pub fn build_raw_const(rc: &DecodedRawConst) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(&rc.addr); - let const_obj = build_ixon_constant(&rc.constant); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, const_obj); - obj - } -} - -// ============================================================================= -// RawNamed (name: Ix.Name, addr: Address, constMeta: ConstantMeta) -// ============================================================================= - -/// Decoded Ixon.RawNamed -pub struct DecodedRawNamed { - pub name: Name, - pub addr: Address, - pub const_meta: ConstantMeta, -} - -/// Decode Ixon.RawNamed from Lean pointer. 
-pub fn decode_raw_named(ptr: *const c_void) -> DecodedRawNamed { - unsafe { - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let addr_ptr = lean_ctor_get(ptr as *mut _, 1); - let meta_ptr = lean_ctor_get(ptr as *mut _, 2); - DecodedRawNamed { - name: decode_ix_name(name_ptr), - addr: decode_ixon_address(addr_ptr), - const_meta: decode_constant_meta(meta_ptr), - } - } -} - -/// Build Ixon.RawNamed Lean object. -pub fn build_raw_named( - cache: &mut LeanBuildCache, - rn: &DecodedRawNamed, -) -> *mut c_void { - unsafe { - let name_obj = build_name(cache, &rn.name); - let addr_obj = build_address_from_ixon(&rn.addr); - let meta_obj = build_constant_meta(&rn.const_meta); - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, addr_obj); - lean_ctor_set(obj, 2, meta_obj); - obj - } -} - -// ============================================================================= -// RawBlob (addr: Address, bytes: ByteArray) -// ============================================================================= - -/// Decoded Ixon.RawBlob -pub struct DecodedRawBlob { - pub addr: Address, - pub bytes: Vec, -} - -/// Decode Ixon.RawBlob from Lean pointer. -pub fn decode_raw_blob(ptr: *const c_void) -> DecodedRawBlob { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let bytes_ptr = lean_ctor_get(ptr as *mut _, 1); - let bytes_arr: &LeanSArrayObject = as_ref_unsafe(bytes_ptr.cast()); - DecodedRawBlob { - addr: decode_ixon_address(addr_ptr), - bytes: bytes_arr.data().to_vec(), - } - } -} - -/// Build Ixon.RawBlob Lean object. 
-pub fn build_raw_blob(rb: &DecodedRawBlob) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(&rb.addr); - // Build ByteArray (SArray UInt8) - let len = rb.bytes.len(); - let bytes_obj = lean_alloc_sarray(1, len, len); - let data_ptr = lean_sarray_cptr(bytes_obj); - std::ptr::copy_nonoverlapping(rb.bytes.as_ptr(), data_ptr, len); - - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, bytes_obj); - obj - } -} - -// ============================================================================= -// RawComm (addr: Address, comm: Comm) -// ============================================================================= - -/// Decoded Ixon.RawComm -pub struct DecodedRawComm { - pub addr: Address, - pub comm: DecodedComm, -} - -/// Decode Ixon.RawComm from Lean pointer. -pub fn decode_raw_comm(ptr: *const c_void) -> DecodedRawComm { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let comm_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedRawComm { - addr: decode_ixon_address(addr_ptr), - comm: decode_comm(comm_ptr), - } - } -} - -/// Build Ixon.RawComm Lean object. -pub fn build_raw_comm(rc: &DecodedRawComm) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(&rc.addr); - let comm_obj = build_comm(&rc.comm); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, comm_obj); - obj - } -} - -// ============================================================================= -// RawNameEntry (addr: Address, name: Ix.Name) -// ============================================================================= - -/// Decoded Ixon.RawNameEntry -pub struct DecodedRawNameEntry { - pub addr: Address, - pub name: Name, -} - -/// Decode Ixon.RawNameEntry from Lean pointer. 
-pub fn decode_raw_name_entry(ptr: *const c_void) -> DecodedRawNameEntry { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let name_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedRawNameEntry { - addr: decode_ixon_address(addr_ptr), - name: decode_ix_name(name_ptr), - } - } -} - -/// Build Ixon.RawNameEntry Lean object. -pub fn build_raw_name_entry( - cache: &mut LeanBuildCache, - addr: &Address, - name: &Name, -) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let name_obj = build_name(cache, name); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, name_obj); - obj - } -} - -// ============================================================================= -// RawEnv (consts, named, blobs, comms, names) -// ============================================================================= - -/// Decoded Ixon.RawEnv -pub struct DecodedRawEnv { - pub consts: Vec, - pub named: Vec, - pub blobs: Vec, - pub comms: Vec, - pub names: Vec, -} - -/// Decode Ixon.RawEnv from Lean pointer. 
-pub fn decode_raw_env(ptr: *const c_void) -> DecodedRawEnv { - unsafe { - let consts_ptr = lean_ctor_get(ptr as *mut _, 0); - let named_ptr = lean_ctor_get(ptr as *mut _, 1); - let blobs_ptr = lean_ctor_get(ptr as *mut _, 2); - let comms_ptr = lean_ctor_get(ptr as *mut _, 3); - let names_ptr = lean_ctor_get(ptr as *mut _, 4); - - let consts_arr: &LeanArrayObject = as_ref_unsafe(consts_ptr.cast()); - let named_arr: &LeanArrayObject = as_ref_unsafe(named_ptr.cast()); - let blobs_arr: &LeanArrayObject = as_ref_unsafe(blobs_ptr.cast()); - let comms_arr: &LeanArrayObject = as_ref_unsafe(comms_ptr.cast()); - let names_arr: &LeanArrayObject = as_ref_unsafe(names_ptr.cast()); - - DecodedRawEnv { - consts: consts_arr.to_vec(decode_raw_const), - named: named_arr.to_vec(decode_raw_named), - blobs: blobs_arr.to_vec(decode_raw_blob), - comms: comms_arr.to_vec(decode_raw_comm), - names: names_arr.to_vec(decode_raw_name_entry), - } - } -} - -/// Build Ixon.RawEnv Lean object. -pub fn build_raw_env(env: &DecodedRawEnv) -> *mut c_void { - unsafe { - let mut cache = LeanBuildCache::new(); - - // Build consts array - let consts_arr = lean_alloc_array(env.consts.len(), env.consts.len()); - for (i, rc) in env.consts.iter().enumerate() { - let obj = build_raw_const(rc); - lean_array_set_core(consts_arr, i, obj); - } - - // Build named array - let named_arr = lean_alloc_array(env.named.len(), env.named.len()); - for (i, rn) in env.named.iter().enumerate() { - let obj = build_raw_named(&mut cache, rn); - lean_array_set_core(named_arr, i, obj); - } - - // Build blobs array - let blobs_arr = lean_alloc_array(env.blobs.len(), env.blobs.len()); - for (i, rb) in env.blobs.iter().enumerate() { - let obj = build_raw_blob(rb); - lean_array_set_core(blobs_arr, i, obj); - } - - // Build comms array - let comms_arr = lean_alloc_array(env.comms.len(), env.comms.len()); - for (i, rc) in env.comms.iter().enumerate() { - let obj = build_raw_comm(rc); - lean_array_set_core(comms_arr, i, obj); - } - - // 
Build names array - let names_arr = lean_alloc_array(env.names.len(), env.names.len()); - for (i, rn) in env.names.iter().enumerate() { - let obj = build_raw_name_entry(&mut cache, &rn.addr, &rn.name); - lean_array_set_core(names_arr, i, obj); - } - - // Build RawEnv structure - let obj = lean_alloc_ctor(0, 5, 0); - lean_ctor_set(obj, 0, consts_arr); - lean_ctor_set(obj, 1, named_arr); - lean_ctor_set(obj, 2, blobs_arr); - lean_ctor_set(obj, 3, comms_arr); - lean_ctor_set(obj, 4, names_arr); - obj - } -} - -// ============================================================================= -// DecodedRawEnv ↔ IxonEnv Conversion Helpers -// ============================================================================= - -/// Reconstruct a Rust IxonEnv from a DecodedRawEnv. -pub fn decoded_to_ixon_env(decoded: &DecodedRawEnv) -> IxonEnv { - let env = IxonEnv::new(); - for rc in &decoded.consts { - env.store_const(rc.addr.clone(), rc.constant.clone()); - } - for rn in &decoded.names { - env.store_name(rn.addr.clone(), rn.name.clone()); - } - for rn in &decoded.named { - let named = IxonNamed::new(rn.addr.clone(), rn.const_meta.clone()); - env.register_name(rn.name.clone(), named); - } - for rb in &decoded.blobs { - env.blobs.insert(rb.addr.clone(), rb.bytes.clone()); - } - for rc in &decoded.comms { - let comm = - Comm { secret: rc.comm.secret.clone(), payload: rc.comm.payload.clone() }; - env.store_comm(rc.addr.clone(), comm); - } - env -} - -/// Convert a Rust IxonEnv to a DecodedRawEnv. 
-pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { - let consts = env - .consts - .iter() - .map(|e| DecodedRawConst { - addr: e.key().clone(), - constant: e.value().clone(), - }) - .collect(); - let named = env - .named - .iter() - .map(|e| DecodedRawNamed { - name: e.key().clone(), - addr: e.value().addr.clone(), - const_meta: e.value().meta.clone(), - }) - .collect(); - let blobs = env - .blobs - .iter() - .map(|e| DecodedRawBlob { addr: e.key().clone(), bytes: e.value().clone() }) - .collect(); - let comms = env - .comms - .iter() - .map(|e| DecodedRawComm { - addr: e.key().clone(), - comm: DecodedComm { - secret: e.value().secret.clone(), - payload: e.value().payload.clone(), - }, - }) - .collect(); - let names = env - .names - .iter() - .map(|e| DecodedRawNameEntry { - addr: e.key().clone(), - name: e.value().clone(), - }) - .collect(); - DecodedRawEnv { consts, named, blobs, comms, names } -} - -// ============================================================================= -// rs_ser_env: Serialize an Ixon.RawEnv to bytes -// ============================================================================= - -/// FFI: Serialize an Ixon.RawEnv → ByteArray via Rust's Env.put. Pure. -#[unsafe(no_mangle)] -pub extern "C" fn rs_ser_env(raw_env_ptr: *const c_void) -> *mut c_void { - let decoded = decode_raw_env(raw_env_ptr); - let env = decoded_to_ixon_env(&decoded); - let mut buf = Vec::new(); - env.put(&mut buf).expect("Env serialization failed"); - - unsafe { - let ba = lean_alloc_sarray(1, buf.len(), buf.len()); - std::ptr::copy_nonoverlapping( - buf.as_ptr(), - lean_sarray_cptr(ba), - buf.len(), - ); - ba - } -} - -// ============================================================================= -// rs_des_env: Deserialize bytes to an Ixon.RawEnv -// ============================================================================= - -/// FFI: Deserialize ByteArray → Except String Ixon.RawEnv via Rust's Env.get. Pure. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_des_env(bytes_ptr: *const c_void) -> *mut c_void { - let bytes_arr: &LeanSArrayObject = as_ref_unsafe(bytes_ptr.cast()); - let data = bytes_arr.data(); - let mut slice: &[u8] = data; - match IxonEnv::get(&mut slice) { - Ok(env) => { - let decoded = ixon_env_to_decoded(&env); - let raw_env = build_raw_env(&decoded); - // Except.ok (tag 1) - unsafe { - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, raw_env); - obj - } - }, - Err(e) => { - // Except.error (tag 0) - let msg = std::ffi::CString::new(format!("rs_des_env: {}", e)) - .unwrap_or_else(|_| { - std::ffi::CString::new("rs_des_env: deserialization error").unwrap() - }); - unsafe { - let lean_str = lean_mk_string(msg.as_ptr()); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, lean_str); - obj - } - }, - } -} diff --git a/src/lean/ffi/ixon/expr.rs b/src/lean/ffi/ixon/expr.rs deleted file mode 100644 index 060d91b2..00000000 --- a/src/lean/ffi/ixon/expr.rs +++ /dev/null @@ -1,287 +0,0 @@ -//! Ixon.Expr build/decode/roundtrip FFI. - -use std::ffi::c_void; -use std::sync::Arc; - -use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_obj_tag, -}; -use crate::lean_unbox; - -/// Build Ixon.Expr (12 constructors). 
-pub fn build_ixon_expr(expr: &IxonExpr) -> *mut c_void { - unsafe { - match expr { - IxonExpr::Sort(idx) => { - let obj = lean_alloc_ctor(0, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj - }, - IxonExpr::Var(idx) => { - let obj = lean_alloc_ctor(1, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj - }, - IxonExpr::Ref(ref_idx, univ_idxs) => { - let arr = lean_alloc_array(univ_idxs.len(), univ_idxs.len()); - for (i, idx) in univ_idxs.iter().enumerate() { - // Build heap-boxed UInt64: ctor with tag 0, 0 obj fields, 8 scalar bytes - let uint64_obj = lean_alloc_ctor(0, 0, 8); - let base = uint64_obj.cast::(); - *base.add(8).cast::() = *idx; - lean_array_set_core(arr, i, uint64_obj); - } - let obj = lean_alloc_ctor(2, 1, 8); - lean_ctor_set(obj, 0, arr); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = *ref_idx; - obj - }, - IxonExpr::Rec(rec_idx, univ_idxs) => { - let arr = lean_alloc_array(univ_idxs.len(), univ_idxs.len()); - for (i, idx) in univ_idxs.iter().enumerate() { - let uint64_obj = lean_alloc_ctor(0, 0, 8); - let base = uint64_obj.cast::(); - *base.add(8).cast::() = *idx; - lean_array_set_core(arr, i, uint64_obj); - } - let obj = lean_alloc_ctor(3, 1, 8); - lean_ctor_set(obj, 0, arr); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = *rec_idx; - obj - }, - IxonExpr::Prj(type_ref_idx, field_idx, val) => { - let val_obj = build_ixon_expr(val); - let obj = lean_alloc_ctor(4, 1, 16); - lean_ctor_set(obj, 0, val_obj); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = *type_ref_idx; - *base.add(8 + 16).cast::() = *field_idx; - obj - }, - IxonExpr::Str(ref_idx) => { - let obj = lean_alloc_ctor(5, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *ref_idx; - obj - }, - IxonExpr::Nat(ref_idx) => { - let obj = lean_alloc_ctor(6, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *ref_idx; - obj - }, - IxonExpr::App(fun, arg) => { - let fun_obj = build_ixon_expr(fun); - let arg_obj 
= build_ixon_expr(arg); - let obj = lean_alloc_ctor(7, 2, 0); - lean_ctor_set(obj, 0, fun_obj); - lean_ctor_set(obj, 1, arg_obj); - obj - }, - IxonExpr::Lam(ty, body) => { - let ty_obj = build_ixon_expr(ty); - let body_obj = build_ixon_expr(body); - let obj = lean_alloc_ctor(8, 2, 0); - lean_ctor_set(obj, 0, ty_obj); - lean_ctor_set(obj, 1, body_obj); - obj - }, - IxonExpr::All(ty, body) => { - let ty_obj = build_ixon_expr(ty); - let body_obj = build_ixon_expr(body); - let obj = lean_alloc_ctor(9, 2, 0); - lean_ctor_set(obj, 0, ty_obj); - lean_ctor_set(obj, 1, body_obj); - obj - }, - IxonExpr::Let(non_dep, ty, val, body) => { - let ty_obj = build_ixon_expr(ty); - let val_obj = build_ixon_expr(val); - let body_obj = build_ixon_expr(body); - let obj = lean_alloc_ctor(10, 3, 1); - lean_ctor_set(obj, 0, ty_obj); - lean_ctor_set(obj, 1, val_obj); - lean_ctor_set(obj, 2, body_obj); - let base = obj.cast::(); - *base.add(3 * 8 + 8) = if *non_dep { 1 } else { 0 }; - obj - }, - IxonExpr::Share(idx) => { - let obj = lean_alloc_ctor(11, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj - }, - } - } -} - -/// Build an Array of Ixon.Expr. -pub fn build_ixon_expr_array(exprs: &[Arc]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(exprs.len(), exprs.len()); - for (i, expr) in exprs.iter().enumerate() { - let expr_obj = build_ixon_expr(expr); - lean_array_set_core(arr, i, expr_obj); - } - arr - } -} - -// ============================================================================= -// Decode Functions -// ============================================================================= - -/// Decode Array UInt64 from Lean. 
-/// UInt64 values in arrays are stored as: -/// - Scalars (odd pointers) for small values: use lean_unbox -/// - Heap objects (even pointers) with the u64 value at offset 8 -fn decode_u64_array(ptr: *const c_void) -> Vec { - use crate::lean::lean_is_scalar; - - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|elem| { - if lean_is_scalar(elem) { - // Small scalar value - lean_unbox!(u64, elem) - } else { - // Heap-boxed UInt64: value is at offset 8 (after 8-byte header) - unsafe { - let base = elem.cast::(); - *base.add(8).cast::() - } - } - }) -} - -/// Decode Ixon.Expr (12 constructors). -pub fn decode_ixon_expr(ptr: *const c_void) -> IxonExpr { - unsafe { - let tag = lean_obj_tag(ptr.cast_mut()); - match tag { - 0 => { - // sort (idx : UInt64) - let base = ptr.cast::(); - let idx = *base.add(8).cast::(); - IxonExpr::Sort(idx) - }, - 1 => { - // var (idx : UInt64) - let base = ptr.cast::(); - let idx = *base.add(8).cast::(); - IxonExpr::Var(idx) - }, - 2 => { - // ref (refIdx : UInt64) (univIdxs : Array UInt64) - let arr_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let ref_idx = *base.add(8 + 8).cast::(); - let univ_idxs = decode_u64_array(arr_ptr); - IxonExpr::Ref(ref_idx, univ_idxs) - }, - 3 => { - // recur (recIdx : UInt64) (univIdxs : Array UInt64) - let arr_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let rec_idx = *base.add(8 + 8).cast::(); - let univ_idxs = decode_u64_array(arr_ptr); - IxonExpr::Rec(rec_idx, univ_idxs) - }, - 4 => { - // prj (typeRefIdx : UInt64) (fieldIdx : UInt64) (val : Expr) - let val_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let base = ptr.cast::(); - let type_ref_idx = *base.add(8 + 8).cast::(); - let field_idx = *base.add(8 + 16).cast::(); - IxonExpr::Prj( - type_ref_idx, - field_idx, - Arc::new(decode_ixon_expr(val_ptr)), - ) - }, - 5 => { - // str (refIdx : UInt64) - let base = ptr.cast::(); - let ref_idx = *base.add(8).cast::(); - 
IxonExpr::Str(ref_idx) - }, - 6 => { - // nat (refIdx : UInt64) - let base = ptr.cast::(); - let ref_idx = *base.add(8).cast::(); - IxonExpr::Nat(ref_idx) - }, - 7 => { - // app (f a : Expr) - let f_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let a_ptr = lean_ctor_get(ptr.cast_mut(), 1); - IxonExpr::App( - Arc::new(decode_ixon_expr(f_ptr)), - Arc::new(decode_ixon_expr(a_ptr)), - ) - }, - 8 => { - // lam (ty body : Expr) - let ty_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let body_ptr = lean_ctor_get(ptr.cast_mut(), 1); - IxonExpr::Lam( - Arc::new(decode_ixon_expr(ty_ptr)), - Arc::new(decode_ixon_expr(body_ptr)), - ) - }, - 9 => { - // all (ty body : Expr) - let ty_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let body_ptr = lean_ctor_get(ptr.cast_mut(), 1); - IxonExpr::All( - Arc::new(decode_ixon_expr(ty_ptr)), - Arc::new(decode_ixon_expr(body_ptr)), - ) - }, - 10 => { - // letE (nonDep : Bool) (ty val body : Expr) - let ty_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let val_ptr = lean_ctor_get(ptr.cast_mut(), 1); - let body_ptr = lean_ctor_get(ptr.cast_mut(), 2); - let base = ptr.cast::(); - let non_dep = *base.add(3 * 8 + 8) != 0; - IxonExpr::Let( - non_dep, - Arc::new(decode_ixon_expr(ty_ptr)), - Arc::new(decode_ixon_expr(val_ptr)), - Arc::new(decode_ixon_expr(body_ptr)), - ) - }, - 11 => { - // share (idx : UInt64) - let base = ptr.cast::(); - let idx = *base.add(8).cast::(); - IxonExpr::Share(idx) - }, - _ => panic!("Invalid Ixon.Expr tag: {}", tag), - } - } -} - -/// Decode Array Ixon.Expr. -pub fn decode_ixon_expr_array(ptr: *const c_void) -> Vec> { - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|e| Arc::new(decode_ixon_expr(e))) -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// Round-trip Ixon.Expr. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr(ptr: *const c_void) -> *mut c_void { - let expr = decode_ixon_expr(ptr); - build_ixon_expr(&expr) -} diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs deleted file mode 100644 index dafe11f7..00000000 --- a/src/lean/ffi/ixon/meta.rs +++ /dev/null @@ -1,677 +0,0 @@ -//! Ixon metadata types build/decode/roundtrip FFI. -//! -//! Includes: DataValue, KVMap, ExprMetaData, ExprMetaArena, ConstantMeta, Named, Comm - -use std::ffi::c_void; - -use crate::ix::address::Address; -use crate::ix::env::BinderInfo; -use crate::ix::ixon::Comm; -use crate::ix::ixon::env::Named; -use crate::ix::ixon::metadata::{ - ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, -}; -use crate::lean::array::LeanArrayObject; -use crate::lean::ctor::LeanCtorObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, - lean_ctor_set_uint64, lean_is_scalar, lean_obj_tag, -}; - -use super::constant::{ - build_address_array, build_address_from_ixon, decode_ixon_address, -}; -use crate::lean::ffi::ix::constant::{ - build_reducibility_hints, decode_reducibility_hints, -}; -use crate::lean::ffi::ix::expr::binder_info_to_u8; - -// ============================================================================= -// DataValue Build/Decode -// ============================================================================= - -/// Build Ixon.DataValue (for metadata) -pub fn build_ixon_data_value(dv: &IxonDataValue) -> *mut c_void { - unsafe { - match dv { - IxonDataValue::OfString(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj - }, - IxonDataValue::OfBool(b) => { - let obj = lean_alloc_ctor(1, 0, 1); - lean_ctor_set_uint8(obj, 0, if *b { 1 } else { 0 }); - obj - }, - IxonDataValue::OfName(addr) => { - let addr_obj = 
build_address_from_ixon(addr); - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj - }, - IxonDataValue::OfNat(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj - }, - IxonDataValue::OfInt(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj - }, - IxonDataValue::OfSyntax(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj - }, - } - } -} - -/// Decode Ixon.DataValue. -pub fn decode_ixon_data_value(ptr: *const c_void) -> IxonDataValue { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfString(decode_ixon_address(addr_ptr)) - }, - 1 => { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let b = ctor.get_scalar_u8(0, 0) != 0; - IxonDataValue::OfBool(b) - }, - 2 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfName(decode_ixon_address(addr_ptr)) - }, - 3 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfNat(decode_ixon_address(addr_ptr)) - }, - 4 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfInt(decode_ixon_address(addr_ptr)) - }, - 5 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfSyntax(decode_ixon_address(addr_ptr)) - }, - _ => panic!("Invalid Ixon.DataValue tag: {}", tag), - } - } -} - -// ============================================================================= -// KVMap Build/Decode -// ============================================================================= - -/// Build an Ixon.KVMap (Array (Address × DataValue)). 
-pub fn build_ixon_kvmap(kvmap: &KVMap) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(kvmap.len(), kvmap.len()); - for (i, (addr, dv)) in kvmap.iter().enumerate() { - let addr_obj = build_address_from_ixon(addr); - let dv_obj = build_ixon_data_value(dv); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, addr_obj); - lean_ctor_set(pair, 1, dv_obj); - lean_array_set_core(arr, i, pair); - } - arr - } -} - -/// Build Array KVMap. -pub fn build_kvmap_array(kvmaps: &[KVMap]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(kvmaps.len(), kvmaps.len()); - for (i, kvmap) in kvmaps.iter().enumerate() { - let kvmap_obj = build_ixon_kvmap(kvmap); - lean_array_set_core(arr, i, kvmap_obj); - } - arr - } -} - -/// Decode KVMap (Array (Address × DataValue)). -pub fn decode_ixon_kvmap(ptr: *const c_void) -> KVMap { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|pair| unsafe { - let addr_ptr = lean_ctor_get(pair as *mut _, 0); - let dv_ptr = lean_ctor_get(pair as *mut _, 1); - (decode_ixon_address(addr_ptr), decode_ixon_data_value(dv_ptr)) - }) -} - -/// Decode Array KVMap. -fn decode_kvmap_array(ptr: *const c_void) -> Vec { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(decode_ixon_kvmap) -} - -// ============================================================================= -// Address Array Helpers -// ============================================================================= - -/// Decode Array Address. -fn decode_address_array(ptr: *const c_void) -> Vec
{ - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(decode_ixon_address) -} - -/// Build Array UInt64. -fn build_u64_array(vals: &[u64]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(vals.len(), vals.len()); - for (i, &v) in vals.iter().enumerate() { - let obj = crate::lean::lean_box_u64(v); - lean_array_set_core(arr, i, obj); - } - arr - } -} - -/// Decode Array UInt64. -fn decode_u64_array(ptr: *const c_void) -> Vec { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(crate::lean::lean_unbox_u64) -} - -// ============================================================================= -// ExprMetaData Build/Decode -// ============================================================================= - -/// Build Ixon.ExprMetaData Lean object. -/// -/// | Variant | Tag | Obj fields | Scalar bytes | -/// |------------|-----|------------------------|--------------------------| -/// | leaf | 0 | 0 | 0 | -/// | app | 1 | 0 | 16 (2× u64) | -/// | binder | 2 | 1 (name: Address) | 17 (info: u8, 2× u64) | -/// | letBinder | 3 | 1 (name: Address) | 24 (3× u64) | -/// | ref | 4 | 1 (name: Address) | 0 | -/// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | -/// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | -pub fn build_expr_meta_data(node: &ExprMetaData) -> *mut c_void { - unsafe { - match node { - ExprMetaData::Leaf => lean_box_fn(0), - - ExprMetaData::App { children } => { - // Tag 1, 0 obj fields, 16 scalar bytes (2× u64) - let obj = lean_alloc_ctor(1, 0, 16); - lean_ctor_set_uint64(obj, 0, children[0]); - lean_ctor_set_uint64(obj, 8, children[1]); - obj - }, - - ExprMetaData::Binder { name, info, children } => { - // Tag 2, 1 obj field (name), scalar: 2× u64 + u8 (info) - // Lean ABI sorts scalars by size descending: [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] - let obj = lean_alloc_ctor(2, 1, 17); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set_uint64(obj, 8, children[0]); - 
lean_ctor_set_uint64(obj, 8 + 8, children[1]); - lean_ctor_set_uint8(obj, 8 + 16, binder_info_to_u8(info)); - obj - }, - - ExprMetaData::LetBinder { name, children } => { - // Tag 3, 1 obj field (name), 24 scalar bytes (3× u64) - let obj = lean_alloc_ctor(3, 1, 24); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set_uint64(obj, 8, children[0]); - lean_ctor_set_uint64(obj, 8 + 8, children[1]); - lean_ctor_set_uint64(obj, 8 + 16, children[2]); - obj - }, - - ExprMetaData::Ref { name } => { - // Tag 4, 1 obj field (name), 0 scalar bytes - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - obj - }, - - ExprMetaData::Prj { struct_name, child } => { - // Tag 5, 1 obj field (structName), 8 scalar bytes (1× u64) - let obj = lean_alloc_ctor(5, 1, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(struct_name)); - lean_ctor_set_uint64(obj, 8, *child); - obj - }, - - ExprMetaData::Mdata { mdata, child } => { - // Tag 6, 1 obj field (mdata: Array KVMap), 8 scalar bytes (1× u64) - let mdata_obj = build_kvmap_array(mdata); - let obj = lean_alloc_ctor(6, 1, 8); - lean_ctor_set(obj, 0, mdata_obj); - lean_ctor_set_uint64(obj, 8, *child); - obj - }, - } - } -} - -/// Decode Ixon.ExprMetaData from Lean pointer. 
-pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { - unsafe { - // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) - if lean_is_scalar(ptr) { - let tag = (ptr as usize) >> 1; - assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {}", tag); - return ExprMetaData::Leaf; - } - let tag = lean_obj_tag(ptr as *mut _); - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match tag { - 1 => { - // app: 0 obj fields, 2× u64 scalar - let fun_ = ctor.get_scalar_u64(0, 0); - let arg = ctor.get_scalar_u64(0, 8); - ExprMetaData::App { children: [fun_, arg] } - }, - - 2 => { - // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): - // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_child = ctor.get_scalar_u64(1, 0); - let body_child = ctor.get_scalar_u64(1, 8); - let info_byte = ctor.get_scalar_u8(1, 16); - let info = match info_byte { - 0 => BinderInfo::Default, - 1 => BinderInfo::Implicit, - 2 => BinderInfo::StrictImplicit, - 3 => BinderInfo::InstImplicit, - _ => panic!("Invalid BinderInfo tag: {}", info_byte), - }; - ExprMetaData::Binder { - name: decode_ixon_address(name_ptr), - info, - children: [ty_child, body_child], - } - }, - - 3 => { - // letBinder: 1 obj field (name), 3× u64 scalar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_child = ctor.get_scalar_u64(1, 0); - let val_child = ctor.get_scalar_u64(1, 8); - let body_child = ctor.get_scalar_u64(1, 16); - ExprMetaData::LetBinder { - name: decode_ixon_address(name_ptr), - children: [ty_child, val_child, body_child], - } - }, - - 4 => { - // ref: 1 obj field (name), 0 scalar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - ExprMetaData::Ref { name: decode_ixon_address(name_ptr) } - }, - - 5 => { - // prj: 1 obj field (structName), 1× u64 scalar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let child = ctor.get_scalar_u64(1, 0); - ExprMetaData::Prj { struct_name: 
decode_ixon_address(name_ptr), child } - }, - - 6 => { - // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar - let mdata_ptr = lean_ctor_get(ptr as *mut _, 0); - let child = ctor.get_scalar_u64(1, 0); - ExprMetaData::Mdata { mdata: decode_kvmap_array(mdata_ptr), child } - }, - - _ => panic!("Invalid Ixon.ExprMetaData tag: {}", tag), - } - } -} - -// ============================================================================= -// ExprMetaArena Build/Decode -// ============================================================================= - -/// Build Ixon.ExprMetaArena Lean object. -/// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), -/// which Lean unboxes — the value IS the Array directly. -pub fn build_expr_meta_arena(arena: &ExprMeta) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(arena.nodes.len(), arena.nodes.len()); - for (i, node) in arena.nodes.iter().enumerate() { - lean_array_set_core(arr, i, build_expr_meta_data(node)); - } - arr - } -} - -/// Decode Ixon.ExprMetaArena from Lean pointer. -/// Single-field struct is unboxed — ptr IS the Array directly. -pub fn decode_expr_meta_arena(ptr: *const c_void) -> ExprMeta { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - ExprMeta { nodes: arr.to_vec(decode_expr_meta_data) } -} - -// ============================================================================= -// ConstantMeta Build/Decode -// ============================================================================= - -/// Build Ixon.ConstantMeta Lean object. 
-/// -/// | Variant | Tag | Obj fields | Scalar bytes | -/// |---------|-----|-----------|-------------| -/// | empty | 0 | 0 | 0 | -/// | defn | 1 | 6 (name, lvls, hints, all, ctx, arena) | 16 (2× u64) | -/// | axio | 2 | 3 (name, lvls, arena) | 8 (1× u64) | -/// | quot | 3 | 3 (name, lvls, arena) | 8 (1× u64) | -/// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | -/// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | -/// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | -pub fn build_constant_meta(meta: &ConstantMeta) -> *mut c_void { - unsafe { - match meta { - ConstantMeta::Empty => lean_box_fn(0), - - ConstantMeta::Def { - name, - lvls, - hints, - all, - ctx, - arena, - type_root, - value_root, - } => { - let obj = lean_alloc_ctor(1, 6, 16); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_reducibility_hints(hints)); - lean_ctor_set(obj, 3, build_address_array(all)); - lean_ctor_set(obj, 4, build_address_array(ctx)); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena)); - lean_ctor_set_uint64(obj, 6 * 8, *type_root); - lean_ctor_set_uint64(obj, 6 * 8 + 8, *value_root); - obj - }, - - ConstantMeta::Axio { name, lvls, arena, type_root } => { - let obj = lean_alloc_ctor(2, 3, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_expr_meta_arena(arena)); - lean_ctor_set_uint64(obj, 3 * 8, *type_root); - obj - }, - - ConstantMeta::Quot { name, lvls, arena, type_root } => { - let obj = lean_alloc_ctor(3, 3, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_expr_meta_arena(arena)); - lean_ctor_set_uint64(obj, 3 * 8, *type_root); - obj - }, - - ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { - let obj = lean_alloc_ctor(4, 
6, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_address_array(ctors)); - lean_ctor_set(obj, 3, build_address_array(all)); - lean_ctor_set(obj, 4, build_address_array(ctx)); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena)); - lean_ctor_set_uint64(obj, 6 * 8, *type_root); - obj - }, - - ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { - let obj = lean_alloc_ctor(5, 4, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_address_from_ixon(induct)); - lean_ctor_set(obj, 3, build_expr_meta_arena(arena)); - lean_ctor_set_uint64(obj, 4 * 8, *type_root); - obj - }, - - ConstantMeta::Rec { - name, - lvls, - rules, - all, - ctx, - arena, - type_root, - rule_roots, - } => { - let obj = lean_alloc_ctor(6, 7, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_address_array(rules)); - lean_ctor_set(obj, 3, build_address_array(all)); - lean_ctor_set(obj, 4, build_address_array(ctx)); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena)); - lean_ctor_set(obj, 6, build_u64_array(rule_roots)); - lean_ctor_set_uint64(obj, 7 * 8, *type_root); - obj - }, - } - } -} - -/// Decode Ixon.ConstantMeta from Lean pointer. 
-pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { - unsafe { - // Empty (tag 0, no fields) is represented as a scalar lean_box(0) - if lean_is_scalar(ptr) { - let tag = (ptr as usize) >> 1; - assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {}", tag); - return ConstantMeta::Empty; - } - let tag = lean_obj_tag(ptr as *mut _); - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match tag { - 1 => { - // defn: 6 obj fields, 2× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let hints = decode_reducibility_hints(lean_ctor_get(ptr as *mut _, 2)); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3)); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5)); - let type_root = ctor.get_scalar_u64(6, 0); - let value_root = ctor.get_scalar_u64(6, 8); - ConstantMeta::Def { - name, - lvls, - hints, - all, - ctx, - arena, - type_root, - value_root, - } - }, - - 2 => { - // axio: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2)); - let type_root = ctor.get_scalar_u64(3, 0); - ConstantMeta::Axio { name, lvls, arena, type_root } - }, - - 3 => { - // quot: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2)); - let type_root = ctor.get_scalar_u64(3, 0); - ConstantMeta::Quot { name, lvls, arena, type_root } - }, - - 4 => { - // indc: 6 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let ctors = 
decode_address_array(lean_ctor_get(ptr as *mut _, 2)); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3)); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5)); - let type_root = ctor.get_scalar_u64(6, 0); - ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } - }, - - 5 => { - // ctor: 4 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let induct = decode_ixon_address(lean_ctor_get(ptr as *mut _, 2)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 3)); - let type_root = ctor.get_scalar_u64(4, 0); - ConstantMeta::Ctor { name, lvls, induct, arena, type_root } - }, - - 6 => { - // recr: 7 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let rules = decode_address_array(lean_ctor_get(ptr as *mut _, 2)); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3)); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5)); - let rule_roots = decode_u64_array(lean_ctor_get(ptr as *mut _, 6)); - let type_root = ctor.get_scalar_u64(7, 0); - ConstantMeta::Rec { - name, - lvls, - rules, - all, - ctx, - arena, - type_root, - rule_roots, - } - }, - - _ => panic!("Invalid Ixon.ConstantMeta tag: {}", tag), - } - } -} - -// ============================================================================= -// Named and Comm Build/Decode -// ============================================================================= - -/// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } -pub fn build_named(addr: &Address, meta: &ConstantMeta) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let meta_obj = 
build_constant_meta(meta); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, meta_obj); - obj - } -} - -/// Decode Ixon.Named. -pub fn decode_named(ptr: *const c_void) -> Named { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let meta_ptr = lean_ctor_get(ptr as *mut _, 1); - Named { - addr: decode_ixon_address(addr_ptr), - meta: decode_constant_meta(meta_ptr), - } - } -} - -/// Build Ixon.Comm { secret : Address, payload : Address } -pub fn build_ixon_comm(comm: &Comm) -> *mut c_void { - unsafe { - let secret_obj = build_address_from_ixon(&comm.secret); - let payload_obj = build_address_from_ixon(&comm.payload); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, secret_obj); - lean_ctor_set(obj, 1, payload_obj); - obj - } -} - -/// Decode Ixon.Comm. -pub fn decode_ixon_comm(ptr: *const c_void) -> Comm { - unsafe { - let secret_ptr = lean_ctor_get(ptr as *mut _, 0); - let payload_ptr = lean_ctor_get(ptr as *mut _, 1); - Comm { - secret: decode_ixon_address(secret_ptr), - payload: decode_ixon_address(payload_ptr), - } - } -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// Round-trip Ixon.DataValue. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_data_value( - ptr: *const c_void, -) -> *mut c_void { - let dv = decode_ixon_data_value(ptr); - build_ixon_data_value(&dv) -} - -/// Round-trip Ixon.Comm. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_comm(ptr: *const c_void) -> *mut c_void { - let comm = decode_ixon_comm(ptr); - build_ixon_comm(&comm) -} - -/// Round-trip Ixon.ExprMetaData. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( - ptr: *const c_void, -) -> *mut c_void { - let node = decode_expr_meta_data(ptr); - build_expr_meta_data(&node) -} - -/// Round-trip Ixon.ExprMetaArena. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( - ptr: *const c_void, -) -> *mut c_void { - let arena = decode_expr_meta_arena(ptr); - build_expr_meta_arena(&arena) -} - -/// Round-trip Ixon.ConstantMeta (full arena-based). -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant_meta( - ptr: *const c_void, -) -> *mut c_void { - let meta = decode_constant_meta(ptr); - build_constant_meta(&meta) -} - -/// Round-trip Ixon.Named (with real metadata). -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_named(ptr: *const c_void) -> *mut c_void { - let named = decode_named(ptr); - build_named(&named.addr, &named.meta) -} diff --git a/src/lean/ffi/ixon/serialize.rs b/src/lean/ffi/ixon/serialize.rs deleted file mode 100644 index e9c7eb22..00000000 --- a/src/lean/ffi/ixon/serialize.rs +++ /dev/null @@ -1,322 +0,0 @@ -//! Ixon serialization compatibility FFI. -//! -//! Contains FFI functions for comparing Lean and Rust serialization outputs, -//! and Env serialization roundtrip testing. - -use std::ffi::c_void; -use std::sync::Arc; - -use crate::ix::address::Address; -use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::ix::ixon::serialize::put_expr; -use crate::ix::ixon::sharing::hash_expr; -use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; -use crate::lean::array::LeanArrayObject; -use crate::lean::ctor::LeanCtorObject; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::{as_ref_unsafe, lean_is_scalar, lean_unbox_u64}; - -use super::constant::{decode_ixon_address, decode_ixon_constant}; - -/// Unbox a Lean UInt64, handling both scalar and boxed representations. -fn lean_ptr_to_u64(ptr: *const c_void) -> u64 { - if lean_is_scalar(ptr) { - (ptr as usize >> 1) as u64 - } else { - lean_unbox_u64(ptr) - } -} - -/// Decode a Lean `Ixon.Expr` to a Rust `IxonExpr`. 
-pub fn lean_ptr_to_ixon_expr(ptr: *const c_void) -> Arc { - assert!(!lean_is_scalar(ptr), "Ixon.Expr should not be scalar"); - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { - 0 => { - let idx = ctor.get_scalar_u64(0, 0); - Arc::new(IxonExpr::Sort(idx)) - }, - 1 => { - let idx = ctor.get_scalar_u64(0, 0); - Arc::new(IxonExpr::Var(idx)) - }, - 2 => { - let [univs_ptr] = ctor.objs(); - let ref_idx = ctor.get_scalar_u64(1, 0); - let univs_arr: &LeanArrayObject = as_ref_unsafe(univs_ptr.cast()); - let univs = univs_arr.to_vec(lean_ptr_to_u64); - Arc::new(IxonExpr::Ref(ref_idx, univs)) - }, - 3 => { - let [univs_ptr] = ctor.objs(); - let rec_idx = ctor.get_scalar_u64(1, 0); - let univs_arr: &LeanArrayObject = as_ref_unsafe(univs_ptr.cast()); - let univs = univs_arr.to_vec(lean_ptr_to_u64); - Arc::new(IxonExpr::Rec(rec_idx, univs)) - }, - 4 => { - let [val_ptr] = ctor.objs(); - let type_idx = ctor.get_scalar_u64(1, 0); - let field_idx = ctor.get_scalar_u64(1, 8); - let val = lean_ptr_to_ixon_expr(val_ptr); - Arc::new(IxonExpr::Prj(type_idx, field_idx, val)) - }, - 5 => { - let idx = ctor.get_scalar_u64(0, 0); - Arc::new(IxonExpr::Str(idx)) - }, - 6 => { - let idx = ctor.get_scalar_u64(0, 0); - Arc::new(IxonExpr::Nat(idx)) - }, - 7 => { - let [fun_ptr, arg_ptr] = ctor.objs(); - let fun_ = lean_ptr_to_ixon_expr(fun_ptr); - let arg = lean_ptr_to_ixon_expr(arg_ptr); - Arc::new(IxonExpr::App(fun_, arg)) - }, - 8 => { - let [ty_ptr, body_ptr] = ctor.objs(); - let ty = lean_ptr_to_ixon_expr(ty_ptr); - let body = lean_ptr_to_ixon_expr(body_ptr); - Arc::new(IxonExpr::Lam(ty, body)) - }, - 9 => { - let [ty_ptr, body_ptr] = ctor.objs(); - let ty = lean_ptr_to_ixon_expr(ty_ptr); - let body = lean_ptr_to_ixon_expr(body_ptr); - Arc::new(IxonExpr::All(ty, body)) - }, - 10 => { - let [ty_ptr, val_ptr, body_ptr] = ctor.objs(); - let base_ptr = (ctor as *const LeanCtorObject).cast::(); - let non_dep = unsafe { *base_ptr.add(8 + 3 * 8) } != 0; - let ty = 
lean_ptr_to_ixon_expr(ty_ptr); - let val = lean_ptr_to_ixon_expr(val_ptr); - let body = lean_ptr_to_ixon_expr(body_ptr); - Arc::new(IxonExpr::Let(non_dep, ty, val, body)) - }, - 11 => { - let idx = ctor.get_scalar_u64(0, 0); - Arc::new(IxonExpr::Share(idx)) - }, - tag => panic!("Unknown Ixon.Expr tag: {}", tag), - } -} - -/// Check if Lean's computed hash matches Rust's computed hash. -#[unsafe(no_mangle)] -pub extern "C" fn rs_expr_hash_matches( - expr_ptr: *const c_void, - expected_hash: *const c_void, -) -> bool { - let expr = lean_ptr_to_ixon_expr(expr_ptr); - let hash = hash_expr(&expr); - let expected = decode_ixon_address(expected_hash); - Address::from_slice(hash.as_bytes()).is_ok_and(|h| h == expected) -} - -/// Decode a Lean `Ixon.Univ` to a Rust `IxonUniv`. -fn lean_ptr_to_ixon_univ(ptr: *const c_void) -> Arc { - if lean_is_scalar(ptr) { - return IxonUniv::zero(); - } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { - 1 => { - let [inner] = ctor.objs(); - IxonUniv::succ(lean_ptr_to_ixon_univ(inner)) - }, - 2 => { - let [a, b] = ctor.objs(); - IxonUniv::max(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) - }, - 3 => { - let [a, b] = ctor.objs(); - IxonUniv::imax(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) - }, - 4 => IxonUniv::var(ctor.get_scalar_u64(0, 0)), - tag => panic!("Unknown Ixon.Univ tag: {}", tag), - } -} - -/// Check if Lean's Ixon.Univ serialization matches Rust. -#[unsafe(no_mangle)] -pub extern "C" fn rs_eq_univ_serialization( - univ_ptr: *const c_void, - bytes: &LeanSArrayObject, -) -> bool { - let univ = lean_ptr_to_ixon_univ(univ_ptr); - let bytes_data = bytes.data(); - let mut buf = Vec::with_capacity(bytes_data.len()); - put_univ(&univ, &mut buf); - buf == bytes_data -} - -/// Check if Lean's Ixon.Expr serialization matches Rust. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_eq_expr_serialization( - expr_ptr: *const c_void, - bytes: &LeanSArrayObject, -) -> bool { - let expr = lean_ptr_to_ixon_expr(expr_ptr); - let bytes_data = bytes.data(); - let mut buf = Vec::with_capacity(bytes_data.len()); - put_expr(&expr, &mut buf); - buf == bytes_data -} - -/// Check if Lean's Ixon.Constant serialization matches Rust. -#[unsafe(no_mangle)] -pub extern "C" fn rs_eq_constant_serialization( - constant_ptr: *const c_void, - bytes: &LeanSArrayObject, -) -> bool { - let constant = decode_ixon_constant(constant_ptr); - let bytes_data = bytes.data(); - let mut buf = Vec::with_capacity(bytes_data.len()); - constant.put(&mut buf); - buf == bytes_data -} - -/// Check if Lean's Ixon.Env serialization can be deserialized by Rust and content matches. -/// Due to HashMap ordering differences, we compare deserialized content rather than bytes. -#[unsafe(no_mangle)] -pub extern "C" fn rs_eq_env_serialization( - raw_env_ptr: *const c_void, - bytes: &LeanSArrayObject, -) -> bool { - use super::env::decode_raw_env; - use crate::ix::ixon::env::Env; - - let decoded = decode_raw_env(raw_env_ptr); - let bytes_data = bytes.data(); - - // Deserialize Lean's bytes using Rust's deserializer - let rust_env = match Env::get(&mut &bytes_data[..]) { - Ok(env) => env, - Err(_) => return false, - }; - - // Compare content: check that all items from decoded RawEnv are in the deserialized Env - // Consts - if rust_env.consts.len() != decoded.consts.len() { - return false; - } - for rc in &decoded.consts { - match rust_env.consts.get(&rc.addr) { - Some(c) if *c == rc.constant => {}, - _ => return false, - } - } - - // Blobs - if rust_env.blobs.len() != decoded.blobs.len() { - return false; - } - for rb in &decoded.blobs { - match rust_env.blobs.get(&rb.addr) { - Some(b) if *b == rb.bytes => {}, - _ => return false, - } - } - - // Comms - if rust_env.comms.len() != decoded.comms.len() { - return false; - } - for rc in &decoded.comms { - 
let expected_comm = crate::ix::ixon::comm::Comm { - secret: rc.comm.secret.clone(), - payload: rc.comm.payload.clone(), - }; - match rust_env.comms.get(&rc.addr) { - Some(c) if *c == expected_comm => {}, - _ => return false, - } - } - - // Named: compare by checking all entries exist with matching addresses - if rust_env.named.len() != decoded.named.len() { - return false; - } - for rn in &decoded.named { - match rust_env.named.get(&rn.name) { - Some(named) if named.addr == rn.addr => {}, - _ => return false, - } - } - - true -} - -/// FFI: Test Env serialization roundtrip. -/// Takes: -/// - lean_bytes_ptr: pointer to ByteArray containing serialized Env from Lean -/// -/// Returns: true if Rust can deserialize and re-serialize to the same bytes -#[unsafe(no_mangle)] -extern "C" fn rs_env_serde_roundtrip(lean_bytes_ptr: *const c_void) -> bool { - use crate::ix::ixon::env::Env; - - // Get bytes from Lean ByteArray - let bytes_arr: &LeanSArrayObject = as_ref_unsafe(lean_bytes_ptr.cast()); - let lean_bytes = bytes_arr.data().to_vec(); - - // Try to deserialize with Rust - let mut slice = lean_bytes.as_slice(); - let env = match Env::get(&mut slice) { - Ok(e) => e, - Err(e) => { - eprintln!("Rust Env::get failed: {}", e); - return false; - }, - }; - - // Re-serialize - let mut rust_bytes = Vec::new(); - if let Err(e) = env.put(&mut rust_bytes) { - eprintln!("Rust Env::put failed: {}", e); - return false; - } - - // Compare - if lean_bytes != rust_bytes { - eprintln!("Env roundtrip mismatch:"); - eprintln!(" Input: {} bytes", lean_bytes.len()); - eprintln!(" Output: {} bytes", rust_bytes.len()); - if lean_bytes.len() <= 200 { - eprintln!(" Input bytes: {:?}", lean_bytes); - } - if rust_bytes.len() <= 200 { - eprintln!(" Output bytes: {:?}", rust_bytes); - } - return false; - } - - true -} - -/// FFI: Compare Env serialization between Lean and Rust. 
-/// Takes: -/// - lean_bytes_ptr: pointer to ByteArray containing serialized Env from Lean -/// -/// Returns: true if Rust can deserialize and the counts match -#[unsafe(no_mangle)] -extern "C" fn rs_env_serde_check(lean_bytes_ptr: *const c_void) -> bool { - use crate::ix::ixon::env::Env; - - // Get bytes from Lean ByteArray - let bytes_arr: &LeanSArrayObject = as_ref_unsafe(lean_bytes_ptr.cast()); - let lean_bytes = bytes_arr.data().to_vec(); - - // Try to deserialize with Rust - let mut slice = lean_bytes.as_slice(); - match Env::get(&mut slice) { - Ok(_) => true, - Err(e) => { - eprintln!("Rust Env::get failed: {}", e); - false - }, - } -} diff --git a/src/lean/ffi/ixon/univ.rs b/src/lean/ffi/ixon/univ.rs deleted file mode 100644 index 3558c244..00000000 --- a/src/lean/ffi/ixon/univ.rs +++ /dev/null @@ -1,126 +0,0 @@ -//! Ixon.Univ build/decode/roundtrip FFI. - -use std::ffi::c_void; -use std::sync::Arc; - -use crate::ix::ixon::univ::Univ as IxonUniv; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_is_scalar, lean_obj_tag, -}; - -/// Build Ixon.Univ -pub fn build_ixon_univ(univ: &IxonUniv) -> *mut c_void { - unsafe { - match univ { - IxonUniv::Zero => lean_box_fn(0), - IxonUniv::Succ(inner) => { - let inner_obj = build_ixon_univ(inner); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, inner_obj); - obj - }, - IxonUniv::Max(a, b) => { - let a_obj = build_ixon_univ(a); - let b_obj = build_ixon_univ(b); - let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, a_obj); - lean_ctor_set(obj, 1, b_obj); - obj - }, - IxonUniv::IMax(a, b) => { - let a_obj = build_ixon_univ(a); - let b_obj = build_ixon_univ(b); - let obj = lean_alloc_ctor(3, 2, 0); - lean_ctor_set(obj, 0, a_obj); - lean_ctor_set(obj, 1, b_obj); - obj - }, - IxonUniv::Var(idx) => { - let obj = lean_alloc_ctor(4, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj - }, - } - } 
-} - -/// Build an Array of Ixon.Univ. -pub fn build_ixon_univ_array(univs: &[Arc]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(univs.len(), univs.len()); - for (i, univ) in univs.iter().enumerate() { - let univ_obj = build_ixon_univ(univ); - lean_array_set_core(arr, i, univ_obj); - } - arr - } -} - -// ============================================================================= -// Decode Functions -// ============================================================================= - -/// Decode Ixon.Univ (recursive enum). -/// | zero -- tag 0 (no fields) -/// | succ (u : Univ) -- tag 1 -/// | max (a b : Univ) -- tag 2 -/// | imax (a b : Univ) -- tag 3 -/// | var (idx : UInt64) -- tag 4 (scalar field) -pub fn decode_ixon_univ(ptr: *const c_void) -> IxonUniv { - unsafe { - // Note: .zero is a nullary constructor with tag 0, represented as lean_box(0) - if lean_is_scalar(ptr) { - return IxonUniv::Zero; - } - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => IxonUniv::Zero, - 1 => { - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonUniv::Succ(Arc::new(decode_ixon_univ(inner_ptr))) - }, - 2 => { - let a_ptr = lean_ctor_get(ptr as *mut _, 0); - let b_ptr = lean_ctor_get(ptr as *mut _, 1); - IxonUniv::Max( - Arc::new(decode_ixon_univ(a_ptr)), - Arc::new(decode_ixon_univ(b_ptr)), - ) - }, - 3 => { - let a_ptr = lean_ctor_get(ptr as *mut _, 0); - let b_ptr = lean_ctor_get(ptr as *mut _, 1); - IxonUniv::IMax( - Arc::new(decode_ixon_univ(a_ptr)), - Arc::new(decode_ixon_univ(b_ptr)), - ) - }, - 4 => { - // scalar field: UInt64 at offset 8 (after header) - let base = ptr.cast::(); - let idx = *(base.add(8).cast::()); - IxonUniv::Var(idx) - }, - _ => panic!("Invalid Ixon.Univ tag: {}", tag), - } - } -} - -/// Decode Array Ixon.Univ. 
-pub fn decode_ixon_univ_array(ptr: *const c_void) -> Vec> { - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|u| Arc::new(decode_ixon_univ(u))) -} - -// ============================================================================= -// FFI Exports -// ============================================================================= - -/// Round-trip Ixon.Univ. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_univ(ptr: *const c_void) -> *mut c_void { - let univ = decode_ixon_univ(ptr); - build_ixon_univ(&univ) -} diff --git a/src/lean/ffi/keccak.rs b/src/lean/ffi/keccak.rs deleted file mode 100644 index c310a3cc..00000000 --- a/src/lean/ffi/keccak.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crate::lean::sarray::LeanSArrayObject; -use tiny_keccak::{Hasher, Keccak}; - -use super::{drop_raw, to_raw}; - -#[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_init() -> *const Keccak { - let hasher = Keccak::v256(); - to_raw(hasher) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_free(hasher: *mut Keccak) { - drop_raw(hasher); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_update( - hasher: &Keccak, - input: &LeanSArrayObject, -) -> *const Keccak { - let mut hasher = hasher.clone(); - hasher.update(input.data()); - to_raw(hasher) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_finalize( - hasher: &Keccak, - output: &mut LeanSArrayObject, -) { - let mut data = [0u8; 32]; - hasher.clone().finalize(&mut data); - output.set_data(&data); -} diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs deleted file mode 100644 index 7dab07a2..00000000 --- a/src/lean/ffi/primitives.rs +++ /dev/null @@ -1,444 +0,0 @@ -//! Basic Lean type encode/decode/roundtrip operations. -//! -//! This module provides FFI functions for primitive Lean types: -//! - Nat, String, Bool -//! - Option, Pair -//! - List, Array, ByteArray -//! 
- AssocList, HashMap - -use std::ffi::c_void; - -use crate::lean::array::LeanArrayObject; -use crate::lean::nat::Nat; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_get_core, lean_array_set_core, lean_box_fn, lean_ctor_get, - lean_ctor_set, lean_is_scalar, lean_mk_string, lean_obj_tag, - lean_sarray_cptr, lean_uint64_to_nat, -}; - -// ============================================================================= -// Nat Building -// ============================================================================= - -/// Build a Lean Nat from a Rust Nat. -pub fn build_nat(n: &Nat) -> *mut c_void { - // Try to get as u64 first - if let Some(val) = n.to_u64() { - // For small values that fit in a boxed scalar (max value is usize::MAX >> 1) - if val <= (usize::MAX >> 1) as u64 { - #[allow(clippy::cast_possible_truncation)] - return lean_box_fn(val as usize); - } - // For larger u64 values, use lean_uint64_to_nat - return unsafe { lean_uint64_to_nat(val) }; - } - // For values larger than u64, convert to limbs and use GMP - let bytes = n.to_le_bytes(); - let mut limbs: Vec = Vec::with_capacity(bytes.len().div_ceil(8)); - for chunk in bytes.chunks(8) { - let mut arr = [0u8; 8]; - arr[..chunk.len()].copy_from_slice(chunk); - limbs.push(u64::from_le_bytes(arr)); - } - unsafe { crate::lean::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) } -} - -// ============================================================================= -// Round-trip FFI Functions for Testing -// ============================================================================= - -/// Round-trip a Nat: decode from Lean, re-encode to Lean. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat(nat_ptr: *const c_void) -> *mut c_void { - // Decode - let nat = Nat::from_ptr(nat_ptr); - // Re-encode - build_nat(&nat) -} - -/// Round-trip a String: decode from Lean, re-encode to Lean. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_string(s_ptr: *const c_void) -> *mut c_void { - // Decode - let s_obj: &LeanStringObject = as_ref_unsafe(s_ptr.cast()); - let s = s_obj.as_string(); - // Re-encode - unsafe { - let cstr = crate::lean::safe_cstring(s.as_str()); - lean_mk_string(cstr.as_ptr()) - } -} - -/// Round-trip a List Nat: decode from Lean, re-encode to Lean. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_list_nat( - list_ptr: *const c_void, -) -> *mut c_void { - // Decode list to Vec - let nats: Vec = crate::lean::collect_list(list_ptr, Nat::from_ptr); - // Re-encode as Lean List - build_list_nat(&nats) -} - -/// Round-trip an Array Nat: decode from Lean, re-encode to Lean. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_array_nat( - arr_ptr: *const c_void, -) -> *mut c_void { - // Decode array - let arr_obj: &LeanArrayObject = as_ref_unsafe(arr_ptr.cast()); - let nats: Vec = - arr_obj.data().iter().map(|&p| Nat::from_ptr(p)).collect(); - // Re-encode as Lean Array - build_array_nat(&nats) -} - -/// Round-trip a ByteArray: decode from Lean, re-encode to Lean. -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_bytearray(ba_ptr: *const c_void) -> *mut c_void { - // Decode ByteArray (scalar array of u8) - let sarray: &LeanSArrayObject = as_ref_unsafe(ba_ptr.cast()); - let bytes = sarray.data(); - // Re-encode - unsafe { - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let data_ptr = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - ba - } -} - -/// Round-trip a Bool: decode from Lean, re-encode. 
-/// Bool in Lean is passed as unboxed scalar: false = 0, true = 1 -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_bool(bool_ptr: *const c_void) -> *mut c_void { - // Bool is passed as unboxed scalar - just return it as-is - bool_ptr as *mut c_void -} - -// ============================================================================= -// Helper functions for building basic Lean types -// ============================================================================= - -/// Build a Lean List Nat from a Vec. -fn build_list_nat(nats: &[Nat]) -> *mut c_void { - unsafe { - // Build list in reverse (cons builds from the end) - let mut list = lean_box_fn(0); // nil - for nat in nats.iter().rev() { - let nat_obj = build_nat(nat); - // cons : α → List α → List α (tag 1, 2 object fields) - let cons = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(cons, 0, nat_obj); - lean_ctor_set(cons, 1, list); - list = cons; - } - list - } -} - -/// Build a Lean Array Nat from a Vec. -fn build_array_nat(nats: &[Nat]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(nats.len(), nats.len()); - for (i, nat) in nats.iter().enumerate() { - let nat_obj = build_nat(nat); - lean_array_set_core(arr, i, nat_obj); - } - arr - } -} - -// ============================================================================= -// FFI roundtrip functions for struct/inductive/HashMap -// ============================================================================= - -/// Round-trip a Point (structure with x, y : Nat). -/// Point is a structure, which in Lean is represented as a constructor with tag 0. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_point(point_ptr: *const c_void) -> *mut c_void { - unsafe { - // Point is a structure (single constructor, tag 0) with 2 Nat fields - let x_ptr = lean_ctor_get(point_ptr as *mut _, 0); - let y_ptr = lean_ctor_get(point_ptr as *mut _, 1); - - // Decode the Nats - let x = Nat::from_ptr(x_ptr); - let y = Nat::from_ptr(y_ptr); - - // Re-encode as Point - let point = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(point, 0, build_nat(&x)); - lean_ctor_set(point, 1, build_nat(&y)); - point - } -} - -/// Round-trip a NatTree (inductive with leaf : Nat → NatTree | node : NatTree → NatTree → NatTree). -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat_tree( - tree_ptr: *const c_void, -) -> *mut c_void { - roundtrip_nat_tree_recursive(tree_ptr) -} - -fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> *mut c_void { - unsafe { - let tag = lean_obj_tag(tree_ptr as *mut _); - match tag { - 0 => { - // leaf : Nat → NatTree - let nat_ptr = lean_ctor_get(tree_ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); - let leaf = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(leaf, 0, build_nat(&nat)); - leaf - }, - 1 => { - // node : NatTree → NatTree → NatTree - let left_ptr = lean_ctor_get(tree_ptr as *mut _, 0); - let right_ptr = lean_ctor_get(tree_ptr as *mut _, 1); - let left = roundtrip_nat_tree_recursive(left_ptr); - let right = roundtrip_nat_tree_recursive(right_ptr); - let node = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(node, 0, left); - lean_ctor_set(node, 1, right); - node - }, - _ => panic!("Invalid NatTree tag: {}", tag), - } - } -} - -/// Round-trip an AssocList Nat Nat. 
-/// AssocList: nil (tag 0, 0 fields) | cons key value tail (tag 1, 3 fields) -/// Note: nil with 0 fields may be represented as lean_box(0) -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_assoclist_nat_nat( - list_ptr: *const c_void, -) -> *mut c_void { - // Check if it's a scalar (nil represented as lean_box(0)) - if lean_is_scalar(list_ptr) { - // Return lean_box(0) for nil - return lean_box_fn(0); - } - let pairs = decode_assoc_list_nat_nat(list_ptr); - build_assoc_list_nat_nat(&pairs) -} - -/// Build an AssocList Nat Nat from pairs -fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> *mut c_void { - unsafe { - // Build in reverse to preserve order - // AssocList.nil with 0 fields is represented as lean_box(0) - let mut list = lean_box_fn(0); - for (k, v) in pairs.iter().rev() { - let cons = lean_alloc_ctor(1, 3, 0); // AssocList.cons - lean_ctor_set(cons, 0, build_nat(k)); - lean_ctor_set(cons, 1, build_nat(v)); - lean_ctor_set(cons, 2, list); - list = cons; - } - list - } -} - -/// Round-trip a DHashMap.Raw Nat Nat. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( - raw_ptr: *const c_void, -) -> *mut c_void { - unsafe { - if lean_is_scalar(raw_ptr) { - return raw_ptr as *mut c_void; - } - - let size_ptr = lean_ctor_get(raw_ptr as *mut _, 0); - let buckets_ptr = lean_ctor_get(raw_ptr as *mut _, 1); - - let size = Nat::from_ptr(size_ptr); - - // Decode and rebuild buckets - let buckets_obj: &LeanArrayObject = as_ref_unsafe(buckets_ptr.cast()); - let num_buckets = buckets_obj.data().len(); - - let mut all_pairs: Vec<(Nat, Nat)> = Vec::new(); - for &bucket_ptr in buckets_obj.data() { - let pairs = decode_assoc_list_nat_nat(bucket_ptr); - all_pairs.extend(pairs); - } - - // Rebuild buckets - let new_buckets = lean_alloc_array(num_buckets, num_buckets); - for i in 0..num_buckets { - lean_array_set_core(new_buckets, i, lean_box_fn(0)); // AssocList.nil - } - - for (k, v) in &all_pairs { - let k_u64 = k.to_u64().unwrap_or_else(|| { - let bytes = k.to_le_bytes(); - let mut arr = [0u8; 8]; - let len = bytes.len().min(8); - arr[..len].copy_from_slice(&bytes[..len]); - u64::from_le_bytes(arr) - }); - #[allow(clippy::cast_possible_truncation)] - let bucket_idx = (k_u64 as usize) & (num_buckets - 1); - - let old_bucket = - lean_array_get_core(new_buckets, bucket_idx) as *mut c_void; - let new_bucket = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(new_bucket, 0, build_nat(k)); - lean_ctor_set(new_bucket, 1, build_nat(v)); - lean_ctor_set(new_bucket, 2, old_bucket); - lean_array_set_core(new_buckets, bucket_idx, new_bucket); - } - - // Build Raw - let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, build_nat(&size)); - lean_ctor_set(raw, 1, new_buckets); - - raw - } -} - -/// Round-trip a Std.HashMap Nat Nat. -/// -/// IMPORTANT: Single-field structures are unboxed in Lean 4! -/// - HashMap has 1 field (inner : DHashMap) -/// - DHashMap has 1 field (inner : Raw) - wf : Prop is erased -/// So HashMap pointer points DIRECTLY to Raw! 
-/// -/// Memory layout (after unboxing): -/// - HashMap/DHashMap/Raw all share the same pointer -/// - Raw: ctor 0, 2 fields -/// - field 0: size : Nat -/// - field 1: buckets : Array (AssocList α β) -/// - AssocList: -/// - nil: lean_box(0) -/// - cons key value tail: ctor 1, 3 fields -#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_hashmap_nat_nat( - map_ptr: *const c_void, -) -> *mut c_void { - unsafe { - // Due to unboxing, map_ptr points directly to Raw - let size_ptr = lean_ctor_get(map_ptr as *mut _, 0); - let buckets_ptr = lean_ctor_get(map_ptr as *mut _, 1); - - let size = Nat::from_ptr(size_ptr); - - // Decode buckets (Array of AssocLists) - let buckets_obj: &LeanArrayObject = as_ref_unsafe(buckets_ptr.cast()); - let mut pairs: Vec<(Nat, Nat)> = Vec::new(); - - for &bucket_ptr in buckets_obj.data() { - // Each bucket is an AssocList - let bucket_pairs = decode_assoc_list_nat_nat(bucket_ptr); - pairs.extend(bucket_pairs); - } - - // Rebuild the HashMap with the same bucket count - let num_buckets = buckets_obj.data().len(); - let new_buckets = lean_alloc_array(num_buckets, num_buckets); - - // Initialize all buckets to AssocList.nil (lean_box(0)) - for i in 0..num_buckets { - lean_array_set_core(new_buckets, i, lean_box_fn(0)); // AssocList.nil - } - - // Insert each pair into the appropriate bucket using Lean's hash function - for (k, v) in &pairs { - // Hash the key - for Nat, Lean uses the value itself as hash - let k_u64 = k.to_u64().unwrap_or_else(|| { - // For large nats, use low 64 bits - let bytes = k.to_le_bytes(); - let mut arr = [0u8; 8]; - let len = bytes.len().min(8); - arr[..len].copy_from_slice(&bytes[..len]); - u64::from_le_bytes(arr) - }); - // Lean uses (hash & (buckets.size - 1)) for bucket index (power of 2) - #[allow(clippy::cast_possible_truncation)] - let bucket_idx = (k_u64 as usize) & (num_buckets - 1); - - // Get current bucket AssocList - let old_bucket = - lean_array_get_core(new_buckets, bucket_idx) as *mut c_void; - - // 
Build AssocList.cons key value tail (tag 1, 3 fields) - let new_bucket = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(new_bucket, 0, build_nat(k)); - lean_ctor_set(new_bucket, 1, build_nat(v)); - lean_ctor_set(new_bucket, 2, old_bucket); - - lean_array_set_core(new_buckets, bucket_idx, new_bucket); - } - - // Build Raw (ctor 0, 2 fields: size, buckets) - // Due to unboxing, this IS the HashMap - let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, build_nat(&size)); - lean_ctor_set(raw, 1, new_buckets); - - raw - } -} - -/// Decode a Lean AssocList Nat Nat to Vec of pairs -/// AssocList: nil (tag 0) | cons key value tail (tag 1, 3 fields) -pub fn decode_assoc_list_nat_nat(list_ptr: *const c_void) -> Vec<(Nat, Nat)> { - let mut result = Vec::new(); - let mut current = list_ptr; - - loop { - unsafe { - // Check if scalar (shouldn't happen) or object - if lean_is_scalar(current) { - break; - } - - let tag = lean_obj_tag(current as *mut _); - if tag == 0 { - // AssocList.nil - break; - } - - // AssocList.cons: 3 fields (key, value, tail) - let key_ptr = lean_ctor_get(current as *mut _, 0); - let value_ptr = lean_ctor_get(current as *mut _, 1); - let tail_ptr = lean_ctor_get(current as *mut _, 2); - - let k = Nat::from_ptr(key_ptr); - let v = Nat::from_ptr(value_ptr); - - result.push((k, v)); - current = tail_ptr; - } - } - - result -} - -// ============================================================================= -// Utility FFI Functions -// ============================================================================= - -/// Read first 8 bytes of a ByteArray as little-endian UInt64. -/// Used by Address.Hashable to match Rust's bucket hash computation. -/// This is essentially just a pointer cast - very fast. 
-#[unsafe(no_mangle)] -pub extern "C" fn rs_bytearray_to_u64_le(ba_ptr: *const c_void) -> u64 { - unsafe { - let arr: &LeanSArrayObject = &*ba_ptr.cast::(); - if arr.data().len() < 8 { - return 0; - } - let data_ptr = lean_sarray_cptr(ba_ptr as *mut _); - std::ptr::read_unaligned(data_ptr as *const u64) - } -} diff --git a/src/lean/nat.rs b/src/lean/nat.rs deleted file mode 100644 index 847536be..00000000 --- a/src/lean/nat.rs +++ /dev/null @@ -1,98 +0,0 @@ -//! Lean `Nat` (arbitrary-precision natural number) representation. -//! -//! Lean stores small naturals as tagged scalars and large ones as GMP -//! `mpz_object`s on the heap. This module handles both representations. - -use std::ffi::c_void; -use std::fmt; - -use num_bigint::BigUint; - -use crate::{ - lean::{as_ref_unsafe, lean_is_scalar, object::LeanObject}, - lean_unbox, -}; - -/// Arbitrary-precision natural number, wrapping `BigUint`. -#[derive(Hash, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)] -pub struct Nat(pub BigUint); - -impl fmt::Display for Nat { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl From for Nat { - fn from(x: u64) -> Self { - Nat(BigUint::from(x)) - } -} - -impl Nat { - pub const ZERO: Self = Self(BigUint::ZERO); - - /// Try to convert to u64, returning None if the value is too large. - #[inline] - pub fn to_u64(&self) -> Option { - u64::try_from(&self.0).ok() - } - - /// Decode a `Nat` from a Lean object pointer. Handles both scalar (unboxed) - /// and heap-allocated (GMP `mpz_object`) representations. 
- pub fn from_ptr(ptr: *const c_void) -> Nat { - if lean_is_scalar(ptr) { - let u = lean_unbox!(usize, ptr); - Nat(BigUint::from_bytes_le(&u.to_le_bytes())) - } else { - // Heap-allocated big integer (mpz_object) - let obj: &MpzObject = as_ref_unsafe(ptr.cast()); - Nat(obj.m_value.to_biguint()) - } - } - - #[inline] - pub fn from_le_bytes(bytes: &[u8]) -> Nat { - Nat(BigUint::from_bytes_le(bytes)) - } - - #[inline] - pub fn to_le_bytes(&self) -> Vec { - self.0.to_bytes_le() - } -} - -/// From https://github.com/leanprover/lean4/blob/master/src/runtime/object.h: -/// ```cpp -/// struct mpz_object { -/// lean_object m_header; -/// mpz m_value; -/// mpz_object() {} -/// explicit mpz_object(mpz const & m):m_value(m) {} -/// }; -/// ``` -#[repr(C)] -struct MpzObject { - m_header: LeanObject, - m_value: Mpz, -} - -#[repr(C)] -struct Mpz { - alloc: i32, - size: i32, - d: *const u64, -} - -impl Mpz { - fn to_biguint(&self) -> BigUint { - let nlimbs = self.size.unsigned_abs() as usize; - let limbs = unsafe { std::slice::from_raw_parts(self.d, nlimbs) }; - - // Convert limbs (little-endian by limb) - let bytes: Vec<_> = - limbs.iter().flat_map(|&limb| limb.to_le_bytes()).collect(); - - BigUint::from_bytes_le(&bytes) - } -} diff --git a/src/lean/object.rs b/src/lean/object.rs deleted file mode 100644 index 3aca5245..00000000 --- a/src/lean/object.rs +++ /dev/null @@ -1,30 +0,0 @@ -/// ```c -/// typedef struct { -/// int m_rc; -/// unsigned m_cs_sz:16; -/// unsigned m_other:8; -/// unsigned m_tag:8; -/// } lean_object; -/// ``` -#[repr(C)] -pub struct LeanObject { - m_rc: i32, - packed_bits: u32, -} - -impl LeanObject { - #[inline] - pub fn m_cs_sz(&self) -> u16 { - (self.packed_bits & 0xFFFF) as u16 - } - - #[inline] - pub fn m_other(&self) -> u8 { - ((self.packed_bits >> 16) & 0xFF) as u8 - } - - #[inline] - pub fn m_tag(&self) -> u8 { - ((self.packed_bits >> 24) & 0xFF) as u8 - } -} diff --git a/src/lean/sarray.rs b/src/lean/sarray.rs deleted file mode 100644 index 
b3b5789e..00000000 --- a/src/lean/sarray.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Lean scalar array (`ByteArray`) object layout. - -use super::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// size_t m_size; -/// size_t m_capacity; -/// uint8_t m_data[]; -/// } lean_sarray_object; -/// ``` -#[repr(C)] -pub struct LeanSArrayObject { - m_header: LeanObject, - m_size: usize, - m_capacity: usize, - m_data: CArray, -} - -impl LeanSArrayObject { - #[inline] - pub fn data(&self) -> &[u8] { - self.m_data.slice(self.m_size) - } - - #[inline] - pub fn data_mut(&mut self) -> &mut [u8] { - self.m_data.slice_mut(self.m_size) - } - - pub fn set_data(&mut self, data: &[u8]) { - assert!(self.m_capacity >= data.len()); - self.m_data.copy_from_slice(data); - self.m_size = data.len(); - } -} diff --git a/src/lean/string.rs b/src/lean/string.rs deleted file mode 100644 index 40bd415f..00000000 --- a/src/lean/string.rs +++ /dev/null @@ -1,27 +0,0 @@ -use crate::lean::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// size_t m_size; /* byte length including '\0' terminator */ -/// size_t m_capacity; -/// size_t m_length; /* UTF8 length */ -/// char m_data[]; -/// } lean_string_object; -/// ``` -#[repr(C)] -pub struct LeanStringObject { - m_header: LeanObject, - m_size: usize, - m_capacity: usize, - m_length: usize, - m_data: CArray, -} - -impl LeanStringObject { - #[inline] - pub fn as_string(&self) -> String { - let bytes = self.m_data.slice(self.m_size - 1); // Ignore the last '\0' - unsafe { String::from_utf8_unchecked(bytes.to_vec()) } - } -} diff --git a/src/lib.rs b/src/lib.rs index c2b2d7de..181c9e3f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,6 +13,12 @@ use indexmap::{IndexMap, IndexSet}; use rustc_hash::FxBuildHasher; pub mod aiur; +pub mod ffi; +// Iroh functionality is enabled by the `net` feature. 
However, Iroh doesn't work on `aarch64-darwin`, so it is always disabled for that target. +#[cfg(all( + feature = "net", + not(all(target_os = "macos", target_arch = "aarch64")) +))] pub mod iroh; pub mod ix; pub mod lean; diff --git a/src/sha256.rs b/src/sha256.rs index 61d1c142..fef0f35d 100644 --- a/src/sha256.rs +++ b/src/sha256.rs @@ -1,17 +1,11 @@ use sha2::{Digest, Sha256}; -use std::ffi::c_void; -use crate::lean::{as_mut_unsafe, lean_alloc_sarray, sarray::LeanSArrayObject}; +use lean_ffi::object::LeanByteArray; #[unsafe(no_mangle)] -extern "C" fn rs_sha256(bytes: &LeanSArrayObject) -> *mut c_void { +extern "C" fn rs_sha256(bytes: LeanByteArray) -> LeanByteArray { let mut hasher = Sha256::new(); - hasher.update(bytes.data()); + hasher.update(bytes.as_bytes()); let digest = hasher.finalize(); - let digest_slice = digest.as_slice(); - assert_eq!(digest_slice.len(), 32); - let arr_ptr = unsafe { lean_alloc_sarray(1, 32, 32) }; - let arr: &mut LeanSArrayObject = as_mut_unsafe(arr_ptr.cast()); - arr.set_data(digest_slice); - arr_ptr + LeanByteArray::from_bytes(digest.as_slice()) }