From 0b0f47d98a458d54e49b77eff3b1e973cd8a13df Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:06:24 -0500 Subject: [PATCH 01/27] feat: Replace C FFI with rust-bindgen --- Cargo.lock | 105 +++++++++- Cargo.toml | 4 + README.md | 8 +- c/aiur.c | 158 --------------- c/common.h | 4 - c/iroh.c | 91 --------- c/ixon_ffi.c | 243 ----------------------- c/keccak.c | 35 ---- c/linear.h | 93 --------- c/rust.h | 191 ------------------ c/unsigned.c | 26 --- flake.nix | 2 + lakefile.lean | 31 --- src/iroh/_client.rs | 41 ++-- src/iroh/_server.rs | 14 +- src/iroh/client.rs | 139 ++++++++------ src/iroh/server.rs | 20 +- src/lean.rs | 102 +++++++--- src/lean/ffi.rs | 70 +------ src/lean/ffi/aiur/protocol.rs | 351 +++++++++++++++++++--------------- src/lean/ffi/iroh.rs | 83 +------- src/lean/ffi/keccak.rs | 68 +++++-- src/lean/ffi/primitives.rs | 2 +- src/lean/nat.rs | 56 +++++- 24 files changed, 607 insertions(+), 1330 deletions(-) delete mode 100644 c/aiur.c delete mode 100644 c/common.h delete mode 100644 c/iroh.c delete mode 100644 c/ixon_ffi.c delete mode 100644 c/keccak.c delete mode 100644 c/linear.h delete mode 100644 c/rust.h delete mode 100644 c/unsigned.c diff --git a/Cargo.lock b/Cargo.lock index 793be9fc..ca961f54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -211,6 +211,26 @@ dependencies = [ "virtue", ] +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn", +] + [[package]] name = "bitflags" version = "2.10.0" @@ -280,6 +300,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.4" @@ -326,6 +355,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "cobs" version = "0.3.0" @@ -1705,6 +1745,15 @@ dependencies = [ "z32", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -1726,13 +1775,15 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", + "bindgen", "blake3", "bytes", + "cc", "dashmap", "indexmap", "iroh", "iroh-base", - "itertools", + "itertools 0.14.0", "multi-stark", "n0-snafu", "n0-watcher", @@ -1772,6 +1823,16 @@ version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.1", +] + [[package]] name = "litemap" version = "0.8.1" @@ -1848,6 +1909,12 @@ version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" 
version = "0.8.9" @@ -2102,6 +2169,16 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "ntimestamp" version = "1.0.0" @@ -2234,7 +2311,7 @@ name = "p3-commit" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-challenger", "p3-dft", "p3-field", @@ -2248,7 +2325,7 @@ name = "p3-dft" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-field", "p3-matrix", "p3-maybe-rayon", @@ -2262,7 +2339,7 @@ name = "p3-field" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "num-bigint", "p3-maybe-rayon", "p3-util", @@ -2277,7 +2354,7 @@ name = "p3-fri" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-challenger", "p3-commit", "p3-dft", @@ -2337,7 +2414,7 @@ name = "p3-matrix" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-field", "p3-maybe-rayon", "p3-util", @@ -2372,7 +2449,7 @@ name = "p3-merkle-tree" version = 
"0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-commit", "p3-field", "p3-matrix", @@ -2390,7 +2467,7 @@ name = "p3-monty-31" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "num-bigint", "p3-dft", "p3-field", @@ -2425,7 +2502,7 @@ name = "p3-symmetric" version = "0.4.2" source = "git+https://github.com/Plonky3/Plonky3?rev=0835481398d2b481bef0c6d0e8188b484ab9a636#0835481398d2b481bef0c6d0e8188b484ab9a636" dependencies = [ - "itertools", + "itertools 0.14.0", "p3-field", "serde", ] @@ -2787,6 +2864,16 @@ dependencies = [ "ucd-parse", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro-crate" version = "3.4.0" diff --git a/Cargo.toml b/Cargo.toml index 917e4ecf..6e5a29e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,6 +32,10 @@ bincode = { version = "2.0.1", optional = true } serde = { version = "1.0.219", features = ["derive"], optional = true } +[build-dependencies] +bindgen = "0.71" +cc = "1" + [dev-dependencies] quickcheck = "1.0.3" rand = "0.8.5" diff --git a/README.md b/README.md index ee26cf51..70c90fa6 100644 --- a/README.md +++ b/README.md @@ -176,7 +176,13 @@ Ix consists of the following core components: Compiler performance benchmarks are tracked at https://bencher.dev/console/projects/ix/plots -## Build & Install +## Usage + +### Prerequisites + +- Install Clang to enable Bindgen, then set `LIBCLANG_PATH` per https://rust-lang.github.io/rust-bindgen/requirements.html + +### Build - Build and test the Ix library with 
`lake build` and `lake test` - Install the `ix` binary with `lake run install`, or run with `lake exe ix` diff --git a/c/aiur.c b/c/aiur.c deleted file mode 100644 index 075bfd7c..00000000 --- a/c/aiur.c +++ /dev/null @@ -1,158 +0,0 @@ -#include "lean/lean.h" -#include "common.h" -#include "rust.h" - -static lean_external_class *g_aiur_proof_class = NULL; - -static lean_external_class *get_aiur_proof_class() { - if (g_aiur_proof_class == NULL) { - g_aiur_proof_class = lean_register_external_class( - &rs_aiur_proof_free, - &noop_foreach - ); - } - return g_aiur_proof_class; -} - -extern lean_obj_res c_rs_aiur_proof_to_bytes(b_lean_obj_arg proof) { - bytes_data *proof_bytes = rs_aiur_proof_to_bytes(lean_get_external_data(proof)); - size_t proof_size = proof_bytes->size; - lean_object *byte_array = lean_alloc_sarray(1, proof_size, proof_size); - rs_move_bytes(proof_bytes, byte_array); - return byte_array; -} - -extern lean_obj_res c_rs_aiur_proof_of_bytes(b_lean_obj_arg bytes) { - void *proof = rs_aiur_proof_of_bytes(bytes); - return lean_alloc_external(get_aiur_proof_class(), proof); -} - -static lean_external_class *g_aiur_system_class = NULL; - -static lean_external_class *get_aiur_system_class() { - if (g_aiur_system_class == NULL) { - g_aiur_system_class = lean_register_external_class( - &rs_aiur_system_free, - &noop_foreach - ); - } - return g_aiur_system_class; -} - -extern lean_obj_res c_rs_aiur_system_build(b_lean_obj_arg toplevel) { - void *aiur_system = rs_aiur_system_build(toplevel); - return lean_alloc_external(get_aiur_system_class(), aiur_system); -} - -extern lean_obj_res c_rs_aiur_system_prove( - b_lean_obj_arg aiur_system, - b_lean_obj_arg fri_parameters, - b_lean_obj_arg fun_idx, - b_lean_obj_arg args, - b_lean_obj_arg input_io_data, - b_lean_obj_arg input_io_map -) { - assert(lean_is_scalar(fun_idx)); - prove_data *pd = rs_aiur_system_prove( - lean_get_external_data(aiur_system), - fri_parameters, - fun_idx, - args, - input_io_data, - input_io_map 
- ); - - // Build the claim object - size_t claim_size = pd->claim_size; - lean_object *claim = lean_alloc_array(claim_size, claim_size); - lean_object **claim_values = lean_array_cptr(claim); - for (size_t i = 0; i < claim_size; i++) { - claim_values[i] = lean_box_uint64(0); - } - rs_set_array_g_values(claim, pd->claim); - rs_aiur_claim_free(pd->claim); - - // Build the io_data - size_t io_data_size = pd->io_data_size; - lean_object *io_data = lean_alloc_array(io_data_size, io_data_size); - lean_object **io_data_values = lean_array_cptr(io_data); - for (size_t i = 0; i < io_data_size; i++) { - io_data_values[i] = lean_box_uint64(0); - } - rs_set_aiur_io_data_values(io_data, pd->io_buffer); - - // Build io_map - size_t io_map_size = pd->io_map_size; - lean_object *io_map = lean_alloc_array(io_map_size, io_map_size); - lean_object **io_map_values = lean_array_cptr(io_map); - for (size_t i = 0; i < io_map_size; i++) { - // Array G - size_t key_size = pd->io_keys_sizes[i]; - lean_object *key = lean_alloc_array(key_size, key_size); - lean_object **key_values = lean_array_cptr(key); - for (size_t j = 0; j < key_size; j++) { - key_values[j] = lean_box_uint64(0); - } - - // IOKeyInfo - lean_object *key_info = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(key_info, 0, lean_box(0)); - lean_ctor_set(key_info, 1, lean_box(0)); - - // Array G × IOKeyInfo - lean_object *map_elt = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(map_elt, 0, key); - lean_ctor_set(map_elt, 1, key_info); - io_map_values[i] = map_elt; - } - rs_set_aiur_io_map_values(io_map, pd->io_buffer); - - // Free data regarding the io buffer - rs_aiur_prove_data_io_buffer_free(pd); - - // Array G × Array (Array G × IOKeyInfo) - lean_object *io_tuple = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(io_tuple, 0, io_data); - lean_ctor_set(io_tuple, 1, io_map); - - // Proof × Array G × Array (Array G × IOKeyInfo) - lean_object *proof_io_tuple = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(proof_io_tuple, 0, 
lean_alloc_external(get_aiur_proof_class(), pd->proof)); - lean_ctor_set(proof_io_tuple, 1, io_tuple); - - // Array G × Proof × Array G × Array (Array G × IOKeyInfo) - lean_object *claim_proof_io_tuple = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(claim_proof_io_tuple, 0, claim); - lean_ctor_set(claim_proof_io_tuple, 1, proof_io_tuple); - - // Free the outer ProveData struct (note: the proof object still lives!) - rs_aiur_prove_data_free(pd); - - return claim_proof_io_tuple; -} - -extern lean_obj_res c_rs_aiur_system_verify( - b_lean_obj_arg aiur_system, - b_lean_obj_arg fri_parameters, - b_lean_obj_arg claim, - b_lean_obj_arg proof -) { - c_result *result = rs_aiur_system_verify( - lean_get_external_data(aiur_system), - fri_parameters, - claim, - lean_get_external_data(proof) - ); - - lean_object *except; - if (result->is_ok) { - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, lean_box(0)); - } else { - except = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - rs__c_result_unit_string_free(result); - - return except; -} diff --git a/c/common.h b/c/common.h deleted file mode 100644 index 28617d5a..00000000 --- a/c/common.h +++ /dev/null @@ -1,4 +0,0 @@ -#pragma once -#include "lean/lean.h" - -static void noop_foreach(void *mod, b_lean_obj_arg fn) {} diff --git a/c/iroh.c b/c/iroh.c deleted file mode 100644 index a2bcbc72..00000000 --- a/c/iroh.c +++ /dev/null @@ -1,91 +0,0 @@ -#include "lean/lean.h" -#include "rust.h" - -extern lean_obj_res c_rs_iroh_serve() { - c_result *result = rs_iroh_serve(); - - lean_object *except; - if (result->is_ok) { - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, lean_box(0)); - } else { - except = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - - rs__c_result_unit_string_free(result); - return except; -} - -typedef struct { - char *message; - char *hash; -} put_response_ffi; - -extern lean_obj_res c_rs_iroh_put(b_lean_obj_arg 
node_id, b_lean_obj_arg addrs, b_lean_obj_arg relay_url, b_lean_obj_arg input) { - char const *node_id_str = lean_string_cstr(node_id); - char const *relay_url_str = lean_string_cstr(relay_url); - char const *input_str = lean_string_cstr(input); - - c_result *result = rs_iroh_put(node_id_str, addrs, relay_url_str, input_str); - - lean_object *except; - if (result->is_ok) { - put_response_ffi *put_response = result->data; - lean_object *message = lean_mk_string(put_response->message); - lean_object *hash = lean_mk_string(put_response->hash); - - lean_object *put_response_ctor = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(put_response_ctor, 0, message); - lean_ctor_set(put_response_ctor, 1, hash); - - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, put_response_ctor); - } else { - except = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - - rs__c_result_iroh_put_response_string_free(result); - return except; -} - -typedef struct { - char *message; - char *hash; - bytes_data *bytes; -} get_response_ffi; - -extern lean_obj_res c_rs_iroh_get(b_lean_obj_arg node_id, b_lean_obj_arg addrs, b_lean_obj_arg relay_url, b_lean_obj_arg hash) { - char const *node_id_str = lean_string_cstr(node_id); - char const *relay_url_str = lean_string_cstr(relay_url); - char const *hash_str = lean_string_cstr(hash); - - c_result *result = rs_iroh_get(node_id_str, addrs, relay_url_str, hash_str); - - lean_object *except; - if (result->is_ok) { - get_response_ffi *get_response = result->data; - lean_object *message = lean_mk_string(get_response->message); - lean_object *hash = lean_mk_string(get_response->hash); - - bytes_data *rs_bytes = get_response->bytes; - size_t bytes_size = rs_bytes->size; - lean_object *byte_array = lean_alloc_sarray(1, bytes_size, bytes_size); - rs_move_bytes(rs_bytes, byte_array); - - lean_object *get_response_ctor = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(get_response_ctor, 0, message); - 
lean_ctor_set(get_response_ctor, 1, hash); - lean_ctor_set(get_response_ctor, 2, byte_array); - - except = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(except, 0, get_response_ctor); - } else { - except = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(except, 0, lean_mk_string(result->data)); - } - - rs__c_result_iroh_get_response_string_free(result); - return except; -} diff --git a/c/ixon_ffi.c b/c/ixon_ffi.c deleted file mode 100644 index 4823b87a..00000000 --- a/c/ixon_ffi.c +++ /dev/null @@ -1,243 +0,0 @@ -#include "lean/lean.h" -#include - -// Lean's internal mpz allocation - takes ownership of the mpz_t value -// (declared in Lean's runtime but not exposed in public headers) -extern lean_object * lean_alloc_mpz(mpz_t v); -#include "common.h" -#include "rust.h" - -// External class for RustCompiledEnv -static lean_external_class *g_rust_compiled_env_class = NULL; - -static lean_external_class *get_rust_compiled_env_class() { - if (g_rust_compiled_env_class == NULL) { - g_rust_compiled_env_class = lean_register_external_class( - &rs_free_rust_env, - &noop_foreach - ); - } - return g_rust_compiled_env_class; -} - -// FFI wrapper: Test round-trip (just pass through, returns scalar) -extern uint64_t c_rs_test_ffi_roundtrip(b_lean_obj_arg name) { - return rs_test_ffi_roundtrip(name); -} - -// FFI wrapper: Compile environment with Rust -// Returns: IO RustCompiledEnv (external object) -extern lean_obj_res c_rs_compile_env_rust_first(b_lean_obj_arg env_consts, lean_obj_arg world) { - void *rust_env = rs_compile_env_rust_first(env_consts); - if (rust_env == NULL) { - // Return IO error - lean_object *err = lean_mk_string("Rust compilation failed"); - lean_object *io_err = lean_io_result_mk_error(lean_mk_io_user_error(err)); - return io_err; - } - lean_object *external = lean_alloc_external(get_rust_compiled_env_class(), rust_env); - return lean_io_result_mk_ok(external); -} - -// FFI wrapper: Free RustCompiledEnv -// Returns: IO Unit -extern lean_obj_res 
c_rs_free_rust_env(lean_obj_arg rust_env_obj, lean_obj_arg world) { - // The external object will be freed by Lean's GC when it's no longer referenced - // We don't need to do anything here since we registered a finalizer - lean_dec(rust_env_obj); - return lean_io_result_mk_ok(lean_box(0)); -} - -// FFI wrapper: Get block count -extern uint64_t c_rs_get_rust_env_block_count(b_lean_obj_arg rust_env_obj) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_rust_env_block_count(rust_env); -} - -// FFI wrapper: Compare a single block -extern uint64_t c_rs_compare_block( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - b_lean_obj_arg lean_bytes -) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_compare_block(rust_env, name, lean_bytes); -} - -// FFI wrapper: Get Rust block bytes as ByteArray -// Returns: IO ByteArray -extern lean_obj_res c_rs_get_block_bytes( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - lean_obj_arg world -) { - void *rust_env = lean_get_external_data(rust_env_obj); - - // Get the length first - uint64_t len = rs_get_block_bytes_len(rust_env, name); - - // Allocate ByteArray - lean_object *byte_array = lean_alloc_sarray(1, len, len); - - // Copy bytes into it - if (len > 0) { - rs_copy_block_bytes(rust_env, name, byte_array); - } - - return lean_io_result_mk_ok(byte_array); -} - -// FFI wrapper: Get Rust sharing vector length -extern uint64_t c_rs_get_block_sharing_len( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name -) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_block_sharing_len(rust_env, name); -} - -// FFI wrapper: Get pre-sharing expressions buffer length -extern uint64_t c_rs_get_pre_sharing_exprs_len( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name -) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_pre_sharing_exprs_len(rust_env, name); -} - -// FFI wrapper: Get pre-sharing expressions -// Returns: IO UInt64 (number 
of expressions) -extern lean_obj_res c_rs_get_pre_sharing_exprs( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - lean_obj_arg out_buf, - lean_obj_arg world -) { - void *rust_env = lean_get_external_data(rust_env_obj); - uint64_t n_exprs = rs_get_pre_sharing_exprs(rust_env, name, out_buf); - return lean_io_result_mk_ok(lean_box_uint64(n_exprs)); -} - -// FFI wrapper: Look up a constant's compiled address -// Returns: IO Bool (true if found) -extern lean_obj_res c_rs_lookup_const_addr( - b_lean_obj_arg rust_env_obj, - b_lean_obj_arg name, - lean_obj_arg out_addr, - lean_obj_arg world -) { - void *rust_env = lean_get_external_data(rust_env_obj); - uint64_t found = rs_lookup_const_addr(rust_env, name, out_addr); - return lean_io_result_mk_ok(lean_box(found != 0)); -} - -// FFI wrapper: Get compiled constant count -extern uint64_t c_rs_get_compiled_const_count(b_lean_obj_arg rust_env_obj) { - void *rust_env = lean_get_external_data(rust_env_obj); - return rs_get_compiled_const_count(rust_env); -} - -// ============================================================================= -// Lean C API wrappers for Rust to call -// These wrap Lean's allocation functions so they can be linked from Rust -// ============================================================================= - -lean_object *c_lean_alloc_ctor(unsigned tag, unsigned num_objs, unsigned scalar_sz) { - return lean_alloc_ctor(tag, num_objs, scalar_sz); -} - -void c_lean_ctor_set(lean_object *o, unsigned i, lean_object *v) { - lean_ctor_set(o, i, v); -} - -lean_object *c_lean_ctor_get(lean_object *o, unsigned i) { - return lean_ctor_get(o, i); -} - -unsigned c_lean_obj_tag(lean_object *o) { - return lean_obj_tag(o); -} - -void c_lean_ctor_set_uint8(lean_object *o, unsigned offset, uint8_t v) { - lean_ctor_set_uint8(o, offset, v); -} - -void c_lean_ctor_set_uint64(lean_object *o, unsigned offset, uint64_t v) { - lean_ctor_set_uint64(o, offset, v); -} - -lean_object *c_lean_mk_string(char const *s) { - 
return lean_mk_string(s); -} - -lean_object *c_lean_alloc_sarray(unsigned elem_size, size_t size, size_t capacity) { - return lean_alloc_sarray(elem_size, size, capacity); -} - -uint8_t *c_lean_sarray_cptr(lean_object *o) { - return lean_sarray_cptr(o); -} - -lean_object *c_lean_alloc_array(size_t size, size_t capacity) { - return lean_alloc_array(size, capacity); -} - -void c_lean_array_set_core(lean_object *o, size_t i, lean_object *v) { - lean_array_set_core(o, i, v); -} - -lean_object *c_lean_array_get_core(lean_object *o, size_t i) { - return lean_array_get_core(o, i); -} - -void c_lean_inc(lean_object *o) { - lean_inc(o); -} - -void c_lean_inc_n(lean_object *o, size_t n) { - lean_inc_n(o, n); -} - -lean_object *c_lean_io_result_mk_ok(lean_object *v) { - return lean_io_result_mk_ok(v); -} - -lean_object *c_lean_io_result_mk_error(lean_object *err) { - return lean_io_result_mk_error(err); -} - -lean_object *c_lean_mk_io_user_error(lean_object *msg) { - return lean_mk_io_user_error(msg); -} - -lean_object *c_lean_uint64_to_nat(uint64_t n) { - return lean_uint64_to_nat(n); -} - -// Create a big Nat from limbs (little-endian u64 array) -// This uses GMP's mpz_import and Lean's lean_alloc_mpz -lean_object *c_lean_nat_from_limbs(size_t num_limbs, uint64_t const *limbs) { - if (num_limbs == 0) { - return lean_box(0); - } - if (num_limbs == 1 && limbs[0] <= LEAN_MAX_SMALL_NAT) { - return lean_box(limbs[0]); - } - if (num_limbs == 1) { - return lean_uint64_to_nat(limbs[0]); - } - - // For multi-limb values, use GMP - mpz_t value; - mpz_init(value); - // Import limbs: little-endian order, native endian within limbs - // order = -1 (least significant limb first) - // size = 8 bytes per limb - // endian = 0 (native) - // nails = 0 (full limbs) - mpz_import(value, num_limbs, -1, sizeof(uint64_t), 0, 0, limbs); - - lean_object *result = lean_alloc_mpz(value); - // lean_alloc_mpz takes ownership, so we don't clear - return result; -} diff --git a/c/keccak.c b/c/keccak.c 
deleted file mode 100644 index 02d47895..00000000 --- a/c/keccak.c +++ /dev/null @@ -1,35 +0,0 @@ -#include "lean/lean.h" -#include "common.h" -#include "linear.h" -#include "rust.h" - -static lean_external_class *g_keccak256_hasher_class = NULL; - -static lean_external_class *get_keccak256_hasher_class() { - if (g_keccak256_hasher_class == NULL) { - g_keccak256_hasher_class = lean_register_external_class( - &rs_keccak256_hasher_free, - &noop_foreach - ); - } - return g_keccak256_hasher_class; -} - -extern lean_obj_res c_rs_keccak256_hasher_init() { - void *hasher = rs_keccak256_hasher_init(); - return lean_alloc_external(get_keccak256_hasher_class(), hasher); -} - -extern lean_obj_res c_rs_keccak256_hasher_update( - lean_obj_arg hasher, - b_lean_obj_arg input -) { - void* hasher_cloned = rs_keccak256_hasher_update(lean_get_external_data(hasher), input); - return lean_alloc_external(get_keccak256_hasher_class(), hasher_cloned); -} - -extern lean_obj_res c_rs_keccak256_hasher_finalize(lean_obj_arg hasher) { - lean_object *buffer = lean_alloc_sarray(1, 0, 32); - rs_keccak256_hasher_finalize(lean_get_external_data(hasher), buffer); - return buffer; -} diff --git a/c/linear.h b/c/linear.h deleted file mode 100644 index 56160cb1..00000000 --- a/c/linear.h +++ /dev/null @@ -1,93 +0,0 @@ -#pragma once -#include "lean/lean.h" -#include "common.h" - -/* -This file provides a framework for enforcing linear usage of mutating objects by -Lean's runtime. It's particularly useful when making use of Rust objects that -don't implement `Clone` and work on the basis of mutation. 
-*/ - -typedef struct { - /* A reference to the underlying mutable object */ - void *object_ref; - /* A pointer to a function that can free `object_ref` */ - void (*finalizer)(void *); - /* If set to `true`, the resource pointed by `object_ref` cannot be used */ - bool outdated; - /* If set to `true`, allow the finalizer to be called on outdated objects */ - bool finalize_even_if_outdated; -} linear_object; - -static inline linear_object *linear_object_init(void *object_ref, void (*finalizer)(void *)) { - linear_object *linear = malloc(sizeof(linear_object)); - linear->object_ref = object_ref; - linear->finalizer = finalizer; - linear->outdated = false; - linear->finalize_even_if_outdated = false; - return linear; -} - -static inline linear_object *to_linear_object(void *ptr) { - return (linear_object*)ptr; -} - -static inline void *get_object_ref(linear_object *linear) { - return linear->object_ref; -} - -static inline linear_object *linear_bump(linear_object *linear) { - linear_object *copy = malloc(sizeof(linear_object)); - *copy = *linear; - linear->outdated = true; - return copy; -} - -static inline void ditch_linear(linear_object *linear) { - linear->outdated = true; - linear->finalize_even_if_outdated = true; -} - -static inline void assert_linearity(linear_object *linear) { - if (LEAN_UNLIKELY(linear->outdated)) { - lean_internal_panic("Non-linear usage of linear object"); - } -} - -static inline void free_linear_object(linear_object *linear) { - // Only finalize `object_ref` if `linear` is the latest linear object reference - // or if the finalizer was forcibly set as allowed. By doing this, we avoid - // double-free attempts. 
- if (LEAN_UNLIKELY(!linear->outdated || linear->finalize_even_if_outdated)) { - linear->finalizer(linear->object_ref); - } - free(linear); -} - -/* --- API to implement Lean objects --- */ - -static void linear_object_finalizer(void *ptr) { - free_linear_object(to_linear_object(ptr)); -} - -static lean_external_class *g_linear_object_class = NULL; - -static lean_external_class *get_linear_object_class() { - if (g_linear_object_class == NULL) { - g_linear_object_class = lean_register_external_class( - &linear_object_finalizer, - &noop_foreach - ); - } - return g_linear_object_class; -} - -static inline lean_object *alloc_lean_linear_object(linear_object *linear) { - return lean_alloc_external(get_linear_object_class(), linear); -} - -static inline linear_object *validated_linear(lean_object *obj) { - linear_object *linear = to_linear_object(lean_get_external_data(obj)); - assert_linearity(linear); - return linear; -} diff --git a/c/rust.h b/c/rust.h deleted file mode 100644 index 03060475..00000000 --- a/c/rust.h +++ /dev/null @@ -1,191 +0,0 @@ -#pragma once - -#include "lean/lean.h" - -typedef struct { - bool is_ok; - void *data; -} c_result; - -/* --- Aiur -- */ - -typedef struct { - size_t size; - void *bytes_vec; -} bytes_data; - -void rs_move_bytes(bytes_data*, lean_obj_arg); - -bytes_data *rs_aiur_proof_to_bytes(void*); -void *rs_aiur_proof_of_bytes(b_lean_obj_arg); - -void rs_aiur_system_free(void*); -void *rs_aiur_system_build(b_lean_obj_arg); - -typedef struct { - size_t claim_size; - void *claim; - void *proof; - void *io_buffer; - size_t io_data_size; - size_t io_map_size; - size_t *io_keys_sizes; -} prove_data; - -void rs_aiur_claim_free(void*); -void rs_aiur_proof_free(void*); -void rs_aiur_prove_data_io_buffer_free(void*); -void rs_aiur_prove_data_free(prove_data*); - -prove_data *rs_aiur_system_prove( - void*, b_lean_obj_arg, b_lean_obj_arg, b_lean_obj_arg, b_lean_obj_arg, b_lean_obj_arg -); -void rs_set_array_g_values(lean_obj_arg, void*); -void 
rs_set_aiur_io_data_values(lean_obj_arg, void*); -void rs_set_aiur_io_map_values(lean_obj_arg, void*); - -c_result *rs_aiur_system_verify(void*, b_lean_obj_arg, b_lean_obj_arg, void*); - -void rs__c_result_unit_string_free(c_result *); - -/* --- Iroh --- */ - -c_result *rs_iroh_serve(void); -c_result *rs_iroh_put(char const *, b_lean_obj_arg, char const *, char const *); -c_result *rs_iroh_get(char const *, b_lean_obj_arg, char const *, char const *); - -void rs__c_result_iroh_put_response_string_free(c_result *); -void rs__c_result_iroh_get_response_string_free(c_result *); - -/* --- Keccak Hasher --- */ - -void *rs_keccak256_hasher_init(void); -void rs_keccak256_hasher_free(void*); -void *rs_keccak256_hasher_update(void*, void*); -void *rs_keccak256_hasher_finalize(void*, void*); - -/* --- Ixon FFI (incremental block comparison) --- */ - -// Test FFI round-trip -uint64_t rs_test_ffi_roundtrip(b_lean_obj_arg name); - -// Compile environment with Rust, returns opaque RustCompiledEnv* -void *rs_compile_env_rust_first(b_lean_obj_arg env_consts); - -// Free a RustCompiledEnv -void rs_free_rust_env(void *rust_env); - -// Get block count from RustCompiledEnv -uint64_t rs_get_rust_env_block_count(void const *rust_env); - -// Compare a single block, returns packed result -uint64_t rs_compare_block(void const *rust_env, b_lean_obj_arg name, b_lean_obj_arg lean_bytes); - -// Get the length of Rust's compiled bytes for a block -uint64_t rs_get_block_bytes_len(void const *rust_env, b_lean_obj_arg name); - -// Copy Rust's compiled bytes into a pre-allocated ByteArray -void rs_copy_block_bytes(void const *rust_env, b_lean_obj_arg name, lean_obj_arg dest); - -// Get Rust's sharing vector length for a block -uint64_t rs_get_block_sharing_len(void const *rust_env, b_lean_obj_arg name); - -// Compare block with typed result (returns BlockCompareDetail) -lean_obj_res rs_compare_block_v2(void const *rust_env, b_lean_obj_arg name, b_lean_obj_arg lean_bytes, uint64_t lean_sharing_len); 
- -// Get the buffer length needed for pre-sharing expressions -uint64_t rs_get_pre_sharing_exprs_len(void const *rust_env, b_lean_obj_arg name); - -// Get pre-sharing root expressions for a constant -uint64_t rs_get_pre_sharing_exprs(void const *rust_env, b_lean_obj_arg name, lean_obj_arg out_buf); - -// Look up a constant's compiled address (32-byte blake3 hash) -// Returns 1 on success, 0 if name not found -uint64_t rs_lookup_const_addr(void const *rust_env, b_lean_obj_arg name, lean_obj_arg out_addr); - -// Get the total number of compiled constants -uint64_t rs_get_compiled_const_count(void const *rust_env); - -/* --- Utility FFI --- */ - -// Read first 8 bytes of ByteArray as little-endian UInt64 (for Address.Hashable) -uint64_t rs_bytearray_to_u64_le(b_lean_obj_arg ba); - -/* --- Ix Canonicalization FFI --- */ - -// Canonicalize environment and return Ix.Environment -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO Ix.Environment -lean_obj_res rs_canonicalize_env_to_ix(b_lean_obj_arg env_consts); - -/* --- Round-trip FFI for testing Lean object construction --- */ - -// Round-trip basic types: Lean -> Rust -> Lean -lean_object *rs_roundtrip_nat(b_lean_obj_arg nat); -lean_object *rs_roundtrip_string(b_lean_obj_arg str); -lean_object *rs_roundtrip_list_nat(b_lean_obj_arg list); -lean_object *rs_roundtrip_array_nat(b_lean_obj_arg arr); -lean_object *rs_roundtrip_bytearray(b_lean_obj_arg ba); - -// Round-trip Ix types: Lean -> Rust -> Lean -lean_object *rs_roundtrip_ix_address(b_lean_obj_arg addr); -lean_object *rs_roundtrip_ix_name(b_lean_obj_arg name); -lean_object *rs_roundtrip_ix_level(b_lean_obj_arg level); -lean_object *rs_roundtrip_ix_expr(b_lean_obj_arg expr); -lean_object *rs_roundtrip_ix_int(b_lean_obj_arg int_val); -lean_object *rs_roundtrip_ix_substring(b_lean_obj_arg sub); -lean_object *rs_roundtrip_ix_source_info(b_lean_obj_arg si); -lean_object *rs_roundtrip_ix_syntax_preresolved(b_lean_obj_arg sp); -lean_object 
*rs_roundtrip_ix_syntax(b_lean_obj_arg syn); -lean_object *rs_roundtrip_ix_data_value(b_lean_obj_arg dv); -lean_object *rs_roundtrip_bool(b_lean_obj_arg b); -lean_object *rs_roundtrip_ix_constant_info(b_lean_obj_arg info); -lean_object *rs_roundtrip_ix_environment(b_lean_obj_arg env); -lean_object *rs_roundtrip_ix_raw_environment(b_lean_obj_arg raw_env); - -// Round-trip BlockCompareResult and BlockCompareDetail -lean_object *rs_roundtrip_block_compare_result(b_lean_obj_arg ptr); -lean_object *rs_roundtrip_block_compare_detail(b_lean_obj_arg ptr); - -/* --- RawCompiledEnv FFI --- */ - -// Compile environment and return RawCompiledEnv -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RawCompiledEnv -lean_obj_res rs_compile_env_to_raw(b_lean_obj_arg env_consts); - -// Complete compilation pipeline - returns RustCompilationResult -// (rawEnv, condensed, compiled) -lean_obj_res rs_compile_env_full(b_lean_obj_arg env_consts); - -// Compile environment to Ixon RawEnv (structured Lean objects) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RawEnv -lean_obj_res rs_compile_env_to_ixon(b_lean_obj_arg env_consts); - -// Round-trip RawEnv for FFI testing -lean_object *rs_roundtrip_raw_env(b_lean_obj_arg raw_env); - -// Round-trip RustCondensedBlocks for FFI testing -lean_object *rs_roundtrip_rust_condensed_blocks(b_lean_obj_arg condensed); - -// Round-trip RustCompilePhases for FFI testing -lean_object *rs_roundtrip_rust_compile_phases(b_lean_obj_arg phases); - -// Combined compilation phases - returns RustCompilePhases -// (rawEnv, condensed, compileEnv) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RustCompilePhases -lean_obj_res rs_compile_phases(b_lean_obj_arg env_consts); - -/* --- Graph/SCC FFI --- */ - -// Build reference graph in Rust (returns Ix.Name-based graph) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO (Array (Ix.Name × Array Ix.Name)) -lean_obj_res rs_build_ref_graph(b_lean_obj_arg env_consts); 
- -// Compute SCCs in Rust (returns Ix.Name-based CondensedBlocks) -// Takes: List (Lean.Name × Lean.ConstantInfo) -// Returns: IO RustCondensedBlocks -lean_obj_res rs_compute_sccs(b_lean_obj_arg env_consts); diff --git a/c/unsigned.c b/c/unsigned.c deleted file mode 100644 index e1493bd7..00000000 --- a/c/unsigned.c +++ /dev/null @@ -1,26 +0,0 @@ -#include "lean/lean.h" -#include "rust.h" - -#define memcpy __builtin_memcpy // Avoids including `string.h` - -static inline lean_obj_res mk_byte_array(size_t size, uint8_t *bytes) { - lean_object *o = lean_alloc_sarray(1, size, size); - memcpy(lean_sarray_cptr(o), bytes, size); - return o; -} - -extern lean_obj_res c_u16_to_le_bytes(uint16_t u16) { - return mk_byte_array(sizeof(uint16_t), (uint8_t*)&u16); -} - -extern lean_obj_res c_u32_to_le_bytes(uint32_t u32) { - return mk_byte_array(sizeof(uint32_t), (uint8_t*)&u32); -} - -extern lean_obj_res c_u64_to_le_bytes(uint64_t u64) { - return mk_byte_array(sizeof(uint64_t), (uint8_t*)&u64); -} - -extern lean_obj_res c_usize_to_le_bytes(size_t usize) { - return mk_byte_array(sizeof(size_t), (uint8_t*)&usize); -} diff --git a/flake.nix b/flake.nix index d4e6a802..041fb67a 100644 --- a/flake.nix +++ b/flake.nix @@ -138,10 +138,12 @@ # Provide a unified dev shell with Lean + Rust devShells.default = pkgs.mkShell { + LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib"; packages = with pkgs; [ pkg-config openssl clang + libclang # needed for bindgen in build.rs rustToolchain rust-analyzer lean.lean-all # Includes Lean compiler, lake, stdlib, etc. 
diff --git a/lakefile.lean b/lakefile.lean index 87cb7a6a..6cdde581 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -66,37 +66,6 @@ end IxApplications section FFI -/-- Build the static lib for the C files -/ -extern_lib ix_c pkg := do - let compiler := "gcc" - let cDir := pkg.dir / "c" - let buildCDir := pkg.buildDir / "c" - let weakArgs := #["-fPIC", "-I", (← getLeanIncludeDir).toString, "-I", cDir.toString] - - let cDirEntries ← cDir.readDir - - -- Include every C header file in the trace mix - let extraDepTrace := cDirEntries.foldl (init := getLeanTrace) fun acc dirEntry => - let filePath := dirEntry.path - if filePath.extension == some "h" then do - let x ← acc - let y ← computeTrace $ TextFilePath.mk filePath - pure $ x.mix y - else acc - - -- Collect a build job for every C file in `cDir` - let mut buildJobs := #[] - for dirEntry in cDirEntries do - let filePath := dirEntry.path - if filePath.extension == some "c" then - let oFile := buildCDir / dirEntry.fileName |>.withExtension "o" - let srcJob ← inputTextFile filePath - let buildJob ← buildO oFile srcJob weakArgs #[] compiler extraDepTrace - buildJobs := buildJobs.push buildJob - - let libName := nameToStaticLib "ix_c" - buildStaticLib (pkg.staticLibDir / libName) buildJobs - /-- Build the static lib for the Rust crate -/ extern_lib ix_rs pkg := do -- Defaults to `--features parallel`, configured via env var diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs index aadb73df..9aa7f2fa 100644 --- a/src/iroh/_client.rs +++ b/src/iroh/_client.rs @@ -1,30 +1,29 @@ -use std::ffi::{CString, c_char}; +use std::ffi::c_void; -use crate::lean::{ - array::LeanArrayObject, - ffi::{CResult, to_raw}, -}; +use crate::lean::{array::LeanArrayObject, lean_except_error_string}; +const ERR_MSG: &str = + "Iroh functions not supported when the Rust `net` feature is disabled \ + or on MacOS aarch64-darwin"; + +/// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` 
#[unsafe(no_mangle)] -extern "C" fn rs_iroh_put( - _node_id: *const c_char, +extern "C" fn c_rs_iroh_put( + _node_id: *const c_void, _addrs: &LeanArrayObject, - _relay_url: *const c_char, - _file_path: *const c_char, -) -> *const CResult { - let msg = CString::new("Iroh functions not supported when the Rust `net` feature is disabled or on MacOS aarch64-darwin").expect("CString::new failure"); - let c_result = CResult { is_ok: false, data: msg.into_raw().cast() }; - to_raw(c_result) + _relay_url: *const c_void, + _input: *const c_void, +) -> *mut c_void { + lean_except_error_string(ERR_MSG) } +/// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] -extern "C" fn rs_iroh_get( - _node_id: *const c_char, +extern "C" fn c_rs_iroh_get( + _node_id: *const c_void, _addrs: &LeanArrayObject, - _relay_url: *const c_char, - _hash: *const c_char, -) -> *const CResult { - let msg = CString::new("Iroh functions not supported when the Rust `net` feature is disabled or on MacOS aarch64-darwin").expect("CString::new failure"); - let c_result = CResult { is_ok: false, data: msg.into_raw().cast() }; - to_raw(c_result) + _relay_url: *const c_void, + _hash: *const c_void, +) -> *mut c_void { + lean_except_error_string(ERR_MSG) } diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs index f5bcd892..7130c902 100644 --- a/src/iroh/_server.rs +++ b/src/iroh/_server.rs @@ -1,10 +1,12 @@ -use std::ffi::CString; +use std::ffi::c_void; -use crate::lean::ffi::{CResult, to_raw}; +use crate::lean::lean_except_error_string; +/// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> *const CResult { - let msg = CString::new("Iroh functions not supported when the Rust `net` feature is disabled or on MacOS aarch64-darwin").expect("CString::new failure"); - let c_result = CResult { is_ok: false, data: msg.into_raw().cast() }; - to_raw(c_result) +extern "C" fn c_rs_iroh_serve() 
-> *mut c_void { + lean_except_error_string( + "Iroh functions not supported when the Rust `net` feature is disabled \ + or on MacOS aarch64-darwin", + ) } diff --git a/src/iroh/client.rs b/src/iroh/client.rs index 7df9cde7..5d1f9f40 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -1,7 +1,7 @@ use iroh::{Endpoint, NodeAddr, NodeId, RelayMode, RelayUrl, SecretKey}; use n0_snafu::{Result, ResultExt}; use n0_watcher::Watcher as _; -use std::ffi::{CString, c_char}; +use std::ffi::c_void; use std::net::SocketAddr; use tracing::info; use tracing_subscriber::layer::SubscriberExt; @@ -10,103 +10,126 @@ use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; use crate::lean::array::LeanArrayObject; -use crate::lean::as_ref_unsafe; -use crate::lean::ffi::iroh::{GetResponseFFI, PutResponseFFI}; -use crate::lean::ffi::{CResult, raw_to_str, to_raw}; use crate::lean::string::LeanStringObject; +use crate::lean::{ + as_mut_unsafe, as_ref_unsafe, lean_alloc_ctor, lean_alloc_sarray, + lean_ctor_set, lean_except_error_string, lean_except_ok, lean_mk_string, + safe_cstring, sarray::LeanSArrayObject, +}; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; // Maximum number of characters to read from the server. 
Connection automatically closed if this is exceeded const READ_SIZE_LIMIT: usize = 100_000_000; +/// Build a Lean `PutResponse` structure: +/// ``` +/// structure PutResponse where +/// message: String +/// hash: String +/// ``` +fn mk_put_response(message: &str, hash: &str) -> *mut c_void { + let c_message = safe_cstring(message); + let c_hash = safe_cstring(hash); + unsafe { + let ctor = lean_alloc_ctor(0, 2, 0); + lean_ctor_set(ctor, 0, lean_mk_string(c_message.as_ptr())); + lean_ctor_set(ctor, 1, lean_mk_string(c_hash.as_ptr())); + ctor + } +} + +/// Build a Lean `GetResponse` structure: +/// ``` +/// structure GetResponse where +/// message: String +/// hash: String +/// bytes: ByteArray +/// ``` +fn mk_get_response(message: &str, hash: &str, bytes: &[u8]) -> *mut c_void { + let c_message = safe_cstring(message); + let c_hash = safe_cstring(hash); + unsafe { + let byte_array = lean_alloc_sarray(1, bytes.len(), bytes.len()); + let arr: &mut LeanSArrayObject = as_mut_unsafe(byte_array.cast()); + arr.set_data(bytes); + + let ctor = lean_alloc_ctor(0, 3, 0); + lean_ctor_set(ctor, 0, lean_mk_string(c_message.as_ptr())); + lean_ctor_set(ctor, 1, lean_mk_string(c_hash.as_ptr())); + lean_ctor_set(ctor, 2, byte_array); + ctor + } +} + +/// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] -extern "C" fn rs_iroh_put( - node_id: *const c_char, +extern "C" fn c_rs_iroh_put( + node_id: &LeanStringObject, addrs: &LeanArrayObject, - relay_url: *const c_char, - input: *const c_char, -) -> *const CResult { - let node_id = raw_to_str(node_id); + relay_url: &LeanStringObject, + input: &LeanStringObject, +) -> *mut c_void { + let node_id = node_id.as_string(); let addrs: Vec = addrs.to_vec(|ptr| { let string: &LeanStringObject = as_ref_unsafe(ptr.cast()); string.as_string() }); - let relay_url = raw_to_str(relay_url); - let input = raw_to_str(input); + let relay_url = relay_url.as_string(); + let 
input_str = input.as_string(); - let request = Request::Put(PutRequest { bytes: input.as_bytes().to_vec() }); - // Create a Tokio runtime to block on the async function + let request = + Request::Put(PutRequest { bytes: input_str.as_bytes().to_vec() }); let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - // Run the async function and block until we get the result - let c_result = match rt.block_on(connect(node_id, &addrs, relay_url, request)) - { + match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { Ok(response) => match response { Response::Put(put_response) => { - let put_response_ffi = - PutResponseFFI::new(&put_response.message, &put_response.hash); - CResult { is_ok: true, data: to_raw(put_response_ffi).cast() } - }, - _ => { - let msg = CString::new("error: incorrect server response") - .expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } + lean_except_ok(mk_put_response( + &put_response.message, + &put_response.hash, + )) }, + _ => lean_except_error_string("error: incorrect server response"), }, - Err(err) => { - let msg = CString::new(err.to_string()).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - - to_raw(c_result) + Err(err) => lean_except_error_string(&err.to_string()), + } } +/// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] -extern "C" fn rs_iroh_get( - node_id: *const c_char, +extern "C" fn c_rs_iroh_get( + node_id: &LeanStringObject, addrs: &LeanArrayObject, - relay_url: *const c_char, - hash: *const c_char, -) -> *const CResult { - let node_id = raw_to_str(node_id); + relay_url: &LeanStringObject, + hash: &LeanStringObject, +) -> *mut c_void { + let node_id = node_id.as_string(); let addrs: Vec = addrs.to_vec(|ptr| { let string: &LeanStringObject = as_ref_unsafe(ptr.cast()); string.as_string() }); - let relay_url = 
raw_to_str(relay_url); - let hash = raw_to_str(hash); - let request = Request::Get(GetRequest { hash: hash.to_owned() }); + let relay_url = relay_url.as_string(); + let hash = hash.as_string(); + let request = Request::Get(GetRequest { hash: hash.clone() }); let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - let c_result = match rt.block_on(connect(node_id, &addrs, relay_url, request)) - { + match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { Ok(response) => match response { Response::Get(get_response) => { - let get_response_ffi = GetResponseFFI::new( + lean_except_ok(mk_get_response( &get_response.message, &get_response.hash, &get_response.bytes, - ); - CResult { is_ok: true, data: to_raw(get_response_ffi).cast() } - }, - _ => { - let msg = CString::new("error: incorrect server response") - .expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } + )) }, + _ => lean_except_error_string("error: incorrect server response"), }, - Err(err) => { - let msg = CString::new(err.to_string()).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - - to_raw(c_result) + Err(err) => lean_except_error_string(&err.to_string()), + } } // Largely taken from https://github.com/n0-computer/iroh/blob/main/iroh/examples/connect.rs diff --git a/src/iroh/server.rs b/src/iroh/server.rs index 1820867f..2d714efe 100644 --- a/src/iroh/server.rs +++ b/src/iroh/server.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -use std::ffi::CString; +use std::ffi::c_void; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -12,29 +12,25 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetResponse, PutResponse, Request, Response}; -use crate::lean::ffi::{CResult, to_raw}; +use crate::lean::{lean_box_fn, lean_except_error_string, lean_except_ok}; // An example ALPN that we are using to communicate over 
the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; // Maximum number of characters to read from the client. Connection automatically closed if this is exceeded const READ_SIZE_LIMIT: usize = 100_000_000; +/// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> *const CResult { +extern "C" fn c_rs_iroh_serve() -> *mut c_void { // Create a Tokio runtime to block on the async function let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); // Run the async function and block until we get the result - let c_result = match rt.block_on(serve()) { - Ok(()) => CResult { is_ok: true, data: std::ptr::null() }, - Err(err) => { - let msg = CString::new(err.to_string()).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - - to_raw(c_result) + match rt.block_on(serve()) { + Ok(()) => lean_except_ok(lean_box_fn(0)), + Err(err) => lean_except_error_string(&err.to_string()), + } } // Largely taken from https://github.com/n0-computer/iroh/blob/main/iroh/examples/listen.rs diff --git a/src/lean.rs b/src/lean.rs index 676fb0a8..27ea1a5e 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -206,13 +206,13 @@ pub fn collect_list_with( use std::ffi::c_uint; -// Lean C API wrappers (defined in c/ixon_ffi.c) -// These wrap Lean's allocation functions so they can be linked from Rust +// Lean C API bindings. Static inline functions use bindgen-generated +// wrappers (suffix __extern). LEAN_EXPORT functions link directly. unsafe extern "C" { // Object allocation /// Allocate a constructor object with the given tag, number of object fields, /// and scalar size in bytes. - #[link_name = "c_lean_alloc_ctor"] + #[link_name = "lean_alloc_ctor__extern"] pub fn lean_alloc_ctor( tag: c_uint, num_objs: c_uint, @@ -220,33 +220,32 @@ unsafe extern "C" { ) -> *mut c_void; /// Set the i-th object field of a constructor. 
- #[link_name = "c_lean_ctor_set"] + #[link_name = "lean_ctor_set__extern"] pub fn lean_ctor_set(o: *mut c_void, i: c_uint, v: *mut c_void); /// Get the i-th object field of a constructor. - #[link_name = "c_lean_ctor_get"] + #[link_name = "lean_ctor_get__extern"] pub fn lean_ctor_get(o: *mut c_void, i: c_uint) -> *const c_void; /// Get the tag of a Lean object. - #[link_name = "c_lean_obj_tag"] + #[link_name = "lean_obj_tag__extern"] pub fn lean_obj_tag(o: *mut c_void) -> c_uint; /// Set a uint8 scalar field at the given byte offset (after object fields). - #[link_name = "c_lean_ctor_set_uint8"] + #[link_name = "lean_ctor_set_uint8__extern"] pub fn lean_ctor_set_uint8(o: *mut c_void, offset: usize, v: u8); /// Set a uint64 scalar field at the given byte offset (after object fields). - #[link_name = "c_lean_ctor_set_uint64"] + #[link_name = "lean_ctor_set_uint64__extern"] pub fn lean_ctor_set_uint64(o: *mut c_void, offset: usize, v: u64); - // String allocation + // String allocation (LEAN_EXPORT — links directly, no wrapper needed) /// Create a Lean string from a null-terminated C string. - #[link_name = "c_lean_mk_string"] pub fn lean_mk_string(s: *const std::ffi::c_char) -> *mut c_void; // Scalar array (ByteArray) allocation /// Allocate a scalar array with the given element size, initial size, and capacity. - #[link_name = "c_lean_alloc_sarray"] + #[link_name = "lean_alloc_sarray__extern"] pub fn lean_alloc_sarray( elem_size: c_uint, size: usize, @@ -254,55 +253,68 @@ unsafe extern "C" { ) -> *mut c_void; /// Get a pointer to the data area of a scalar array. - #[link_name = "c_lean_sarray_cptr"] + #[link_name = "lean_sarray_cptr__extern"] pub fn lean_sarray_cptr(o: *mut c_void) -> *mut u8; // Array allocation /// Allocate an array with the given initial size and capacity. 
- #[link_name = "c_lean_alloc_array"] + #[link_name = "lean_alloc_array__extern"] pub fn lean_alloc_array(size: usize, capacity: usize) -> *mut c_void; /// Set the i-th element of an array (does not update size). - #[link_name = "c_lean_array_set_core"] + #[link_name = "lean_array_set_core__extern"] pub fn lean_array_set_core(o: *mut c_void, i: usize, v: *mut c_void); /// Get the i-th element of an array. - #[link_name = "c_lean_array_get_core"] + #[link_name = "lean_array_get_core__extern"] pub fn lean_array_get_core(o: *mut c_void, i: usize) -> *const c_void; // Reference counting /// Increment the reference count of a Lean object. - #[link_name = "c_lean_inc"] + #[link_name = "lean_inc__extern"] pub fn lean_inc(o: *mut c_void); /// Increment the reference count by n. - #[link_name = "c_lean_inc_n"] + #[link_name = "lean_inc_n__extern"] pub fn lean_inc_n(o: *mut c_void, n: usize); + /// Decrement the reference count of a Lean object. + #[link_name = "lean_dec__extern"] + pub fn lean_dec(o: *mut c_void); + + // External object support + /// Register an external class with finalizer and foreach callbacks. + /// This is a LEAN_EXPORT function and can be linked directly. + pub fn lean_register_external_class( + finalize: extern "C" fn(*mut c_void), + foreach: extern "C" fn(*mut c_void, *mut c_void), + ) -> *mut c_void; + + /// Allocate an external object wrapping opaque data. + #[link_name = "lean_alloc_external__extern"] + pub fn lean_alloc_external( + cls: *mut c_void, + data: *mut c_void, + ) -> *mut c_void; + // IO result construction /// Wrap a value in a successful IO result. - #[link_name = "c_lean_io_result_mk_ok"] + #[link_name = "lean_io_result_mk_ok__extern"] pub fn lean_io_result_mk_ok(v: *mut c_void) -> *mut c_void; /// Wrap an error in an IO error result. 
- #[link_name = "c_lean_io_result_mk_error"] + #[link_name = "lean_io_result_mk_error__extern"] pub fn lean_io_result_mk_error(err: *mut c_void) -> *mut c_void; - /// Create an IO.Error.userError from a String. - #[link_name = "c_lean_mk_io_user_error"] + /// Create an IO.Error.userError from a String (LEAN_EXPORT — links directly). pub fn lean_mk_io_user_error(msg: *mut c_void) -> *mut c_void; // Nat allocation for large values /// Create a Nat from a uint64. For values > max boxed, allocates on heap. - #[link_name = "c_lean_uint64_to_nat"] + #[link_name = "lean_uint64_to_nat__extern"] pub fn lean_uint64_to_nat(n: u64) -> *mut c_void; - /// Create a Nat from limbs (little-endian u64 array). Uses GMP internally. - #[link_name = "c_lean_nat_from_limbs"] - pub fn lean_nat_from_limbs( - num_limbs: usize, - limbs: *const u64, - ) -> *mut c_void; + // lean_nat_from_limbs moved to src/lean/nat.rs (uses GMP directly) } /// Box a scalar value into a Lean object pointer. @@ -313,3 +325,37 @@ unsafe extern "C" { pub fn lean_box_fn(n: usize) -> *mut c_void { ((n << 1) | 1) as *mut c_void } + +// ============================================================================= +// Lean Except constructors +// ============================================================================= + +/// Build `Except.ok val` — tag 1, one object field. +#[inline] +pub fn lean_except_ok(val: *mut c_void) -> *mut c_void { + unsafe { + let obj = lean_alloc_ctor(1, 1, 0); + lean_ctor_set(obj, 0, val); + obj + } +} + +/// Build `Except.error msg` — tag 0, one object field. +#[inline] +pub fn lean_except_error(msg: *mut c_void) -> *mut c_void { + unsafe { + let obj = lean_alloc_ctor(0, 1, 0); + lean_ctor_set(obj, 0, msg); + obj + } +} + +/// Build `Except.error (lean_mk_string str)` from a Rust string. 
+#[inline] +pub fn lean_except_error_string(msg: &str) -> *mut c_void { + let c_msg = safe_cstring(msg); + unsafe { lean_except_error(lean_mk_string(c_msg.as_ptr())) } +} + +/// No-op foreach callback for external classes that hold no Lean references. +pub extern "C" fn noop_foreach(_: *mut c_void, _: *mut c_void) {} diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs index 07003a57..672de956 100644 --- a/src/lean/ffi.rs +++ b/src/lean/ffi.rs @@ -3,6 +3,7 @@ pub mod byte_array; pub mod iroh; pub mod keccak; pub mod lean_env; +pub mod unsigned; // Modular FFI structure pub mod builder; // IxEnvBuilder struct @@ -12,7 +13,13 @@ pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. -use std::ffi::{CStr, CString, c_char, c_void}; +use std::ffi::{CString, c_void}; + +/// Wrapper to allow OnceLock storage of an external class pointer. +pub(crate) struct ExternalClassPtr(pub(crate) *mut c_void); +// Safety: the class pointer is initialized once and read-only thereafter. 
+unsafe impl Send for ExternalClassPtr {} +unsafe impl Sync for ExternalClassPtr {} use crate::lean::{ array::LeanArrayObject, as_ref_unsafe, lean_io_result_mk_error, @@ -49,31 +56,6 @@ where } } -/// ```c -/// typedef struct { -/// bool is_ok; -/// void *data; -/// } c_result; -/// ``` -#[repr(C)] -pub struct CResult { - pub is_ok: bool, - pub data: *const c_void, -} - -// Free a `CResult` object that corresponds to the Rust type `Result<(), String>` -#[unsafe(no_mangle)] -extern "C" fn rs__c_result_unit_string_free(ptr: *mut CResult) { - let c_result = as_ref_unsafe(ptr); - // Free the string error message - if !c_result.is_ok { - let char_ptr = c_result.data as *mut c_char; - let c_string = unsafe { CString::from_raw(char_ptr) }; - drop(c_string); - } - drop_raw(ptr); -} - #[inline] pub(crate) fn to_raw(t: T) -> *const T { Box::into_raw(Box::new(t)) @@ -86,17 +68,6 @@ pub(super) fn drop_raw(ptr: *mut T) { drop(t); } -// Only used in the Iroh client for the moment -#[inline] -#[cfg_attr( - any(not(feature = "net"), all(target_os = "macos", target_arch = "aarch64")), - allow(dead_code) -)] -pub(crate) fn raw_to_str<'a>(ptr: *const c_char) -> &'a str { - let c_str = unsafe { CStr::from_ptr(ptr) }; - c_str.to_str().expect("Invalid UTF-8 string") -} - #[unsafe(no_mangle)] extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( u32s: &LeanArrayObject, @@ -109,28 +80,3 @@ extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( .collect::>(); u32s == bytes.data() } - -#[repr(C)] -pub struct BytesData { - size: usize, - bytes_vec: *const Vec, -} - -impl BytesData { - #[inline] - pub(super) fn from_vec(vec: Vec) -> Self { - Self { size: vec.len(), bytes_vec: to_raw(vec) } - } -} - -#[unsafe(no_mangle)] -extern "C" fn rs_move_bytes( - bytes_data: *mut BytesData, - byte_array: &mut LeanSArrayObject, -) { - let bytes_data = unsafe { Box::from_raw(bytes_data) }; - let bytes_vec = unsafe { Box::from_raw(bytes_data.bytes_vec as *mut Vec<_>) }; - byte_array.set_data(&bytes_vec); - 
drop(bytes_vec); - drop(bytes_data); -} diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs index be3afef6..a2c44b2b 100644 --- a/src/lean/ffi/aiur/protocol.rs +++ b/src/lean/ffi/aiur/protocol.rs @@ -4,10 +4,8 @@ use multi_stark::{ types::{CommitmentParameters, FriParameters}, }; use rustc_hash::{FxBuildHasher, FxHashMap}; -use std::{ - ffi::{CString, c_void}, - slice, -}; +use std::ffi::c_void; +use std::sync::OnceLock; use crate::{ aiur::{ @@ -18,61 +16,228 @@ use crate::{ lean::{ array::LeanArrayObject, as_mut_unsafe, as_ref_unsafe, - boxed::BoxedU64, ctor::LeanCtorObject, + external::LeanExternalObject, ffi::{ - BytesData, CResult, + ExternalClassPtr, aiur::{ lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ctor_to_toplevel, }, drop_raw, to_raw, }, + lean_alloc_array, lean_alloc_ctor, lean_alloc_external, lean_alloc_sarray, + lean_array_set_core, lean_box_fn, lean_box_u64, lean_ctor_set, + lean_except_error_string, lean_except_ok, lean_register_external_class, + noop_foreach, sarray::LeanSArrayObject, }, - lean_box, }; +// ============================================================================= +// External class registration (OnceLock pattern) +// ============================================================================= + +static AIUR_PROOF_CLASS: OnceLock = OnceLock::new(); +static AIUR_SYSTEM_CLASS: OnceLock = OnceLock::new(); + +fn get_aiur_proof_class() -> *mut c_void { + AIUR_PROOF_CLASS + .get_or_init(|| { + ExternalClassPtr(unsafe { + lean_register_external_class(aiur_proof_finalizer, noop_foreach) + }) + }) + .0 +} + +fn get_aiur_system_class() -> *mut c_void { + AIUR_SYSTEM_CLASS + .get_or_init(|| { + ExternalClassPtr(unsafe { + lean_register_external_class(aiur_system_finalizer, noop_foreach) + }) + }) + .0 +} + +extern "C" fn aiur_proof_finalizer(ptr: *mut c_void) { + drop_raw(ptr as *mut Proof); +} + +extern "C" fn aiur_system_finalizer(ptr: *mut c_void) { + drop_raw(ptr as *mut AiurSystem); +} + +// 
============================================================================= +// Lean FFI functions +// ============================================================================= + +/// `Aiur.Proof.toBytes : @& Proof → ByteArray` #[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_to_bytes(proof: &Proof) -> *const BytesData { +extern "C" fn c_rs_aiur_proof_to_bytes( + proof_obj: *const c_void, +) -> *mut c_void { + let external: &LeanExternalObject = as_ref_unsafe(proof_obj.cast()); + let proof: &Proof = as_ref_unsafe(external.cast_data()); let bytes = proof.to_bytes().expect("Serialization error"); - let bytes_data = BytesData::from_vec(bytes); - to_raw(bytes_data) + let len = bytes.len(); + let arr_ptr = unsafe { lean_alloc_sarray(1, len, len) }; + let arr: &mut LeanSArrayObject = as_mut_unsafe(arr_ptr.cast()); + arr.set_data(&bytes); + arr_ptr } +/// `Aiur.Proof.ofBytes : @& ByteArray → Proof` #[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_of_bytes( +extern "C" fn c_rs_aiur_proof_of_bytes( byte_array: &LeanSArrayObject, -) -> *const Proof { +) -> *mut c_void { let proof = Proof::from_bytes(byte_array.data()).expect("Deserialization error"); - to_raw(proof) + let ptr = to_raw(proof) as *mut c_void; + unsafe { lean_alloc_external(get_aiur_proof_class(), ptr) } +} + +/// `AiurSystem.build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem` +#[unsafe(no_mangle)] +extern "C" fn c_rs_aiur_system_build( + toplevel: &LeanCtorObject, + commitment_parameters: *const c_void, +) -> *mut c_void { + let system = AiurSystem::build( + lean_ctor_to_toplevel(toplevel), + lean_ptr_to_commitment_parameters(commitment_parameters), + ); + let ptr = to_raw(system) as *mut c_void; + unsafe { lean_alloc_external(get_aiur_system_class(), ptr) } } +/// `AiurSystem.verify : @& AiurSystem → @& FriParameters → @& Array G → @& Proof → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_free(ptr: *mut AiurSystem) { - drop_raw(ptr); +extern "C" fn 
c_rs_aiur_system_verify( + aiur_system_obj: *const c_void, + fri_parameters: &LeanCtorObject, + claim: &LeanArrayObject, + proof_obj: *const c_void, +) -> *mut c_void { + let aiur_external: &LeanExternalObject = + as_ref_unsafe(aiur_system_obj.cast()); + let aiur_system: &AiurSystem = as_ref_unsafe(aiur_external.cast_data()); + + let proof_external: &LeanExternalObject = as_ref_unsafe(proof_obj.cast()); + let proof: &Proof = as_ref_unsafe(proof_external.cast_data()); + + let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); + let claim = claim.to_vec(lean_unbox_g); + match aiur_system.verify(fri_parameters, &claim, proof) { + Ok(()) => lean_except_ok(lean_box_fn(0)), + Err(err) => lean_except_error_string(&format!("{err:?}")), + } +} + +/// `AiurSystem.prove`: runs the prover and returns +/// `Array G × Proof × Array G × Array (Array G × IOKeyInfo)` +#[unsafe(no_mangle)] +extern "C" fn c_rs_aiur_system_prove( + aiur_system_obj: *const c_void, + fri_parameters: &LeanCtorObject, + fun_idx: *const c_void, + args: &LeanArrayObject, + io_data_arr: &LeanArrayObject, + io_map_arr: &LeanArrayObject, +) -> *mut c_void { + let aiur_external: &LeanExternalObject = + as_ref_unsafe(aiur_system_obj.cast()); + let aiur_system: &AiurSystem = as_ref_unsafe(aiur_external.cast_data()); + + let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); + let fun_idx = lean_unbox_nat_as_usize(fun_idx); + let args = args.to_vec(lean_unbox_g); + let io_data = io_data_arr.to_vec(lean_unbox_g); + let io_map = lean_array_to_io_buffer_map(io_map_arr); + let mut io_buffer = IOBuffer { data: io_data, map: io_map }; + + let (claim, proof) = + aiur_system.prove(fri_parameters, fun_idx, &args, &mut io_buffer); + + // Build Lean objects directly from the results. 
+ + // claim: Array G + let lean_claim = build_g_array(&claim); + + // proof: Proof (external object) + let lean_proof = unsafe { + lean_alloc_external(get_aiur_proof_class(), to_raw(proof) as *mut c_void) + }; + + // io_data: Array G + let lean_io_data = build_g_array(&io_buffer.data); + + // io_map: Array (Array G × IOKeyInfo) + let lean_io_map = unsafe { + let arr = lean_alloc_array(io_buffer.map.len(), io_buffer.map.len()); + for (i, (key, info)) in io_buffer.map.iter().enumerate() { + let key_arr = build_g_array(key); + // IOKeyInfo ctor (tag 0, 2 object fields) + let key_info = lean_alloc_ctor(0, 2, 0); + lean_ctor_set(key_info, 0, lean_box_fn(info.idx)); + lean_ctor_set(key_info, 1, lean_box_fn(info.len)); + // (Array G × IOKeyInfo) tuple + let map_elt = lean_alloc_ctor(0, 2, 0); + lean_ctor_set(map_elt, 0, key_arr); + lean_ctor_set(map_elt, 1, key_info); + lean_array_set_core(arr, i, map_elt); + } + arr + }; + + // Build nested tuple: + // Array G × Array (Array G × IOKeyInfo) + let io_tuple = unsafe { + let obj = lean_alloc_ctor(0, 2, 0); + lean_ctor_set(obj, 0, lean_io_data); + lean_ctor_set(obj, 1, lean_io_map); + obj + }; + // Proof × Array G × Array (Array G × IOKeyInfo) + let proof_io_tuple = unsafe { + let obj = lean_alloc_ctor(0, 2, 0); + lean_ctor_set(obj, 0, lean_proof); + lean_ctor_set(obj, 1, io_tuple); + obj + }; + // Array G × Proof × Array G × Array (Array G × IOKeyInfo) + unsafe { + let obj = lean_alloc_ctor(0, 2, 0); + lean_ctor_set(obj, 0, lean_claim); + lean_ctor_set(obj, 1, proof_io_tuple); + obj + } +} + +// ============================================================================= +// Helpers +// ============================================================================= + +/// Build a Lean `Array G` from a slice of field elements. 
+fn build_g_array(values: &[G]) -> *mut c_void { + unsafe { + let arr = lean_alloc_array(values.len(), values.len()); + for (i, g) in values.iter().enumerate() { + lean_array_set_core(arr, i, lean_box_u64(g.as_canonical_u64())); + } + arr + } } fn lean_ptr_to_commitment_parameters( commitment_parameters_ptr: *const c_void, ) -> CommitmentParameters { - // Single-attribute structure in Lean. CommitmentParameters { log_blowup: lean_unbox_nat_as_usize(commitment_parameters_ptr), } } -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_build( - toplevel: &LeanCtorObject, - commitment_parameters: *const c_void, -) -> *const AiurSystem { - to_raw(AiurSystem::build( - lean_ctor_to_toplevel(toplevel), - lean_ptr_to_commitment_parameters(commitment_parameters), - )) -} - fn lean_ctor_to_fri_parameters(ctor: &LeanCtorObject) -> FriParameters { let [ log_final_poly_len_ptr, @@ -90,45 +255,6 @@ fn lean_ctor_to_fri_parameters(ctor: &LeanCtorObject) -> FriParameters { } } -#[repr(C)] -struct ProveData { - claim_size: usize, - claim: *const Vec, - proof: *const Proof, - io_buffer: *const IOBuffer, - io_data_size: usize, - io_map_size: usize, - io_keys_sizes: *const usize, -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_claim_free(ptr: *mut Vec) { - drop_raw(ptr); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_free(ptr: *mut Proof) { - drop_raw(ptr); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_prove_data_io_buffer_free(prove_data: &ProveData) { - let boxed_io_keys_sizes = unsafe { - let slice = slice::from_raw_parts_mut( - prove_data.io_keys_sizes as *mut usize, - prove_data.io_map_size, - ); - Box::from_raw(slice) - }; - drop(boxed_io_keys_sizes); - drop_raw(prove_data.io_buffer as *mut ProveData); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_prove_data_free(ptr: *mut ProveData) { - drop_raw(ptr); -} - fn lean_array_to_io_buffer_map( array: &LeanArrayObject, ) -> FxHashMap, IOKeyInfo> { @@ -150,96 +276,3 @@ fn lean_array_to_io_buffer_map( } map } - 
-#[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_prove( - aiur_system: &AiurSystem, - fri_parameters: &LeanCtorObject, - fun_idx: *const c_void, - args: &LeanArrayObject, - io_data: &LeanArrayObject, - io_map: &LeanArrayObject, -) -> *const ProveData { - let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let fun_idx = lean_unbox_nat_as_usize(fun_idx); - let args = args.to_vec(lean_unbox_g); - let io_data = io_data.to_vec(lean_unbox_g); - let io_map = lean_array_to_io_buffer_map(io_map); - let mut io_buffer = IOBuffer { data: io_data, map: io_map }; - let (claim, proof) = - aiur_system.prove(fri_parameters, fun_idx, &args, &mut io_buffer); - let claim_size = claim.len(); - let io_keys_sizes_boxed: Box<[usize]> = - io_buffer.map.keys().map(Vec::len).collect(); - let io_keys_sizes = io_keys_sizes_boxed.as_ptr(); - std::mem::forget(io_keys_sizes_boxed); - let io_data_size = io_buffer.data.len(); - let io_map_size = io_buffer.map.len(); - let prove_data = ProveData { - claim_size, - claim: to_raw(claim), - proof: to_raw(proof), - io_buffer: to_raw(io_buffer), - io_data_size, - io_map_size, - io_keys_sizes, - }; - to_raw(prove_data) -} - -#[unsafe(no_mangle)] -extern "C" fn rs_set_array_g_values(array: &LeanArrayObject, values: &Vec) { - let array_values = array.data(); - assert_eq!(array_values.len(), values.len()); - array_values.iter().zip(values).for_each(|(ptr, g)| { - let boxed_u64 = as_mut_unsafe(*ptr as *mut BoxedU64); - boxed_u64.value = g.as_canonical_u64(); - }); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_set_aiur_io_data_values( - io_data_array: &LeanArrayObject, - io_buffer: &IOBuffer, -) { - rs_set_array_g_values(io_data_array, &io_buffer.data); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_set_aiur_io_map_values( - io_map_array: &LeanArrayObject, - io_buffer: &IOBuffer, -) { - let io_map_values = io_map_array.data(); - assert_eq!(io_map_values.len(), io_buffer.map.len()); - io_map_values.iter().zip(&io_buffer.map).for_each(|(ptr, (key, 
info))| { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [key_array, key_info] = ctor.objs(); - rs_set_array_g_values( - as_mut_unsafe(key_array as *mut LeanArrayObject), - key, - ); - - let key_info_ctor: &mut LeanCtorObject = as_mut_unsafe(key_info as *mut _); - key_info_ctor.set_objs(&[lean_box!(info.idx), lean_box!(info.len)]); - }); -} - -#[unsafe(no_mangle)] -extern "C" fn rs_aiur_system_verify( - aiur_system: &AiurSystem, - fri_parameters: &LeanCtorObject, - claim: &LeanArrayObject, - proof: &Proof, -) -> *const CResult { - let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let claim = claim.to_vec(lean_unbox_g); - let c_result = match aiur_system.verify(fri_parameters, &claim, proof) { - Ok(()) => CResult { is_ok: true, data: std::ptr::null() }, - Err(err) => { - let msg = CString::new(format!("{err:?}")).expect("CString::new failure"); - CResult { is_ok: false, data: msg.into_raw().cast() } - }, - }; - to_raw(c_result) -} diff --git a/src/lean/ffi/iroh.rs b/src/lean/ffi/iroh.rs index 91b4f7cd..23b5d758 100644 --- a/src/lean/ffi/iroh.rs +++ b/src/lean/ffi/iroh.rs @@ -1,81 +1,2 @@ -use crate::lean::ffi::{BytesData, CResult, to_raw}; -use crate::lean::{as_ref_unsafe, ffi::drop_raw}; - -use std::ffi::{CString, c_char}; - -#[repr(C)] -pub struct PutResponseFFI { - pub message: *mut c_char, - pub hash: *mut c_char, -} - -impl PutResponseFFI { - pub fn new(message: &str, hash: &str) -> Self { - let message = CString::new(message).unwrap().into_raw(); - let hash = CString::new(hash).unwrap().into_raw(); - PutResponseFFI { message, hash } - } -} - -#[repr(C)] -pub struct GetResponseFFI { - pub message: *mut c_char, - pub hash: *mut c_char, - pub bytes: *const BytesData, -} - -impl GetResponseFFI { - pub fn new(message: &str, hash: &str, bytes: &[u8]) -> Self { - let message = CString::new(message).unwrap().into_raw(); - let hash = CString::new(hash).unwrap().into_raw(); - let bytes = to_raw(BytesData::from_vec(bytes.to_vec())); - 
GetResponseFFI { message, hash, bytes } - } -} - -// Frees a `CResult` object that corresponds to the Rust type `Result` -#[unsafe(no_mangle)] -extern "C" fn rs__c_result_iroh_put_response_string_free(ptr: *mut CResult) { - let c_result = as_ref_unsafe(ptr); - // Frees the `PutResponseFFI` struct and inner fields - if c_result.is_ok { - let put_response_ptr = c_result.data as *mut PutResponseFFI; - let put_response = as_ref_unsafe(put_response_ptr); - let message = unsafe { CString::from_raw(put_response.message) }; - let hash = unsafe { CString::from_raw(put_response.hash) }; - drop(message); - drop(hash); - drop_raw(put_response_ptr); - } - // Or free the String error message - else { - let char_ptr = c_result.data as *mut c_char; - let c_string = unsafe { CString::from_raw(char_ptr) }; - drop(c_string); - } - drop_raw(ptr); -} - -// Frees a `CResult` object that corresponds to the Rust type `Result` -#[unsafe(no_mangle)] -extern "C" fn rs__c_result_iroh_get_response_string_free(ptr: *mut CResult) { - let c_result = as_ref_unsafe(ptr); - // Frees the `GetResponseFFI` struct and inner fields - // `Bytes` is already freed by `rs_move_bytes` - if c_result.is_ok { - let get_response_ptr = c_result.data as *mut GetResponseFFI; - let get_response = as_ref_unsafe(get_response_ptr); - let message = unsafe { CString::from_raw(get_response.message) }; - let hash = unsafe { CString::from_raw(get_response.hash) }; - drop(message); - drop(hash); - drop_raw(get_response_ptr); - } - // Or free the String error message - else { - let char_ptr = c_result.data as *mut c_char; - let c_string = unsafe { CString::from_raw(char_ptr) }; - drop(c_string); - } - drop_raw(ptr); -} +// Iroh FFI types have been removed. +// The iroh server/client modules now build Lean objects directly. 
diff --git a/src/lean/ffi/keccak.rs b/src/lean/ffi/keccak.rs index c310a3cc..ef52515c 100644 --- a/src/lean/ffi/keccak.rs +++ b/src/lean/ffi/keccak.rs @@ -1,35 +1,65 @@ -use crate::lean::sarray::LeanSArrayObject; +use std::ffi::c_void; +use std::sync::OnceLock; + use tiny_keccak::{Hasher, Keccak}; -use super::{drop_raw, to_raw}; +use crate::lean::{ + as_mut_unsafe, as_ref_unsafe, external::LeanExternalObject, + lean_alloc_external, lean_alloc_sarray, lean_register_external_class, + noop_foreach, sarray::LeanSArrayObject, +}; -#[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_init() -> *const Keccak { - let hasher = Keccak::v256(); - to_raw(hasher) +use super::{ExternalClassPtr, drop_raw, to_raw}; + +static KECCAK_CLASS: OnceLock = OnceLock::new(); + +fn get_keccak_class() -> *mut c_void { + KECCAK_CLASS + .get_or_init(|| { + ExternalClassPtr(unsafe { + lean_register_external_class(keccak_finalizer, noop_foreach) + }) + }) + .0 +} + +extern "C" fn keccak_finalizer(ptr: *mut c_void) { + drop_raw(ptr as *mut Keccak); } +/// `Keccak.Hasher.init : Unit → Hasher` #[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_free(hasher: *mut Keccak) { - drop_raw(hasher); +extern "C" fn c_rs_keccak256_hasher_init(_unit: *const c_void) -> *mut c_void { + let hasher = Keccak::v256(); + let ptr = to_raw(hasher) as *mut c_void; + unsafe { lean_alloc_external(get_keccak_class(), ptr) } } +/// `Keccak.Hasher.update : (hasher: Hasher) → (input: @& ByteArray) → Hasher` #[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_update( - hasher: &Keccak, +extern "C" fn c_rs_keccak256_hasher_update( + hasher_obj: *mut c_void, input: &LeanSArrayObject, -) -> *const Keccak { - let mut hasher = hasher.clone(); - hasher.update(input.data()); - to_raw(hasher) +) -> *mut c_void { + let external: &LeanExternalObject = as_ref_unsafe(hasher_obj.cast()); + let hasher: &Keccak = as_ref_unsafe(external.cast_data()); + let mut new_hasher = hasher.clone(); + new_hasher.update(input.data()); + let 
ptr = to_raw(new_hasher) as *mut c_void; + unsafe { lean_alloc_external(get_keccak_class(), ptr) } } +/// `Keccak.Hasher.finalize : (hasher: Hasher) → ByteArray` #[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_finalize( - hasher: &Keccak, - output: &mut LeanSArrayObject, -) { +extern "C" fn c_rs_keccak256_hasher_finalize( + hasher_obj: *mut c_void, +) -> *mut c_void { + let external: &LeanExternalObject = as_ref_unsafe(hasher_obj.cast()); + let hasher: &Keccak = as_ref_unsafe(external.cast_data()); let mut data = [0u8; 32]; hasher.clone().finalize(&mut data); - output.set_data(&data); + let arr_ptr = unsafe { lean_alloc_sarray(1, 32, 32) }; + let arr: &mut LeanSArrayObject = as_mut_unsafe(arr_ptr.cast()); + arr.set_data(&data); + arr_ptr } diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs index 7dab07a2..11ed850c 100644 --- a/src/lean/ffi/primitives.rs +++ b/src/lean/ffi/primitives.rs @@ -43,7 +43,7 @@ pub fn build_nat(n: &Nat) -> *mut c_void { arr[..chunk.len()].copy_from_slice(chunk); limbs.push(u64::from_le_bytes(arr)); } - unsafe { crate::lean::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) } + crate::lean::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) } // ============================================================================= diff --git a/src/lean/nat.rs b/src/lean/nat.rs index 847536be..65b1c495 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -3,8 +3,9 @@ //! Lean stores small naturals as tagged scalars and large ones as GMP //! `mpz_object`s on the heap. This module handles both representations. 
-use std::ffi::c_void; +use std::ffi::{c_int, c_void}; use std::fmt; +use std::mem::MaybeUninit; use num_bigint::BigUint; @@ -96,3 +97,56 @@ impl Mpz { BigUint::from_bytes_le(&bytes) } } + +// ============================================================================= +// GMP interop for building Lean Nat objects from limbs +// ============================================================================= + +use super::{lean_box_fn, lean_uint64_to_nat}; + +/// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1 +const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64; + +unsafe extern "C" { + #[link_name = "__gmpz_init"] + fn mpz_init(x: *mut Mpz); + + #[link_name = "__gmpz_import"] + fn mpz_import( + rop: *mut Mpz, + count: usize, + order: c_int, + size: usize, + endian: c_int, + nails: usize, + op: *const u64, + ); + + /// Lean's internal mpz allocation — takes ownership of the mpz_t value. + fn lean_alloc_mpz(v: *mut Mpz) -> *mut c_void; +} + +/// Create a Lean `Nat` from a little-endian array of u64 limbs. +/// Replaces the C function `c_lean_nat_from_limbs` from `ixon_ffi.c`. 
+pub fn lean_nat_from_limbs(num_limbs: usize, limbs: *const u64) -> *mut c_void { + if num_limbs == 0 { + return lean_box_fn(0); + } + let first = unsafe { *limbs }; + if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT { + return lean_box_fn(first as usize); + } + if num_limbs == 1 { + return unsafe { lean_uint64_to_nat(first) }; + } + // Multi-limb: use GMP + unsafe { + let mut value = MaybeUninit::::uninit(); + mpz_init(value.as_mut_ptr()); + // order = -1 (least significant limb first) + // size = 8 bytes per limb, endian = 0 (native), nails = 0 + mpz_import(value.as_mut_ptr(), num_limbs, -1, 8, 0, 0, limbs); + // lean_alloc_mpz takes ownership of the mpz value + lean_alloc_mpz(value.as_mut_ptr()) + } +} From 9c9f1fa281c1c2e66408c505dde9e098f2dc8373 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Fri, 27 Feb 2026 17:33:15 -0500 Subject: [PATCH 02/27] Use bindgen Rust functions directly --- Ix/Aiur/Protocol.lean | 10 +- Ix/Iroh/Connect.lean | 4 +- Ix/Iroh/Serve.lean | 2 +- Ix/Keccak.lean | 6 +- Tests/Ix/Ixon.lean | 5 - build.rs | 61 ++++++ flake.nix | 7 +- src/iroh/_client.rs | 10 +- src/iroh/_server.rs | 2 +- src/iroh/client.rs | 57 +++--- src/iroh/server.rs | 2 +- src/lean.rs | 334 ++++++++++++--------------------- src/lean/array.rs | 46 ----- src/lean/boxed.rs | 16 -- src/lean/ctor.rs | 64 ------- src/lean/external.rs | 24 --- src/lean/ffi.rs | 17 +- src/lean/ffi/aiur/protocol.rs | 124 ++++++------ src/lean/ffi/aiur/toplevel.rs | 124 ++++++------ src/lean/ffi/byte_array.rs | 10 +- src/lean/ffi/compile.rs | 304 +++++++++++++++--------------- src/lean/ffi/graph.rs | 26 +-- src/lean/ffi/iroh.rs | 2 - src/lean/ffi/ix/address.rs | 9 +- src/lean/ffi/ix/constant.rs | 278 +++++++++++++-------------- src/lean/ffi/ix/data.rs | 189 +++++++++---------- src/lean/ffi/ix/env.rs | 62 +++--- src/lean/ffi/ix/expr.rs | 195 +++++++++---------- src/lean/ffi/ix/level.rs | 64 +++---- src/lean/ffi/ix/name.rs | 44 +++-- 
src/lean/ffi/ixon/compare.rs | 22 +-- src/lean/ffi/ixon/constant.rs | 239 ++++++++++++----------- src/lean/ffi/ixon/env.rs | 112 +++++------ src/lean/ffi/ixon/expr.rs | 112 ++++++----- src/lean/ffi/ixon/meta.rs | 299 ++++++++++++++--------------- src/lean/ffi/ixon/serialize.rs | 84 ++++----- src/lean/ffi/ixon/sharing.rs | 16 +- src/lean/ffi/ixon/univ.rs | 54 +++--- src/lean/ffi/keccak.rs | 34 ++-- src/lean/ffi/lean_env.rs | 158 +++++++--------- src/lean/ffi/primitives.rs | 131 +++++++------ src/lean/ffi/unsigned.rs | 31 +++ src/lean/nat.rs | 9 +- src/lean/object.rs | 30 --- src/lean/sarray.rs | 37 ---- src/lean/string.rs | 27 --- src/sha256.rs | 11 +- 47 files changed, 1539 insertions(+), 1965 deletions(-) create mode 100644 build.rs delete mode 100644 src/lean/array.rs delete mode 100644 src/lean/boxed.rs delete mode 100644 src/lean/ctor.rs delete mode 100644 src/lean/external.rs delete mode 100644 src/lean/ffi/iroh.rs create mode 100644 src/lean/ffi/unsigned.rs delete mode 100644 src/lean/object.rs delete mode 100644 src/lean/sarray.rs delete mode 100644 src/lean/string.rs diff --git a/Ix/Aiur/Protocol.lean b/Ix/Aiur/Protocol.lean index aee0b44d..fb6fa701 100644 --- a/Ix/Aiur/Protocol.lean +++ b/Ix/Aiur/Protocol.lean @@ -12,10 +12,10 @@ instance : Nonempty Proof := PoofNonempty.property namespace Proof -@[extern "c_rs_aiur_proof_to_bytes"] +@[extern "rs_aiur_proof_to_bytes"] opaque toBytes : @& Proof → ByteArray -@[extern "c_rs_aiur_proof_of_bytes"] +@[extern "rs_aiur_proof_of_bytes"] opaque ofBytes : @& ByteArray → Proof end Proof @@ -58,10 +58,10 @@ instance : BEq IOBuffer where namespace AiurSystem -@[extern "c_rs_aiur_system_build"] +@[extern "rs_aiur_system_build"] opaque build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem -@[extern "c_rs_aiur_system_prove"] +@[extern "rs_aiur_system_prove"] private opaque prove' : @& AiurSystem → @& FriParameters → @& Bytecode.FunIdx → @& Array G → (ioData : @& Array G) → (ioMap : @& Array (Array G × 
IOKeyInfo)) → @@ -77,7 +77,7 @@ def prove (system : @& AiurSystem) (friParameters : @& FriParameters) let ioMap := ioMap.foldl (fun acc (k, v) => acc.insert k v) ∅ (claim, proof, ⟨ioData, ioMap⟩) -@[extern "c_rs_aiur_system_verify"] +@[extern "rs_aiur_system_verify"] opaque verify : @& AiurSystem → @& FriParameters → @& Array G → @& Proof → Except String Unit diff --git a/Ix/Iroh/Connect.lean b/Ix/Iroh/Connect.lean index ec770855..8de05f9a 100644 --- a/Ix/Iroh/Connect.lean +++ b/Ix/Iroh/Connect.lean @@ -15,7 +15,7 @@ structure GetResponse where bytes: ByteArray deriving Inhabited -@[never_extract, extern "c_rs_iroh_put"] +@[never_extract, extern "rs_iroh_put"] private opaque putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse def putBytes (nodeId : @& String) (addrs : @& Array String) (relayUrl : @& String) (input : @& String) : IO Unit := do @@ -23,7 +23,7 @@ def putBytes (nodeId : @& String) (addrs : @& Array String) (relayUrl : @& Strin | .ok response => IO.println s!"Pinned hash {response.hash}" | .error e => throw (IO.userError e) -@[never_extract, extern "c_rs_iroh_get"] +@[never_extract, extern "rs_iroh_get"] private opaque getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse def getBytes (nodeId : @& String) (addrs : @& Array String) (relayUrl : @& String) (hash : @& String) (writeToDisk : Bool): IO Unit := do diff --git a/Ix/Iroh/Serve.lean b/Ix/Iroh/Serve.lean index 6a098a63..2fe46ef7 100644 --- a/Ix/Iroh/Serve.lean +++ b/Ix/Iroh/Serve.lean @@ -4,7 +4,7 @@ public section namespace Iroh.Serve -@[never_extract, extern "c_rs_iroh_serve"] +@[never_extract, extern "rs_iroh_serve"] private opaque serve' : Unit → Except String Unit def serve : IO Unit := diff --git a/Ix/Keccak.lean b/Ix/Keccak.lean index f6107fea..abde63da 100644 --- a/Ix/Keccak.lean +++ b/Ix/Keccak.lean @@ -12,13 +12,13 @@ instance : Nonempty Hasher := GenericNonempty.property namespace Hasher -@[extern 
"c_rs_keccak256_hasher_init"] +@[extern "rs_keccak256_hasher_init"] opaque init : Unit → Hasher -@[extern "c_rs_keccak256_hasher_update"] +@[extern "rs_keccak256_hasher_update"] opaque update : (hasher: Hasher) → (input: @& ByteArray) → Hasher -@[extern "c_rs_keccak256_hasher_finalize"] +@[extern "rs_keccak256_hasher_finalize"] opaque finalize : (hasher: Hasher) → ByteArray end Hasher diff --git a/Tests/Ix/Ixon.lean b/Tests/Ix/Ixon.lean index caf6764d..9355efc7 100644 --- a/Tests/Ix/Ixon.lean +++ b/Tests/Ix/Ixon.lean @@ -1,8 +1,3 @@ -/- - Pure Lean serialization tests for Ixon types. - Generators have been moved to Tests/Gen/Ixon.lean. --/ - module public import Ix.Ixon public import Ix.Sharing diff --git a/build.rs b/build.rs new file mode 100644 index 00000000..8f3f0a44 --- /dev/null +++ b/build.rs @@ -0,0 +1,61 @@ +use std::{env, path::PathBuf, process::Command}; + +fn find_lean_include_dir() -> PathBuf { + // 1. Try LEAN_SYSROOT env var + if let Ok(sysroot) = env::var("LEAN_SYSROOT") { + let inc = PathBuf::from(sysroot).join("include"); + if inc.exists() { + return inc; + } + } + // 2. Try `lean --print-prefix` + if let Ok(output) = Command::new("lean").arg("--print-prefix").output() { + if output.status.success() { + let prefix = + String::from_utf8_lossy(&output.stdout).trim().to_string(); + let inc = PathBuf::from(prefix).join("include"); + if inc.exists() { + return inc; + } + } + } + panic!( + "Cannot find Lean include directory. \ + Set LEAN_SYSROOT or ensure `lean` is on PATH." + ); +} + +fn main() { + let lean_include = find_lean_include_dir(); + let lean_h = lean_include.join("lean").join("lean.h"); + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + let wrapper_c = out_dir.join("lean_static_fns.c"); + + // Generate C wrappers for lean.h's static inline functions and + // Rust bindings for all types and functions. 
+ bindgen::Builder::default() + .header(lean_h.to_str().unwrap()) + .clang_arg(format!("-I{}", lean_include.display())) + .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) + .wrap_static_fns(true) + .wrap_static_fns_path(&wrapper_c) + // lean_get_rc_mt_addr returns `_Atomic(int)*` which bindgen + // cannot wrap. Types using `_Atomic` are made opaque. + .blocklist_function("lean_get_rc_mt_addr") + .opaque_type("lean_thunk_object") + .opaque_type("lean_task_object") + .generate() + .expect("bindgen failed to process lean.h") + .write_to_file(out_dir.join("lean.rs")) + .expect("Couldn't write bindings"); + + // Compile the generated C wrappers into a static library. + cc::Build::new() + .file(&wrapper_c) + .include(&lean_include) + .compile("lean_static_fns"); + + println!("cargo:rerun-if-env-changed=LEAN_SYSROOT"); + println!("cargo:rerun-if-changed={}", lean_h.display()); + println!("cargo:rerun-if-changed=build.rs"); +} diff --git a/flake.nix b/flake.nix index 041fb67a..d7fd3c86 100644 --- a/flake.nix +++ b/flake.nix @@ -73,6 +73,11 @@ inherit src; strictDeps = true; + # build.rs uses LEAN_SYSROOT to locate lean/lean.h for bindgen + LEAN_SYSROOT = "${pkgs.lean.lean-all}"; + # bindgen needs libclang to parse C headers + LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib"; + buildInputs = [] ++ pkgs.lib.optionals pkgs.stdenv.isDarwin [ @@ -138,12 +143,12 @@ # Provide a unified dev shell with Lean + Rust devShells.default = pkgs.mkShell { + # Add libclang for FFI with rust-bindgen LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib"; packages = with pkgs; [ pkg-config openssl clang - libclang # needed for bindgen in build.rs rustToolchain rust-analyzer lean.lean-all # Includes Lean compiler, lake, stdlib, etc. 
diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs index 9aa7f2fa..7046f720 100644 --- a/src/iroh/_client.rs +++ b/src/iroh/_client.rs @@ -1,6 +1,6 @@ use std::ffi::c_void; -use crate::lean::{array::LeanArrayObject, lean_except_error_string}; +use crate::lean::lean_except_error_string; const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ @@ -8,9 +8,9 @@ const ERR_MSG: &str = /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] -extern "C" fn c_rs_iroh_put( +extern "C" fn rs_iroh_put( _node_id: *const c_void, - _addrs: &LeanArrayObject, + _addrs: *const c_void, _relay_url: *const c_void, _input: *const c_void, ) -> *mut c_void { @@ -19,9 +19,9 @@ extern "C" fn c_rs_iroh_put( /// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] -extern "C" fn c_rs_iroh_get( +extern "C" fn rs_iroh_get( _node_id: *const c_void, - _addrs: &LeanArrayObject, + _addrs: *const c_void, _relay_url: *const c_void, _hash: *const c_void, ) -> *mut c_void { diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs index 7130c902..db7ee4a4 100644 --- a/src/iroh/_server.rs +++ b/src/iroh/_server.rs @@ -4,7 +4,7 @@ use crate::lean::lean_except_error_string; /// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn c_rs_iroh_serve() -> *mut c_void { +extern "C" fn rs_iroh_serve() -> *mut c_void { lean_except_error_string( "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS aarch64-darwin", diff --git a/src/iroh/client.rs b/src/iroh/client.rs index 5d1f9f40..e9df4f53 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -9,12 +9,10 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; -use crate::lean::array::LeanArrayObject; -use 
crate::lean::string::LeanStringObject; use crate::lean::{ - as_mut_unsafe, as_ref_unsafe, lean_alloc_ctor, lean_alloc_sarray, - lean_ctor_set, lean_except_error_string, lean_except_ok, lean_mk_string, - safe_cstring, sarray::LeanSArrayObject, + lean::{ lean_alloc_ctor, lean_alloc_sarray, lean_ctor_set, lean_mk_string }, + lean_array_to_vec, lean_except_error_string, lean_except_ok, + lean_obj_to_string, lean_sarray_set_data, safe_cstring, }; // An example ALPN that we are using to communicate over the `Endpoint` @@ -35,7 +33,7 @@ fn mk_put_response(message: &str, hash: &str) -> *mut c_void { let ctor = lean_alloc_ctor(0, 2, 0); lean_ctor_set(ctor, 0, lean_mk_string(c_message.as_ptr())); lean_ctor_set(ctor, 1, lean_mk_string(c_hash.as_ptr())); - ctor + ctor.cast() } } @@ -51,32 +49,28 @@ fn mk_get_response(message: &str, hash: &str, bytes: &[u8]) -> *mut c_void { let c_hash = safe_cstring(hash); unsafe { let byte_array = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let arr: &mut LeanSArrayObject = as_mut_unsafe(byte_array.cast()); - arr.set_data(bytes); + lean_sarray_set_data(byte_array.cast(), bytes); let ctor = lean_alloc_ctor(0, 3, 0); lean_ctor_set(ctor, 0, lean_mk_string(c_message.as_ptr())); lean_ctor_set(ctor, 1, lean_mk_string(c_hash.as_ptr())); lean_ctor_set(ctor, 2, byte_array); - ctor + ctor.cast() } } /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] -extern "C" fn c_rs_iroh_put( - node_id: &LeanStringObject, - addrs: &LeanArrayObject, - relay_url: &LeanStringObject, - input: &LeanStringObject, +extern "C" fn rs_iroh_put( + node_id: *const c_void, + addrs: *const c_void, + relay_url: *const c_void, + input: *const c_void, ) -> *mut c_void { - let node_id = node_id.as_string(); - let addrs: Vec = addrs.to_vec(|ptr| { - let string: &LeanStringObject = as_ref_unsafe(ptr.cast()); - string.as_string() - }); - let relay_url = relay_url.as_string(); - let input_str = 
input.as_string(); + let node_id = lean_obj_to_string(node_id); + let addrs: Vec = lean_array_to_vec(addrs, lean_obj_to_string); + let relay_url = lean_obj_to_string(relay_url); + let input_str = lean_obj_to_string(input); let request = Request::Put(PutRequest { bytes: input_str.as_bytes().to_vec() }); @@ -99,19 +93,16 @@ extern "C" fn c_rs_iroh_put( /// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] -extern "C" fn c_rs_iroh_get( - node_id: &LeanStringObject, - addrs: &LeanArrayObject, - relay_url: &LeanStringObject, - hash: &LeanStringObject, +extern "C" fn rs_iroh_get( + node_id: *const c_void, + addrs: *const c_void, + relay_url: *const c_void, + hash: *const c_void, ) -> *mut c_void { - let node_id = node_id.as_string(); - let addrs: Vec = addrs.to_vec(|ptr| { - let string: &LeanStringObject = as_ref_unsafe(ptr.cast()); - string.as_string() - }); - let relay_url = relay_url.as_string(); - let hash = hash.as_string(); + let node_id = lean_obj_to_string(node_id); + let addrs: Vec = lean_array_to_vec(addrs, lean_obj_to_string); + let relay_url = lean_obj_to_string(relay_url); + let hash = lean_obj_to_string(hash); let request = Request::Get(GetRequest { hash: hash.clone() }); let rt = diff --git a/src/iroh/server.rs b/src/iroh/server.rs index 2d714efe..cd647c94 100644 --- a/src/iroh/server.rs +++ b/src/iroh/server.rs @@ -21,7 +21,7 @@ const READ_SIZE_LIMIT: usize = 100_000_000; /// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn c_rs_iroh_serve() -> *mut c_void { +extern "C" fn rs_iroh_serve() -> *mut c_void { // Create a Tokio runtime to block on the async function let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); diff --git a/src/lean.rs b/src/lean.rs index 27ea1a5e..c33612a7 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -1,26 +1,27 @@ //! 
Rust bindings for Lean, implemented by mimicking the memory layout of Lean's //! low-level C objects. //! -//! This crate must be kept in sync with `lean/lean.h`. Pay close attention to -//! definitions containing C code in their docstrings. +//! The `lean` submodule contains auto-generated bindings from `lean.h` via +//! bindgen. Higher-level helpers and custom `#[repr(C)]` types are defined +//! alongside it in sibling modules. + +#[allow( + non_upper_case_globals, + non_camel_case_types, + non_snake_case, + dead_code, + unsafe_op_in_unsafe_fn, + clippy::all +)] +pub mod lean { + include!(concat!(env!("OUT_DIR"), "/lean.rs")); +} -pub mod array; -pub mod boxed; -pub mod ctor; -pub mod external; pub mod ffi; pub mod nat; -pub mod object; -pub mod sarray; -pub mod string; use std::ffi::{CString, c_void}; -use crate::lean::{ - boxed::{BoxedU64, BoxedUSize}, - ctor::LeanCtorObject, -}; - #[inline] #[allow(clippy::not_unsafe_ptr_arg_deref)] pub fn as_ref_unsafe<'a, T>(ptr: *const T) -> &'a T { @@ -74,84 +75,117 @@ macro_rules! 
lean_unbox { }; } -/// ```c -/// unsigned lean_unbox_uint32(b_lean_obj_arg o) { -/// if (sizeof(void*) == 4) { -/// /* 32-bit implementation */ -/// return lean_ctor_get_uint32(o, 0); -/// } else { -/// /* 64-bit implementation */ -/// return lean_unbox(o); -/// } -/// } -/// ``` #[inline] pub fn lean_unbox_u32(ptr: *const c_void) -> u32 { - if cfg!(target_pointer_width = "32") { - let boxed_usize: &BoxedUSize = as_ref_unsafe(ptr.cast()); - u32::try_from(boxed_usize.value).expect("Cannot convert from usize") - } else { - lean_unbox!(u32, ptr) - } + unsafe { lean::lean_unbox_uint32(ptr as *mut _) as u32 } } -/// ```c -/// uint64_t lean_unbox_uint64(b_lean_obj_arg o) { -/// return lean_ctor_get_uint64(o, 0); -/// } -/// ``` #[inline] pub fn lean_unbox_u64(ptr: *const c_void) -> u64 { - let boxed_usize: &BoxedU64 = as_ref_unsafe(ptr.cast()); - boxed_usize.value + unsafe { lean::lean_unbox_uint64(ptr as *mut _) } } -/// ```c -/// lean_object * lean_box_uint64(uint64_t v) { -/// lean_object * r = lean_alloc_ctor(0, 0, sizeof(uint64_t)); -/// lean_ctor_set_uint64(r, 0, v); -/// return r; -/// } -/// ``` #[inline] pub fn lean_box_u64(v: u64) -> *mut c_void { + unsafe { lean::lean_box_uint64(v).cast() } +} + +pub fn lean_obj_to_string(ptr: *const c_void) -> String { unsafe { - let obj = lean_alloc_ctor(0, 0, 8); - lean_ctor_set_uint64(obj, 0, v); - obj + let obj = ptr as *mut lean::lean_object; + let len = lean::lean_string_size(obj) - 1; // m_size includes NUL + let data = lean::lean_string_cstr(obj); + let bytes = std::slice::from_raw_parts(data as *const u8, len); + String::from_utf8_unchecked(bytes.to_vec()) } } -pub fn boxed_usize_ptr_to_usize(ptr: *const c_void) -> usize { - let boxed_usize_ptr = ptr.cast::(); - let boxed_usize = as_ref_unsafe(boxed_usize_ptr); - boxed_usize.value +#[inline] +pub fn lean_tag(ptr: *const c_void) -> u8 { + unsafe { lean::lean_obj_tag(ptr as *mut _) as u8 } } -/// Emulates arrays of flexible size from C. 
-#[repr(C)] -pub struct CArray([T; 0]); +#[inline] +pub fn lean_ctor_objs(ptr: *const c_void) -> [*const c_void; N] { + // Use raw pointer arithmetic instead of lean_ctor_get to avoid its + // bounds-check assertion. Call sites legitimately read past the object + // fields into the scalar area (e.g. Expr.Data hash, Bool/BinderInfo + // stored as UInt8 scalars). This matches the old LeanCtorObject::objs(). + let base = unsafe { (ptr as *const *const c_void).add(1) }; + std::array::from_fn(|i| unsafe { *base.add(i) }) +} -impl CArray { - #[inline] - pub fn slice(&self, len: usize) -> &[T] { - unsafe { std::slice::from_raw_parts(self.0.as_ptr(), len) } +#[inline] +pub fn lean_ctor_scalar_u64(ptr: *const c_void, num_objs: usize, offset: usize) -> u64 { + unsafe { + std::ptr::read_unaligned(ptr.cast::().add(8 + num_objs * 8 + offset).cast()) } +} + +#[inline] +pub fn lean_ctor_scalar_u8(ptr: *const c_void, num_objs: usize, offset: usize) -> u8 { + unsafe { *ptr.cast::().add(8 + num_objs * 8 + offset) } +} + +#[inline] +pub fn lean_ctor_scalar_bool(ptr: *const c_void, num_objs: usize, offset: usize) -> bool { + lean_ctor_scalar_u8(ptr, num_objs, offset) != 0 +} - #[inline] - pub fn slice_mut(&mut self, len: usize) -> &mut [T] { - unsafe { std::slice::from_raw_parts_mut(self.0.as_mut_ptr(), len) } +// ============================================================================= +// Array helpers (replace LeanArrayObject) +// ============================================================================= + +/// Return a slice over the elements of a Lean `Array` object. 
+pub fn lean_array_data(ptr: *const c_void) -> &'static [*const c_void] { + unsafe { + let obj = ptr as *mut lean::lean_object; + let size = lean::lean_array_size(obj); + let cptr = lean::lean_array_cptr(obj); + std::slice::from_raw_parts(cptr.cast(), size) } +} - #[inline] - pub fn copy_from_slice(&mut self, src: &[T]) { - unsafe { - std::ptr::copy_nonoverlapping( - src.as_ptr(), - self.0.as_ptr() as *mut _, - src.len(), - ); - } +/// Convert a Lean `Array` to a `Vec` by mapping each element. +pub fn lean_array_to_vec(ptr: *const c_void, f: fn(*const c_void) -> T) -> Vec { + lean_array_data(ptr).iter().map(|&p| f(p)).collect() +} + +/// Like `lean_array_to_vec` but threads a mutable context through each call. +pub fn lean_array_to_vec_with( + ptr: *const c_void, + f: fn(*const c_void, &mut C) -> T, + c: &mut C, +) -> Vec { + lean_array_data(ptr).iter().map(|&p| f(p, c)).collect() +} + +// ============================================================================= +// SArray (ByteArray) helpers (replace LeanSArrayObject) +// ============================================================================= + +/// Return a byte slice over a Lean `ByteArray` (scalar array) object. +pub fn lean_sarray_data(ptr: *const c_void) -> &'static [u8] { + unsafe { + let obj = ptr as *mut lean::lean_object; + let size = lean::lean_sarray_size(obj); + let cptr = lean::lean_sarray_cptr(obj); + std::slice::from_raw_parts(cptr, size) + } +} + +/// Write bytes into a Lean `ByteArray` and update its size. +/// +/// # Safety +/// The caller must ensure `ptr` points to a valid `lean_sarray_object` +/// with sufficient capacity for `data`. 
+pub unsafe fn lean_sarray_set_data(ptr: *mut c_void, data: &[u8]) { + unsafe { + let obj = ptr as *mut lean::lean_object; + let cptr = lean::lean_sarray_cptr(obj); + std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); + // Update m_size: at offset 8 (after lean_object header) + *(ptr.cast::().add(8) as *mut usize) = data.len(); } } @@ -164,8 +198,7 @@ impl Iterator for ListIterator { if lean_is_scalar(ptr) { return None; } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); + let [head_ptr, tail_ptr] = lean_ctor_objs(ptr); self.0 = tail_ptr; Some(head_ptr) } @@ -177,146 +210,13 @@ pub fn collect_list( ) -> Vec { let mut vec = Vec::new(); while !lean_is_scalar(ptr) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); + let [head_ptr, tail_ptr] = lean_ctor_objs(ptr); vec.push(map_fn(head_ptr)); ptr = tail_ptr; } vec } -pub fn collect_list_with( - mut ptr: *const c_void, - map_fn: fn(*const c_void, &mut C) -> T, - c: &mut C, -) -> Vec { - let mut vec = Vec::new(); - while !lean_is_scalar(ptr) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); - vec.push(map_fn(head_ptr, c)); - ptr = tail_ptr; - } - vec -} - -// ============================================================================= -// Lean C API extern declarations for object construction -// ============================================================================= - -use std::ffi::c_uint; - -// Lean C API bindings. Static inline functions use bindgen-generated -// wrappers (suffix __extern). LEAN_EXPORT functions link directly. -unsafe extern "C" { - // Object allocation - /// Allocate a constructor object with the given tag, number of object fields, - /// and scalar size in bytes. 
- #[link_name = "lean_alloc_ctor__extern"] - pub fn lean_alloc_ctor( - tag: c_uint, - num_objs: c_uint, - scalar_sz: c_uint, - ) -> *mut c_void; - - /// Set the i-th object field of a constructor. - #[link_name = "lean_ctor_set__extern"] - pub fn lean_ctor_set(o: *mut c_void, i: c_uint, v: *mut c_void); - - /// Get the i-th object field of a constructor. - #[link_name = "lean_ctor_get__extern"] - pub fn lean_ctor_get(o: *mut c_void, i: c_uint) -> *const c_void; - - /// Get the tag of a Lean object. - #[link_name = "lean_obj_tag__extern"] - pub fn lean_obj_tag(o: *mut c_void) -> c_uint; - - /// Set a uint8 scalar field at the given byte offset (after object fields). - #[link_name = "lean_ctor_set_uint8__extern"] - pub fn lean_ctor_set_uint8(o: *mut c_void, offset: usize, v: u8); - - /// Set a uint64 scalar field at the given byte offset (after object fields). - #[link_name = "lean_ctor_set_uint64__extern"] - pub fn lean_ctor_set_uint64(o: *mut c_void, offset: usize, v: u64); - - // String allocation (LEAN_EXPORT — links directly, no wrapper needed) - /// Create a Lean string from a null-terminated C string. - pub fn lean_mk_string(s: *const std::ffi::c_char) -> *mut c_void; - - // Scalar array (ByteArray) allocation - /// Allocate a scalar array with the given element size, initial size, and capacity. - #[link_name = "lean_alloc_sarray__extern"] - pub fn lean_alloc_sarray( - elem_size: c_uint, - size: usize, - capacity: usize, - ) -> *mut c_void; - - /// Get a pointer to the data area of a scalar array. - #[link_name = "lean_sarray_cptr__extern"] - pub fn lean_sarray_cptr(o: *mut c_void) -> *mut u8; - - // Array allocation - /// Allocate an array with the given initial size and capacity. - #[link_name = "lean_alloc_array__extern"] - pub fn lean_alloc_array(size: usize, capacity: usize) -> *mut c_void; - - /// Set the i-th element of an array (does not update size). 
- #[link_name = "lean_array_set_core__extern"] - pub fn lean_array_set_core(o: *mut c_void, i: usize, v: *mut c_void); - - /// Get the i-th element of an array. - #[link_name = "lean_array_get_core__extern"] - pub fn lean_array_get_core(o: *mut c_void, i: usize) -> *const c_void; - - // Reference counting - /// Increment the reference count of a Lean object. - #[link_name = "lean_inc__extern"] - pub fn lean_inc(o: *mut c_void); - - /// Increment the reference count by n. - #[link_name = "lean_inc_n__extern"] - pub fn lean_inc_n(o: *mut c_void, n: usize); - - /// Decrement the reference count of a Lean object. - #[link_name = "lean_dec__extern"] - pub fn lean_dec(o: *mut c_void); - - // External object support - /// Register an external class with finalizer and foreach callbacks. - /// This is a LEAN_EXPORT function and can be linked directly. - pub fn lean_register_external_class( - finalize: extern "C" fn(*mut c_void), - foreach: extern "C" fn(*mut c_void, *mut c_void), - ) -> *mut c_void; - - /// Allocate an external object wrapping opaque data. - #[link_name = "lean_alloc_external__extern"] - pub fn lean_alloc_external( - cls: *mut c_void, - data: *mut c_void, - ) -> *mut c_void; - - // IO result construction - /// Wrap a value in a successful IO result. - #[link_name = "lean_io_result_mk_ok__extern"] - pub fn lean_io_result_mk_ok(v: *mut c_void) -> *mut c_void; - - /// Wrap an error in an IO error result. - #[link_name = "lean_io_result_mk_error__extern"] - pub fn lean_io_result_mk_error(err: *mut c_void) -> *mut c_void; - - /// Create an IO.Error.userError from a String (LEAN_EXPORT — links directly). - pub fn lean_mk_io_user_error(msg: *mut c_void) -> *mut c_void; - - // Nat allocation for large values - /// Create a Nat from a uint64. For values > max boxed, allocates on heap. 
- #[link_name = "lean_uint64_to_nat__extern"] - pub fn lean_uint64_to_nat(n: u64) -> *mut c_void; - - // lean_nat_from_limbs moved to src/lean/nat.rs (uses GMP directly) -} - /// Box a scalar value into a Lean object pointer. /// ```c /// lean_object * lean_box(size_t n) { return (lean_object*)(((size_t)(n) << 1) | 1); } @@ -334,9 +234,9 @@ pub fn lean_box_fn(n: usize) -> *mut c_void { #[inline] pub fn lean_except_ok(val: *mut c_void) -> *mut c_void { unsafe { - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, val); - obj + let obj = lean::lean_alloc_ctor(1, 1, 0); + lean::lean_ctor_set(obj, 0, val.cast()); + obj.cast() } } @@ -344,9 +244,9 @@ pub fn lean_except_ok(val: *mut c_void) -> *mut c_void { #[inline] pub fn lean_except_error(msg: *mut c_void) -> *mut c_void { unsafe { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, msg); - obj + let obj = lean::lean_alloc_ctor(0, 1, 0); + lean::lean_ctor_set(obj, 0, msg.cast()); + obj.cast() } } @@ -354,8 +254,12 @@ pub fn lean_except_error(msg: *mut c_void) -> *mut c_void { #[inline] pub fn lean_except_error_string(msg: &str) -> *mut c_void { let c_msg = safe_cstring(msg); - unsafe { lean_except_error(lean_mk_string(c_msg.as_ptr())) } + unsafe { lean_except_error(lean::lean_mk_string(c_msg.as_ptr()).cast()) } } /// No-op foreach callback for external classes that hold no Lean references. 
-pub extern "C" fn noop_foreach(_: *mut c_void, _: *mut c_void) {} +pub unsafe extern "C" fn noop_foreach( + _: *mut c_void, + _: *mut lean::lean_object, +) { +} diff --git a/src/lean/array.rs b/src/lean/array.rs deleted file mode 100644 index 0bb468fb..00000000 --- a/src/lean/array.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::ffi::c_void; - -use super::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// size_t m_size; -/// size_t m_capacity; -/// lean_object * m_data[]; -/// } lean_array_object; -/// ``` -#[repr(C)] -pub struct LeanArrayObject { - m_header: LeanObject, - m_size: usize, - m_capacity: usize, - m_data: CArray<*const c_void>, -} - -impl LeanArrayObject { - #[inline] - pub fn data(&self) -> &[*const c_void] { - self.m_data.slice(self.m_size) - } - - #[inline] - pub fn to_vec(&self, map_fn: fn(*const c_void) -> T) -> Vec { - self.data().iter().map(|ptr| map_fn(*ptr)).collect() - } - - #[inline] - pub fn to_vec_with( - &self, - map_fn: fn(*const c_void, &mut C) -> T, - c: &mut C, - ) -> Vec { - self.data().iter().map(|ptr| map_fn(*ptr, c)).collect() - } - - pub fn set_data(&mut self, data: &[*const c_void]) { - assert!(self.m_capacity >= data.len()); - self.m_data.copy_from_slice(data); - self.m_size = data.len(); - } -} diff --git a/src/lean/boxed.rs b/src/lean/boxed.rs deleted file mode 100644 index f7e709e9..00000000 --- a/src/lean/boxed.rs +++ /dev/null @@ -1,16 +0,0 @@ -use super::object::LeanObject; - -/// This is equivalent to a `lean_ctor_object` with `m_objs` of size 1. -#[repr(C)] -pub struct BoxedUSize { - m_header: LeanObject, - pub value: usize, -} - -/// This is equivalent to a `lean_ctor_object` with `m_objs` of size 1 on x64 -/// and 2 on x32. -#[repr(C)] -pub struct BoxedU64 { - m_header: LeanObject, - pub value: u64, -} diff --git a/src/lean/ctor.rs b/src/lean/ctor.rs deleted file mode 100644 index 4e17f439..00000000 --- a/src/lean/ctor.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! 
Lean constructor object layout and field access. - -use std::{ffi::c_void, ptr}; - -use super::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// lean_object * m_objs[]; -/// } lean_ctor_object; -/// ``` -#[repr(C)] -pub struct LeanCtorObject { - m_header: LeanObject, - m_objs: CArray<*const c_void>, -} - -impl LeanCtorObject { - #[inline] - pub fn tag(&self) -> u8 { - self.m_header.m_tag() - } - - /// The number of objects must be known at compile time, given the context - /// in which the data is being read. - #[inline] - pub fn objs(&self) -> [*const c_void; N] { - let mut ptrs = [ptr::null(); N]; - ptrs.copy_from_slice(self.m_objs.slice(N)); - ptrs - } - - #[inline] - pub fn set_objs(&mut self, data: &[*const c_void]) { - self.m_objs.copy_from_slice(data); - } - - /// Read a u64 scalar field from the constructor. - /// `num_objs` is the number of object fields (pointers) in this constructor. - /// `scalar_offset` is the byte offset within the scalar area. - /// Scalar fields are stored after the object fields in memory. - #[inline] - pub fn get_scalar_u64(&self, num_objs: usize, scalar_offset: usize) -> u64 { - // Scalar area starts after: header (8 bytes) + object pointers (8 bytes each) - let base_ptr = (self as *const Self).cast::(); - let scalar_area = unsafe { base_ptr.add(8 + num_objs * 8 + scalar_offset) }; - unsafe { ptr::read_unaligned(scalar_area.cast::()) } - } - - /// Read a u8 scalar field from the constructor. - #[inline] - pub fn get_scalar_u8(&self, num_objs: usize, scalar_offset: usize) -> u8 { - let base_ptr = (self as *const Self).cast::(); - let scalar_area = unsafe { base_ptr.add(8 + num_objs * 8 + scalar_offset) }; - unsafe { *scalar_area } - } - - /// Read a bool scalar field from the constructor. 
- #[inline] - pub fn get_scalar_bool(&self, num_objs: usize, scalar_offset: usize) -> bool { - self.get_scalar_u8(num_objs, scalar_offset) != 0 - } -} diff --git a/src/lean/external.rs b/src/lean/external.rs deleted file mode 100644 index a16437b8..00000000 --- a/src/lean/external.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::ffi::c_void; - -use super::object::LeanObject; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// lean_external_class * m_class; -/// void * m_data; -/// } lean_external_object; -/// ``` -#[repr(C)] -pub struct LeanExternalObject { - m_header: LeanObject, - m_class: *const c_void, - m_data: *const c_void, -} - -impl LeanExternalObject { - #[inline] - pub fn cast_data(&self) -> *const T { - self.m_data.cast() - } -} diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs index 672de956..a0ff9e54 100644 --- a/src/lean/ffi.rs +++ b/src/lean/ffi.rs @@ -1,6 +1,5 @@ pub mod aiur; pub mod byte_array; -pub mod iroh; pub mod keccak; pub mod lean_env; pub mod unsigned; @@ -22,9 +21,8 @@ unsafe impl Send for ExternalClassPtr {} unsafe impl Sync for ExternalClassPtr {} use crate::lean::{ - array::LeanArrayObject, as_ref_unsafe, lean_io_result_mk_error, - lean_mk_io_user_error, lean_mk_string, lean_unbox_u32, - sarray::LeanSArrayObject, + lean::{ lean_io_result_mk_error, lean_mk_io_user_error, lean_mk_string }, + lean_array_to_vec, lean_sarray_data, lean_unbox_u32, }; /// Guard an FFI function that returns a Lean IO result against panics. 
@@ -50,7 +48,7 @@ where unsafe { let lean_msg = lean_mk_string(c_msg.as_ptr()); let lean_err = lean_mk_io_user_error(lean_msg); - lean_io_result_mk_error(lean_err) + lean_io_result_mk_error(lean_err).cast() } }, } @@ -70,13 +68,12 @@ pub(super) fn drop_raw(ptr: *mut T) { #[unsafe(no_mangle)] extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( - u32s: &LeanArrayObject, - bytes: &LeanSArrayObject, + u32s: *const c_void, + bytes: *const c_void, ) -> bool { - let u32s = u32s - .to_vec(lean_unbox_u32) + let u32s = lean_array_to_vec(u32s, lean_unbox_u32) .into_iter() .flat_map(u32::to_le_bytes) .collect::>(); - u32s == bytes.data() + u32s == lean_sarray_data(bytes) } diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs index a2c44b2b..fe0cd27f 100644 --- a/src/lean/ffi/aiur/protocol.rs +++ b/src/lean/ffi/aiur/protocol.rs @@ -14,22 +14,23 @@ use crate::{ synthesis::AiurSystem, }, lean::{ - array::LeanArrayObject, - as_mut_unsafe, as_ref_unsafe, - ctor::LeanCtorObject, - external::LeanExternalObject, + lean_array_data, lean_array_to_vec, lean_sarray_data, lean_sarray_set_data, + lean_ctor_objs, ffi::{ ExternalClassPtr, aiur::{ - lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ctor_to_toplevel, + lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, }, drop_raw, to_raw, }, - lean_alloc_array, lean_alloc_ctor, lean_alloc_external, lean_alloc_sarray, - lean_array_set_core, lean_box_fn, lean_box_u64, lean_ctor_set, - lean_except_error_string, lean_except_ok, lean_register_external_class, + lean::{ + lean_alloc_array, lean_alloc_ctor, lean_alloc_external, lean_alloc_sarray, + lean_array_set_core, lean_ctor_set, lean_get_external_data, + lean_register_external_class, + }, + lean_box_fn, lean_box_u64, + lean_except_error_string, lean_except_ok, noop_foreach, - sarray::LeanSArrayObject, }, }; @@ -44,8 +45,8 @@ fn get_aiur_proof_class() -> *mut c_void { AIUR_PROOF_CLASS .get_or_init(|| { ExternalClassPtr(unsafe { - 
lean_register_external_class(aiur_proof_finalizer, noop_foreach) - }) + lean_register_external_class(Some(aiur_proof_finalizer), Some(noop_foreach)) + }.cast()) }) .0 } @@ -54,8 +55,8 @@ fn get_aiur_system_class() -> *mut c_void { AIUR_SYSTEM_CLASS .get_or_init(|| { ExternalClassPtr(unsafe { - lean_register_external_class(aiur_system_finalizer, noop_foreach) - }) + lean_register_external_class(Some(aiur_system_finalizer), Some(noop_foreach)) + }.cast()) }) .0 } @@ -74,61 +75,58 @@ extern "C" fn aiur_system_finalizer(ptr: *mut c_void) { /// `Aiur.Proof.toBytes : @& Proof → ByteArray` #[unsafe(no_mangle)] -extern "C" fn c_rs_aiur_proof_to_bytes( +extern "C" fn rs_aiur_proof_to_bytes( proof_obj: *const c_void, ) -> *mut c_void { - let external: &LeanExternalObject = as_ref_unsafe(proof_obj.cast()); - let proof: &Proof = as_ref_unsafe(external.cast_data()); + let proof: &Proof = unsafe { &*lean_get_external_data(proof_obj as *mut _).cast() }; let bytes = proof.to_bytes().expect("Serialization error"); let len = bytes.len(); let arr_ptr = unsafe { lean_alloc_sarray(1, len, len) }; - let arr: &mut LeanSArrayObject = as_mut_unsafe(arr_ptr.cast()); - arr.set_data(&bytes); - arr_ptr + unsafe { lean_sarray_set_data(arr_ptr.cast(), &bytes) }; + arr_ptr.cast() } /// `Aiur.Proof.ofBytes : @& ByteArray → Proof` #[unsafe(no_mangle)] -extern "C" fn c_rs_aiur_proof_of_bytes( - byte_array: &LeanSArrayObject, +extern "C" fn rs_aiur_proof_of_bytes( + byte_array: *const c_void, ) -> *mut c_void { let proof = - Proof::from_bytes(byte_array.data()).expect("Deserialization error"); + Proof::from_bytes(lean_sarray_data(byte_array)).expect("Deserialization error"); let ptr = to_raw(proof) as *mut c_void; - unsafe { lean_alloc_external(get_aiur_proof_class(), ptr) } + unsafe { lean_alloc_external(get_aiur_proof_class().cast(), ptr) }.cast() } /// `AiurSystem.build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem` #[unsafe(no_mangle)] -extern "C" fn c_rs_aiur_system_build( - 
toplevel: &LeanCtorObject, +extern "C" fn rs_aiur_system_build( + toplevel: *const c_void, commitment_parameters: *const c_void, ) -> *mut c_void { let system = AiurSystem::build( - lean_ctor_to_toplevel(toplevel), + lean_ptr_to_toplevel(toplevel), lean_ptr_to_commitment_parameters(commitment_parameters), ); let ptr = to_raw(system) as *mut c_void; - unsafe { lean_alloc_external(get_aiur_system_class(), ptr) } + unsafe { lean_alloc_external(get_aiur_system_class().cast(), ptr) }.cast() } /// `AiurSystem.verify : @& AiurSystem → @& FriParameters → @& Array G → @& Proof → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn c_rs_aiur_system_verify( +extern "C" fn rs_aiur_system_verify( aiur_system_obj: *const c_void, - fri_parameters: &LeanCtorObject, - claim: &LeanArrayObject, + fri_parameters: *const c_void, + claim: *const c_void, proof_obj: *const c_void, ) -> *mut c_void { - let aiur_external: &LeanExternalObject = - as_ref_unsafe(aiur_system_obj.cast()); - let aiur_system: &AiurSystem = as_ref_unsafe(aiur_external.cast_data()); + let aiur_system: &AiurSystem = + unsafe { &*lean_get_external_data(aiur_system_obj as *mut _).cast() }; - let proof_external: &LeanExternalObject = as_ref_unsafe(proof_obj.cast()); - let proof: &Proof = as_ref_unsafe(proof_external.cast_data()); + let proof: &Proof = + unsafe { &*lean_get_external_data(proof_obj as *mut _).cast() }; let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let claim = claim.to_vec(lean_unbox_g); + let claim = lean_array_to_vec(claim, lean_unbox_g); match aiur_system.verify(fri_parameters, &claim, proof) { Ok(()) => lean_except_ok(lean_box_fn(0)), Err(err) => lean_except_error_string(&format!("{err:?}")), @@ -138,22 +136,21 @@ extern "C" fn c_rs_aiur_system_verify( /// `AiurSystem.prove`: runs the prover and returns /// `Array G × Proof × Array G × Array (Array G × IOKeyInfo)` #[unsafe(no_mangle)] -extern "C" fn c_rs_aiur_system_prove( +extern "C" fn rs_aiur_system_prove( aiur_system_obj: 
*const c_void, - fri_parameters: &LeanCtorObject, + fri_parameters: *const c_void, fun_idx: *const c_void, - args: &LeanArrayObject, - io_data_arr: &LeanArrayObject, - io_map_arr: &LeanArrayObject, + args: *const c_void, + io_data_arr: *const c_void, + io_map_arr: *const c_void, ) -> *mut c_void { - let aiur_external: &LeanExternalObject = - as_ref_unsafe(aiur_system_obj.cast()); - let aiur_system: &AiurSystem = as_ref_unsafe(aiur_external.cast_data()); + let aiur_system: &AiurSystem = + unsafe { &*lean_get_external_data(aiur_system_obj as *mut _).cast() }; let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); let fun_idx = lean_unbox_nat_as_usize(fun_idx); - let args = args.to_vec(lean_unbox_g); - let io_data = io_data_arr.to_vec(lean_unbox_g); + let args = lean_array_to_vec(args, lean_unbox_g); + let io_data = lean_array_to_vec(io_data_arr, lean_unbox_g); let io_map = lean_array_to_io_buffer_map(io_map_arr); let mut io_buffer = IOBuffer { data: io_data, map: io_map }; @@ -167,7 +164,7 @@ extern "C" fn c_rs_aiur_system_prove( // proof: Proof (external object) let lean_proof = unsafe { - lean_alloc_external(get_aiur_proof_class(), to_raw(proof) as *mut c_void) + lean_alloc_external(get_aiur_proof_class().cast(), to_raw(proof) as *mut c_void) }; // io_data: Array G @@ -180,11 +177,11 @@ extern "C" fn c_rs_aiur_system_prove( let key_arr = build_g_array(key); // IOKeyInfo ctor (tag 0, 2 object fields) let key_info = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(key_info, 0, lean_box_fn(info.idx)); - lean_ctor_set(key_info, 1, lean_box_fn(info.len)); + lean_ctor_set(key_info, 0, lean_box_fn(info.idx).cast()); + lean_ctor_set(key_info, 1, lean_box_fn(info.len).cast()); // (Array G × IOKeyInfo) tuple let map_elt = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(map_elt, 0, key_arr); + lean_ctor_set(map_elt, 0, key_arr.cast()); lean_ctor_set(map_elt, 1, key_info); lean_array_set_core(arr, i, map_elt); } @@ -195,7 +192,7 @@ extern "C" fn c_rs_aiur_system_prove( // Array 
G × Array (Array G × IOKeyInfo) let io_tuple = unsafe { let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, lean_io_data); + lean_ctor_set(obj, 0, lean_io_data.cast()); lean_ctor_set(obj, 1, lean_io_map); obj }; @@ -209,9 +206,9 @@ extern "C" fn c_rs_aiur_system_prove( // Array G × Proof × Array G × Array (Array G × IOKeyInfo) unsafe { let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, lean_claim); + lean_ctor_set(obj, 0, lean_claim.cast()); lean_ctor_set(obj, 1, proof_io_tuple); - obj + obj.cast() } } @@ -224,9 +221,9 @@ fn build_g_array(values: &[G]) -> *mut c_void { unsafe { let arr = lean_alloc_array(values.len(), values.len()); for (i, g) in values.iter().enumerate() { - lean_array_set_core(arr, i, lean_box_u64(g.as_canonical_u64())); + lean_array_set_core(arr, i, lean_box_u64(g.as_canonical_u64()).cast()); } - arr + arr.cast() } } @@ -238,13 +235,13 @@ fn lean_ptr_to_commitment_parameters( } } -fn lean_ctor_to_fri_parameters(ctor: &LeanCtorObject) -> FriParameters { +fn lean_ctor_to_fri_parameters(ptr: *const c_void) -> FriParameters { let [ log_final_poly_len_ptr, num_queries_ptr, commit_proof_of_work_bits, query_proof_of_work_bits, - ] = ctor.objs(); + ] = lean_ctor_objs(ptr); FriParameters { log_final_poly_len: lean_unbox_nat_as_usize(log_final_poly_len_ptr), num_queries: lean_unbox_nat_as_usize(num_queries_ptr), @@ -256,18 +253,15 @@ fn lean_ctor_to_fri_parameters(ctor: &LeanCtorObject) -> FriParameters { } fn lean_array_to_io_buffer_map( - array: &LeanArrayObject, + array: *const c_void, ) -> FxHashMap, IOKeyInfo> { - let array_data = array.data(); + let array_data = lean_array_data(array); let mut map = FxHashMap::with_capacity_and_hasher(array_data.len(), FxBuildHasher); for ptr in array_data { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [key_ptr, info_ptr] = ctor.objs(); - let key_array: &LeanArrayObject = as_ref_unsafe(key_ptr.cast()); - let key = key_array.to_vec(lean_unbox_g); - let info_ctor: &LeanCtorObject = 
as_ref_unsafe(info_ptr.cast()); - let [idx_ptr, len_ptr] = info_ctor.objs(); + let [key_ptr, info_ptr] = lean_ctor_objs(*ptr); + let key = lean_array_to_vec(key_ptr, lean_unbox_g); + let [idx_ptr, len_ptr] = lean_ctor_objs(info_ptr); let info = IOKeyInfo { idx: lean_unbox_nat_as_usize(idx_ptr), len: lean_unbox_nat_as_usize(len_ptr), diff --git a/src/lean/ffi/aiur/toplevel.rs b/src/lean/ffi/aiur/toplevel.rs index 96c8d27b..465f0ddb 100644 --- a/src/lean/ffi/aiur/toplevel.rs +++ b/src/lean/ffi/aiur/toplevel.rs @@ -9,76 +9,69 @@ use crate::{ bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, }, lean::{ - array::LeanArrayObject, - ctor::LeanCtorObject, - ffi::{ - aiur::{lean_unbox_g, lean_unbox_nat_as_usize}, - as_ref_unsafe, - }, - lean_is_scalar, - string::LeanStringObject, + lean_array_to_vec, + ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}, + lean_ctor_objs, lean_is_scalar, lean_obj_to_string, lean_tag, }, }; fn lean_ptr_to_vec_val_idx(ptr: *const c_void) -> Vec { - let array: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - array.to_vec(lean_unbox_nat_as_usize) + lean_array_to_vec(ptr, lean_unbox_nat_as_usize) } fn lean_ptr_to_op(ptr: *const c_void) -> Op { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { + match lean_tag(ptr) { 0 => { - let [const_val_ptr] = ctor.objs(); + let [const_val_ptr] = lean_ctor_objs(ptr); Op::Const(G::from_u64(const_val_ptr as u64)) }, 1 => { - let [a_ptr, b_ptr] = ctor.objs(); + let [a_ptr, b_ptr] = lean_ctor_objs(ptr); Op::Add(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) }, 2 => { - let [a_ptr, b_ptr] = ctor.objs(); + let [a_ptr, b_ptr] = lean_ctor_objs(ptr); Op::Sub(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) }, 3 => { - let [a_ptr, b_ptr] = ctor.objs(); + let [a_ptr, b_ptr] = lean_ctor_objs(ptr); Op::Mul(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) }, 4 => { - let [a_ptr] = ctor.objs(); + let [a_ptr] = 
lean_ctor_objs(ptr); Op::EqZero(lean_unbox_nat_as_usize(a_ptr)) }, 5 => { - let [fun_idx_ptr, val_idxs_ptr, output_size_ptr] = ctor.objs(); + let [fun_idx_ptr, val_idxs_ptr, output_size_ptr] = lean_ctor_objs(ptr); let fun_idx = lean_unbox_nat_as_usize(fun_idx_ptr); let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_ptr); let output_size = lean_unbox_nat_as_usize(output_size_ptr); Op::Call(fun_idx, val_idxs, output_size) }, 6 => { - let [val_idxs_ptr] = ctor.objs(); + let [val_idxs_ptr] = lean_ctor_objs(ptr); Op::Store(lean_ptr_to_vec_val_idx(val_idxs_ptr)) }, 7 => { - let [width_ptr, val_idx_ptr] = ctor.objs(); + let [width_ptr, val_idx_ptr] = lean_ctor_objs(ptr); Op::Load( lean_unbox_nat_as_usize(width_ptr), lean_unbox_nat_as_usize(val_idx_ptr), ) }, 8 => { - let [as_ptr, bs_ptr] = ctor.objs(); + let [as_ptr, bs_ptr] = lean_ctor_objs(ptr); Op::AssertEq( lean_ptr_to_vec_val_idx(as_ptr), lean_ptr_to_vec_val_idx(bs_ptr), ) }, 9 => { - let [key_ptr] = ctor.objs(); + let [key_ptr] = lean_ctor_objs(ptr); Op::IOGetInfo(lean_ptr_to_vec_val_idx(key_ptr)) }, 10 => { - let [key_ptr, idx_ptr, len_ptr] = ctor.objs(); + let [key_ptr, idx_ptr, len_ptr] = lean_ctor_objs(ptr); Op::IOSetInfo( lean_ptr_to_vec_val_idx(key_ptr), lean_unbox_nat_as_usize(idx_ptr), @@ -86,63 +79,60 @@ fn lean_ptr_to_op(ptr: *const c_void) -> Op { ) }, 11 => { - let [idx_ptr, len_ptr] = ctor.objs(); + let [idx_ptr, len_ptr] = lean_ctor_objs(ptr); Op::IORead( lean_unbox_nat_as_usize(idx_ptr), lean_unbox_nat_as_usize(len_ptr), ) }, 12 => { - let [data_ptr] = ctor.objs(); + let [data_ptr] = lean_ctor_objs(ptr); Op::IOWrite(lean_ptr_to_vec_val_idx(data_ptr)) }, 13 => { - let [byte_ptr] = ctor.objs(); + let [byte_ptr] = lean_ctor_objs(ptr); Op::U8BitDecomposition(lean_unbox_nat_as_usize(byte_ptr)) }, 14 => { - let [byte_ptr] = ctor.objs(); + let [byte_ptr] = lean_ctor_objs(ptr); Op::U8ShiftLeft(lean_unbox_nat_as_usize(byte_ptr)) }, 15 => { - let [byte_ptr] = ctor.objs(); + let [byte_ptr] = lean_ctor_objs(ptr); 
Op::U8ShiftRight(lean_unbox_nat_as_usize(byte_ptr)) }, 16 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); + let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); Op::U8Xor(i, j) }, 17 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); + let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); Op::U8Add(i, j) }, 18 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); + let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); Op::U8Sub(i, j) }, 19 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); + let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); Op::U8And(i, j) }, 20 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); + let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); Op::U8Or(i, j) }, 21 => { - let [i, j] = ctor.objs().map(lean_unbox_nat_as_usize); + let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); Op::U8LessThan(i, j) }, 22 => { - let [label_ptr, idxs_ptr] = ctor.objs(); - let label_str: &LeanStringObject = as_ref_unsafe(label_ptr.cast()); - let label = label_str.as_string(); + let [label_ptr, idxs_ptr] = lean_ctor_objs(ptr); + let label = lean_obj_to_string(label_ptr); let idxs = if lean_is_scalar(idxs_ptr) { None } else { - let option_ctor: &LeanCtorObject = as_ref_unsafe(idxs_ptr.cast()); - let [idxs_ptr] = option_ctor.objs(); - let idxs: &LeanArrayObject = as_ref_unsafe(idxs_ptr.cast()); - Some(idxs.to_vec(lean_unbox_nat_as_usize)) + let [idxs_ptr] = lean_ctor_objs(idxs_ptr); + Some(lean_array_to_vec(idxs_ptr, lean_unbox_nat_as_usize)) }; Op::Debug(label, idxs) }, @@ -151,33 +141,30 @@ fn lean_ptr_to_op(ptr: *const c_void) -> Op { } fn lean_ptr_to_g_block_pair(ptr: *const c_void) -> (G, Block) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [g_ptr, block_ptr] = ctor.objs(); + let [g_ptr, block_ptr] = lean_ctor_objs(ptr); let g = lean_unbox_g(g_ptr); - let block = 
lean_ctor_to_block(as_ref_unsafe(block_ptr.cast())); + let block = lean_ptr_to_block(block_ptr); (g, block) } -fn lean_ctor_to_ctrl(ctor: &LeanCtorObject) -> Ctrl { - match ctor.tag() { +fn lean_ptr_to_ctrl(ptr: *const c_void) -> Ctrl { + match lean_tag(ptr) { 0 => { - let [val_idx_ptr, cases_ptr, default_ptr] = ctor.objs(); + let [val_idx_ptr, cases_ptr, default_ptr] = lean_ctor_objs(ptr); let val_idx = lean_unbox_nat_as_usize(val_idx_ptr); - let cases_array: &LeanArrayObject = as_ref_unsafe(cases_ptr.cast()); - let vec_cases = cases_array.to_vec(lean_ptr_to_g_block_pair); + let vec_cases = lean_array_to_vec(cases_ptr, lean_ptr_to_g_block_pair); let cases = FxIndexMap::from_iter(vec_cases); let default = if lean_is_scalar(default_ptr) { None } else { - let default_ctor: &LeanCtorObject = as_ref_unsafe(default_ptr.cast()); - let [block_ptr] = default_ctor.objs(); - let block = lean_ctor_to_block(as_ref_unsafe(block_ptr.cast())); + let [block_ptr] = lean_ctor_objs(default_ptr); + let block = lean_ptr_to_block(block_ptr); Some(Box::new(block)) }; Ctrl::Match(val_idx, cases, default) }, 1 => { - let [sel_idx_ptr, val_idxs_ptr] = ctor.objs(); + let [sel_idx_ptr, val_idxs_ptr] = lean_ctor_objs(ptr); let sel_idx = lean_unbox_nat_as_usize(sel_idx_ptr); let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_ptr); Ctrl::Return(sel_idx, val_idxs) @@ -186,20 +173,19 @@ fn lean_ctor_to_ctrl(ctor: &LeanCtorObject) -> Ctrl { } } -fn lean_ctor_to_block(ctor: &LeanCtorObject) -> Block { +fn lean_ptr_to_block(ptr: *const c_void) -> Block { let [ops_ptr, ctrl_ptr, min_sel_included_ptr, max_sel_excluded_ptr] = - ctor.objs(); - let ops_array: &LeanArrayObject = as_ref_unsafe(ops_ptr.cast()); - let ops = ops_array.to_vec(lean_ptr_to_op); - let ctrl = lean_ctor_to_ctrl(as_ref_unsafe(ctrl_ptr.cast())); + lean_ctor_objs(ptr); + let ops = lean_array_to_vec(ops_ptr, lean_ptr_to_op); + let ctrl = lean_ptr_to_ctrl(ctrl_ptr); let min_sel_included = lean_unbox_nat_as_usize(min_sel_included_ptr); let 
max_sel_excluded = lean_unbox_nat_as_usize(max_sel_excluded_ptr); Block { ops, ctrl, min_sel_included, max_sel_excluded } } -fn lean_ctor_to_function_layout(ctor: &LeanCtorObject) -> FunctionLayout { +fn lean_ptr_to_function_layout(ptr: *const c_void) -> FunctionLayout { let [input_size_ptr, selectors_ptr, auxiliaries_ptr, lookups_ptr] = - ctor.objs(); + lean_ctor_objs(ptr); FunctionLayout { input_size: lean_unbox_nat_as_usize(input_size_ptr), selectors: lean_unbox_nat_as_usize(selectors_ptr), @@ -209,20 +195,16 @@ fn lean_ctor_to_function_layout(ctor: &LeanCtorObject) -> FunctionLayout { } fn lean_ptr_to_function(ptr: *const c_void) -> Function { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [body_ptr, layout_ptr, unconstrained_ptr] = ctor.objs(); - let body = lean_ctor_to_block(as_ref_unsafe(body_ptr.cast())); - let layout = lean_ctor_to_function_layout(as_ref_unsafe(layout_ptr.cast())); + let [body_ptr, layout_ptr, unconstrained_ptr] = lean_ctor_objs(ptr); + let body = lean_ptr_to_block(body_ptr); + let layout = lean_ptr_to_function_layout(layout_ptr); let unconstrained = unconstrained_ptr as usize != 0; Function { body, layout, unconstrained } } -pub(crate) fn lean_ctor_to_toplevel(ctor: &LeanCtorObject) -> Toplevel { - let [functions_ptr, memory_sizes_ptr] = ctor.objs(); - let functions_array: &LeanArrayObject = as_ref_unsafe(functions_ptr.cast()); - let functions = functions_array.to_vec(lean_ptr_to_function); - let memory_sizes_array: &LeanArrayObject = - as_ref_unsafe(memory_sizes_ptr.cast()); - let memory_sizes = memory_sizes_array.to_vec(lean_unbox_nat_as_usize); +pub(crate) fn lean_ptr_to_toplevel(ptr: *const c_void) -> Toplevel { + let [functions_ptr, memory_sizes_ptr] = lean_ctor_objs(ptr); + let functions = lean_array_to_vec(functions_ptr, lean_ptr_to_function); + let memory_sizes = lean_array_to_vec(memory_sizes_ptr, lean_unbox_nat_as_usize); Toplevel { functions, memory_sizes } } diff --git a/src/lean/ffi/byte_array.rs 
b/src/lean/ffi/byte_array.rs index 86bc01cf..4fc41f53 100644 --- a/src/lean/ffi/byte_array.rs +++ b/src/lean/ffi/byte_array.rs @@ -1,11 +1,13 @@ -use crate::lean::sarray::LeanSArrayObject; +use std::ffi::c_void; + +use crate::lean::lean_sarray_data; /// `@& ByteArray → @& ByteArray → Bool` /// Efficient implementation for `BEq ByteArray` #[unsafe(no_mangle)] extern "C" fn rs_byte_array_beq( - a: &LeanSArrayObject, - b: &LeanSArrayObject, + a: *const c_void, + b: *const c_void, ) -> bool { - a.data() == b.data() + lean_sarray_data(a) == lean_sarray_data(b) } diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs index 41c0a7a2..9a1fc342 100644 --- a/src/lean/ffi/compile.rs +++ b/src/lean/ffi/compile.rs @@ -30,14 +30,13 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; use crate::lean::nat::Nat; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, - lean_ctor_set_uint64, lean_inc, lean_io_result_mk_error, - lean_io_result_mk_ok, lean_mk_io_user_error, lean_mk_string, lean_obj_tag, - lean_sarray_cptr, lean_uint64_to_nat, +use crate::lean::{lean_obj_to_string, lean_sarray_data, lean_sarray_set_data}; +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_set_core, + lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_ctor_set_uint64, + lean_inc, lean_io_result_mk_error, lean_io_result_mk_ok, + lean_mk_io_user_error, lean_mk_string, lean_obj_tag, lean_sarray_cptr, + lean_uint64_to_nat, }; use dashmap::DashMap; @@ -67,9 +66,9 @@ pub fn build_raw_const(addr: &Address, constant: &IxonConstant) -> *mut c_void { let addr_obj = build_address_from_ixon(addr); let const_obj = build_ixon_constant(constant); let obj = lean_alloc_ctor(0, 2, 0); - 
lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, const_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + lean_ctor_set(obj, 1, const_obj.cast()); + obj.cast() } } @@ -85,10 +84,10 @@ pub fn build_raw_named( let addr_obj = build_address_from_ixon(addr); let meta_obj = build_constant_meta(meta); let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, addr_obj); - lean_ctor_set(obj, 2, meta_obj); - obj + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, addr_obj.cast()); + lean_ctor_set(obj, 2, meta_obj.cast()); + obj.cast() } } @@ -100,9 +99,9 @@ pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> *mut c_void { let ba_data = lean_sarray_cptr(ba); std::ptr::copy_nonoverlapping(bytes.as_ptr(), ba_data, bytes.len()); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); + lean_ctor_set(obj, 0, addr_obj.cast()); lean_ctor_set(obj, 1, ba); - obj + obj.cast() } } @@ -112,9 +111,9 @@ pub fn build_raw_comm(addr: &Address, comm: &Comm) -> *mut c_void { let addr_obj = build_address_from_ixon(addr); let comm_obj = build_ixon_comm(comm); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, comm_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + lean_ctor_set(obj, 1, comm_obj.cast()); + obj.cast() } } @@ -128,9 +127,9 @@ pub extern "C" fn rs_roundtrip_rust_condensed_blocks( ptr: *const c_void, ) -> *mut c_void { unsafe { - let low_links = lean_ctor_get(ptr as *mut _, 0) as *mut c_void; - let blocks = lean_ctor_get(ptr as *mut _, 1) as *mut c_void; - let block_refs = lean_ctor_get(ptr as *mut _, 2) as *mut c_void; + let low_links = lean_ctor_get(ptr as *mut _, 0); + let blocks = lean_ctor_get(ptr as *mut _, 1); + let block_refs = lean_ctor_get(ptr as *mut _, 2); lean_inc(low_links); lean_inc(blocks); @@ -140,7 +139,7 @@ pub extern "C" fn rs_roundtrip_rust_condensed_blocks( lean_ctor_set(result, 0, low_links); lean_ctor_set(result, 1, blocks); 
lean_ctor_set(result, 2, block_refs); - result + result.cast() } } @@ -150,9 +149,9 @@ pub extern "C" fn rs_roundtrip_rust_compile_phases( ptr: *const c_void, ) -> *mut c_void { unsafe { - let raw_env = lean_ctor_get(ptr as *mut _, 0) as *mut c_void; - let condensed = lean_ctor_get(ptr as *mut _, 1) as *mut c_void; - let compile_env = lean_ctor_get(ptr as *mut _, 2) as *mut c_void; + let raw_env = lean_ctor_get(ptr as *mut _, 0); + let condensed = lean_ctor_get(ptr as *mut _, 1); + let compile_env = lean_ctor_get(ptr as *mut _, 2); lean_inc(raw_env); lean_inc(condensed); @@ -162,7 +161,7 @@ pub extern "C" fn rs_roundtrip_rust_compile_phases( lean_ctor_set(result, 0, raw_env); lean_ctor_set(result, 1, condensed); lean_ctor_set(result, 2, compile_env); - result + result.cast() } } @@ -178,7 +177,7 @@ pub extern "C" fn rs_roundtrip_block_compare_result( unsafe { let tag = lean_obj_tag(ptr as *mut _); match tag { - 0 => lean_alloc_ctor(0, 0, 0), + 0 => lean_alloc_ctor(0, 0, 0).cast(), 1 => { let base = ptr.cast::(); let lean_size = *base.add(8).cast::(); @@ -190,9 +189,9 @@ pub extern "C" fn rs_roundtrip_block_compare_result( *out_base.add(8).cast::() = lean_size; *out_base.add(16).cast::() = rust_size; *out_base.add(24).cast::() = first_diff; - obj + obj.cast() }, - 2 => lean_alloc_ctor(2, 0, 0), + 2 => lean_alloc_ctor(2, 0, 0).cast(), _ => unreachable!("Invalid BlockCompareResult tag: {}", tag), } } @@ -209,14 +208,14 @@ pub extern "C" fn rs_roundtrip_block_compare_detail( let lean_sharing_len = *base.add(16).cast::(); let rust_sharing_len = *base.add(24).cast::(); - let result_obj = rs_roundtrip_block_compare_result(result_ptr); + let result_obj = rs_roundtrip_block_compare_result(result_ptr.cast()); let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, result_obj); + lean_ctor_set(obj, 0, result_obj.cast()); let out_base = obj.cast::(); *out_base.add(16).cast::() = lean_sharing_len; *out_base.add(24).cast::() = rust_sharing_len; - obj + obj.cast() } } @@ 
-231,7 +230,7 @@ unsafe fn make_compile_io_error(msg: &str) -> *mut c_void { .unwrap_or_else(|_| CString::new("compilation error").unwrap()); let lean_msg = lean_mk_string(c_msg.as_ptr()); let lean_err = lean_mk_io_user_error(lean_msg); - lean_io_result_mk_error(lean_err) + lean_io_result_mk_error(lean_err).cast() } } @@ -299,7 +298,7 @@ pub extern "C" fn rs_compile_env_full( std::ptr::copy_nonoverlapping(bytes.as_ptr(), ba_data, bytes.len()); let block = lean_alloc_ctor(0, 2, 8); - lean_ctor_set(block, 0, name_obj); + lean_ctor_set(block, 0, name_obj.cast()); lean_ctor_set(block, 1, ba); let base = block.cast::(); *base.add(8 + 16).cast::() = *sharing_len as u64; @@ -323,7 +322,7 @@ pub extern "C" fn rs_compile_env_full( std::ptr::copy_nonoverlapping(addr_bytes.as_ptr(), addr_data, 32); let entry_obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(entry_obj, 0, name_obj); + lean_ctor_set(entry_obj, 0, name_obj.cast()); lean_ctor_set(entry_obj, 1, addr_ba); lean_array_set_core(name_to_addr_arr, i, entry_obj); @@ -336,11 +335,11 @@ pub extern "C" fn rs_compile_env_full( // Build RustCompilationResult let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env); - lean_ctor_set(result, 1, condensed_obj); + lean_ctor_set(result, 0, raw_env.cast()); + lean_ctor_set(result, 1, condensed_obj.cast()); lean_ctor_set(result, 2, compiled_obj); - lean_io_result_mk_ok(result) + lean_io_result_mk_ok(result).cast() } })) } @@ -372,7 +371,7 @@ pub extern "C" fn rs_compile_env(env_consts_ptr: *const c_void) -> *mut c_void { let ba = lean_alloc_sarray(1, buf.len(), buf.len()); let ba_data = lean_sarray_cptr(ba); std::ptr::copy_nonoverlapping(buf.as_ptr(), ba_data, buf.len()); - lean_io_result_mk_ok(ba) + lean_io_result_mk_ok(ba).cast() } })) } @@ -424,7 +423,7 @@ pub extern "C" fn rs_compile_phases( let consts_arr = lean_alloc_array(consts.len(), consts.len()); for (i, (addr, constant)) in consts.iter().enumerate() { let raw_const = build_raw_const(addr, constant); - 
lean_array_set_core(consts_arr, i, raw_const); + lean_array_set_core(consts_arr, i, raw_const.cast()); } let named: Vec<_> = compile_stt @@ -436,7 +435,7 @@ pub extern "C" fn rs_compile_phases( let named_arr = lean_alloc_array(named.len(), named.len()); for (i, (name, n)) in named.iter().enumerate() { let raw_named = build_raw_named(&mut cache, name, &n.addr, &n.meta); - lean_array_set_core(named_arr, i, raw_named); + lean_array_set_core(named_arr, i, raw_named.cast()); } let blobs: Vec<_> = compile_stt @@ -448,7 +447,7 @@ pub extern "C" fn rs_compile_phases( let blobs_arr = lean_alloc_array(blobs.len(), blobs.len()); for (i, (addr, bytes)) in blobs.iter().enumerate() { let raw_blob = build_raw_blob(addr, bytes); - lean_array_set_core(blobs_arr, i, raw_blob); + lean_array_set_core(blobs_arr, i, raw_blob.cast()); } let comms: Vec<_> = compile_stt @@ -460,7 +459,7 @@ pub extern "C" fn rs_compile_phases( let comms_arr = lean_alloc_array(comms.len(), comms.len()); for (i, (addr, comm)) in comms.iter().enumerate() { let raw_comm = build_raw_comm(addr, comm); - lean_array_set_core(comms_arr, i, raw_comm); + lean_array_set_core(comms_arr, i, raw_comm.cast()); } // Build names array (Address → Ix.Name) @@ -473,7 +472,7 @@ pub extern "C" fn rs_compile_phases( let names_arr = lean_alloc_array(names.len(), names.len()); for (i, (addr, name)) in names.iter().enumerate() { let obj = build_raw_name_entry(&mut cache, addr, name); - lean_array_set_core(names_arr, i, obj); + lean_array_set_core(names_arr, i, obj.cast()); } let raw_ixon_env = lean_alloc_ctor(0, 5, 0); @@ -484,11 +483,11 @@ pub extern "C" fn rs_compile_phases( lean_ctor_set(raw_ixon_env, 4, names_arr); let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env); - lean_ctor_set(result, 1, condensed_obj); + lean_ctor_set(result, 0, raw_env.cast()); + lean_ctor_set(result, 1, condensed_obj.cast()); lean_ctor_set(result, 2, raw_ixon_env); - lean_io_result_mk_ok(result) + 
lean_io_result_mk_ok(result).cast() } })) } @@ -523,7 +522,7 @@ pub extern "C" fn rs_compile_env_to_ixon( let consts_arr = lean_alloc_array(consts.len(), consts.len()); for (i, (addr, constant)) in consts.iter().enumerate() { let raw_const = build_raw_const(addr, constant); - lean_array_set_core(consts_arr, i, raw_const); + lean_array_set_core(consts_arr, i, raw_const.cast()); } let named: Vec<_> = compile_stt @@ -535,7 +534,7 @@ pub extern "C" fn rs_compile_env_to_ixon( let named_arr = lean_alloc_array(named.len(), named.len()); for (i, (name, n)) in named.iter().enumerate() { let raw_named = build_raw_named(&mut cache, name, &n.addr, &n.meta); - lean_array_set_core(named_arr, i, raw_named); + lean_array_set_core(named_arr, i, raw_named.cast()); } let blobs: Vec<_> = compile_stt @@ -547,7 +546,7 @@ pub extern "C" fn rs_compile_env_to_ixon( let blobs_arr = lean_alloc_array(blobs.len(), blobs.len()); for (i, (addr, bytes)) in blobs.iter().enumerate() { let raw_blob = build_raw_blob(addr, bytes); - lean_array_set_core(blobs_arr, i, raw_blob); + lean_array_set_core(blobs_arr, i, raw_blob.cast()); } let comms: Vec<_> = compile_stt @@ -559,7 +558,7 @@ pub extern "C" fn rs_compile_env_to_ixon( let comms_arr = lean_alloc_array(comms.len(), comms.len()); for (i, (addr, comm)) in comms.iter().enumerate() { let raw_comm = build_raw_comm(addr, comm); - lean_array_set_core(comms_arr, i, raw_comm); + lean_array_set_core(comms_arr, i, raw_comm.cast()); } // Build names array (Address → Ix.Name) @@ -572,7 +571,7 @@ pub extern "C" fn rs_compile_env_to_ixon( let names_arr = lean_alloc_array(names.len(), names.len()); for (i, (addr, name)) in names.iter().enumerate() { let obj = build_raw_name_entry(&mut cache, addr, name); - lean_array_set_core(names_arr, i, obj); + lean_array_set_core(names_arr, i, obj.cast()); } let result = lean_alloc_ctor(0, 5, 0); @@ -581,7 +580,7 @@ pub extern "C" fn rs_compile_env_to_ixon( lean_ctor_set(result, 2, blobs_arr); lean_ctor_set(result, 3, 
comms_arr); lean_ctor_set(result, 4, names_arr); - lean_io_result_mk_ok(result) + lean_io_result_mk_ok(result).cast() } })) } @@ -595,7 +594,7 @@ pub extern "C" fn rs_canonicalize_env_to_ix( let rust_env = lean_ptr_to_env(env_consts_ptr); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let raw_env = build_raw_environment(&mut cache, &rust_env); - unsafe { lean_io_result_mk_ok(raw_env) } + unsafe { lean_io_result_mk_ok(raw_env.cast()).cast() } })) } @@ -687,7 +686,7 @@ extern "C" fn rs_compile_env_rust_first( extern "C" fn rs_compare_block( rust_env: *const RustCompiledEnv, lowlink_name: *const c_void, - lean_bytes: &LeanSArrayObject, + lean_bytes: *const c_void, ) -> u64 { if rust_env.is_null() { return 2u64 << 32; // not found @@ -696,7 +695,7 @@ extern "C" fn rs_compare_block( let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let lean_data = lean_bytes.data(); + let lean_data = lean_sarray_data(lean_bytes); // Look up Rust's compiled block let rust_bytes = match rust_env.blocks.get(&name) { @@ -786,8 +785,7 @@ extern "C" fn rs_copy_block_bytes( }; // Copy into the Lean ByteArray - let dest_arr: &mut LeanSArrayObject = unsafe { &mut *dest.cast() }; - dest_arr.set_data(bytes); + unsafe { lean_sarray_set_data(dest, bytes) }; } /// FFI: Get Rust's sharing vector length for a block. 
@@ -1006,8 +1004,7 @@ extern "C" fn rs_get_pre_sharing_exprs( } // Write to output buffer - let out_arr: &mut LeanSArrayObject = unsafe { &mut *out_buf.cast() }; - out_arr.set_data(&output_bytes); + unsafe { lean_sarray_set_data(out_buf, &output_bytes) }; n_exprs } @@ -1091,8 +1088,7 @@ extern "C" fn rs_lookup_const_addr( match rust_env.compile_state.name_to_addr.get(&name) { Some(addr_ref) => { // Copy the 32-byte address into the output ByteArray - let out_arr: &mut LeanSArrayObject = unsafe { &mut *out_addr.cast() }; - out_arr.set_data(addr_ref.as_bytes()); + unsafe { lean_sarray_set_data(out_addr, addr_ref.as_bytes()) }; 1 }, None => 0, @@ -1121,12 +1117,12 @@ use crate::ix::ixon::error::{CompileError, DecompileError, SerializeError}; fn build_lean_string(s: &str) -> *mut c_void { let cstr = CString::new(s) .unwrap_or_else(|_| CString::new("(invalid string)").unwrap()); - unsafe { lean_mk_string(cstr.as_ptr()) } + unsafe { lean_mk_string(cstr.as_ptr()).cast() } } /// Build a Lean Nat from a usize. fn build_lean_nat_usize(n: usize) -> *mut c_void { - unsafe { lean_uint64_to_nat(n as u64) } + unsafe { lean_uint64_to_nat(n as u64).cast() } } /// Build a Lean Ixon.SerializeError from a Rust SerializeError. 
@@ -1144,39 +1140,39 @@ pub fn build_serialize_error(se: &SerializeError) -> *mut c_void { match se { SerializeError::UnexpectedEof { expected } => { let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(expected)); - obj + lean_ctor_set(obj, 0, build_lean_string(expected).cast()); + obj.cast() }, SerializeError::InvalidTag { tag, context } => { // 1 obj (String) + 1 scalar byte (UInt8) let obj = lean_alloc_ctor(1, 1, 1); - lean_ctor_set(obj, 0, build_lean_string(context)); + lean_ctor_set(obj, 0, build_lean_string(context).cast()); lean_ctor_set_uint8(obj, 8, *tag); - obj + obj.cast() }, SerializeError::InvalidFlag { flag, context } => { let obj = lean_alloc_ctor(2, 1, 1); - lean_ctor_set(obj, 0, build_lean_string(context)); + lean_ctor_set(obj, 0, build_lean_string(context).cast()); lean_ctor_set_uint8(obj, 8, *flag); - obj + obj.cast() }, SerializeError::InvalidVariant { variant, context } => { let obj = lean_alloc_ctor(3, 1, 8); - lean_ctor_set(obj, 0, build_lean_string(context)); + lean_ctor_set(obj, 0, build_lean_string(context).cast()); lean_ctor_set_uint64(obj, 8, *variant); - obj + obj.cast() }, SerializeError::InvalidBool { value } => { let obj = lean_alloc_ctor(4, 0, 1); lean_ctor_set_uint8(obj, 0, *value); - obj + obj.cast() }, - SerializeError::AddressError => lean_alloc_ctor(5, 0, 0), + SerializeError::AddressError => lean_alloc_ctor(5, 0, 0).cast(), SerializeError::InvalidShareIndex { idx, max } => { let obj = lean_alloc_ctor(6, 1, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max)); + lean_ctor_set(obj, 0, build_lean_nat_usize(*max).cast()); lean_ctor_set_uint64(obj, 8, *idx); - obj + obj.cast() }, } } @@ -1190,7 +1186,7 @@ pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { 0 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let expected = - as_ref_unsafe::(str_ptr.cast()).as_string(); + lean_obj_to_string(str_ptr.cast()); SerializeError::UnexpectedEof { expected } }, 1 => { @@ -1198,7 +1194,7 @@ pub 
fn decode_serialize_error(ptr: *const c_void) -> SerializeError { let base = ptr.cast::(); let tag_val = *base.add(8 + 8); let context = - as_ref_unsafe::(str_ptr.cast()).as_string(); + lean_obj_to_string(str_ptr.cast()); SerializeError::InvalidTag { tag: tag_val, context } }, 2 => { @@ -1206,7 +1202,7 @@ pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { let base = ptr.cast::(); let flag = *base.add(8 + 8); let context = - as_ref_unsafe::(str_ptr.cast()).as_string(); + lean_obj_to_string(str_ptr.cast()); SerializeError::InvalidFlag { flag, context } }, 3 => { @@ -1214,7 +1210,7 @@ pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { let base = ptr.cast::(); let variant = *base.add(8 + 8).cast::(); let context = - as_ref_unsafe::(str_ptr.cast()).as_string(); + lean_obj_to_string(str_ptr.cast()); SerializeError::InvalidVariant { variant, context } }, 4 => { @@ -1227,7 +1223,7 @@ pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { let nat_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let idx = *base.add(8 + 8).cast::(); - let max = Nat::from_ptr(nat_ptr) + let max = Nat::from_ptr(nat_ptr.cast()) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1250,75 +1246,75 @@ pub fn build_decompile_error(err: &DecompileError) -> *mut c_void { match err { DecompileError::InvalidRefIndex { idx, refs_len, constant } => { let obj = lean_alloc_ctor(0, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*refs_len)); - lean_ctor_set(obj, 1, build_lean_string(constant)); + lean_ctor_set(obj, 0, build_lean_nat_usize(*refs_len).cast()); + lean_ctor_set(obj, 1, build_lean_string(constant).cast()); lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj + obj.cast() }, DecompileError::InvalidUnivIndex { idx, univs_len, constant } => { let obj = lean_alloc_ctor(1, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*univs_len)); - lean_ctor_set(obj, 1, build_lean_string(constant)); + lean_ctor_set(obj, 0, 
build_lean_nat_usize(*univs_len).cast()); + lean_ctor_set(obj, 1, build_lean_string(constant).cast()); lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj + obj.cast() }, DecompileError::InvalidShareIndex { idx, max, constant } => { let obj = lean_alloc_ctor(2, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max)); - lean_ctor_set(obj, 1, build_lean_string(constant)); + lean_ctor_set(obj, 0, build_lean_nat_usize(*max).cast()); + lean_ctor_set(obj, 1, build_lean_string(constant).cast()); lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj + obj.cast() }, DecompileError::InvalidRecIndex { idx, ctx_size, constant } => { let obj = lean_alloc_ctor(3, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*ctx_size)); - lean_ctor_set(obj, 1, build_lean_string(constant)); + lean_ctor_set(obj, 0, build_lean_nat_usize(*ctx_size).cast()); + lean_ctor_set(obj, 1, build_lean_string(constant).cast()); lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj + obj.cast() }, DecompileError::InvalidUnivVarIndex { idx, max, constant } => { let obj = lean_alloc_ctor(4, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max)); - lean_ctor_set(obj, 1, build_lean_string(constant)); + lean_ctor_set(obj, 0, build_lean_nat_usize(*max).cast()); + lean_ctor_set(obj, 1, build_lean_string(constant).cast()); lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj + obj.cast() }, DecompileError::MissingAddress(addr) => { // tag 5, 1 object (Address = ByteArray) let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj + lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); + obj.cast() }, DecompileError::MissingMetadata(addr) => { // tag 6, 1 object (Address = ByteArray) let obj = lean_alloc_ctor(6, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj + lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); + obj.cast() }, DecompileError::BlobNotFound(addr) => { // tag 7, 1 object (Address = ByteArray) let obj = lean_alloc_ctor(7, 1, 0); - 
lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj + lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); + obj.cast() }, DecompileError::BadBlobFormat { addr, expected } => { // tag 8, 2 objects (Address, String) let obj = lean_alloc_ctor(8, 2, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - lean_ctor_set(obj, 1, build_lean_string(expected)); - obj + lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); + lean_ctor_set(obj, 1, build_lean_string(expected).cast()); + obj.cast() }, DecompileError::BadConstantFormat { msg } => { // tag 9, 1 object (String) let obj = lean_alloc_ctor(9, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(msg)); - obj + lean_ctor_set(obj, 0, build_lean_string(msg).cast()); + obj.cast() }, DecompileError::Serialize(se) => { // tag 10, 1 object (SerializeError) let obj = lean_alloc_ctor(10, 1, 0); - lean_ctor_set(obj, 0, build_serialize_error(se)); - obj + lean_ctor_set(obj, 0, build_serialize_error(se).cast()); + obj.cast() }, } } @@ -1334,12 +1330,12 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { let str_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); let idx = *base.add(8 + 2 * 8).cast::(); - let refs_len = Nat::from_ptr(nat_ptr) + let refs_len = Nat::from_ptr(nat_ptr.cast()) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidRefIndex { idx, refs_len, constant } }, 1 => { @@ -1347,12 +1343,12 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { let str_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); let idx = *base.add(8 + 2 * 8).cast::(); - let univs_len = Nat::from_ptr(nat_ptr) + let univs_len = Nat::from_ptr(nat_ptr.cast()) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + 
lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidUnivIndex { idx, univs_len, constant } }, 2 => { @@ -1360,12 +1356,12 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { let str_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); let idx = *base.add(8 + 2 * 8).cast::(); - let max = Nat::from_ptr(nat_ptr) + let max = Nat::from_ptr(nat_ptr.cast()) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidShareIndex { idx, max, constant } }, 3 => { @@ -1373,12 +1369,12 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { let str_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); let idx = *base.add(8 + 2 * 8).cast::(); - let ctx_size = Nat::from_ptr(nat_ptr) + let ctx_size = Nat::from_ptr(nat_ptr.cast()) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidRecIndex { idx, ctx_size, constant } }, 4 => { @@ -1386,43 +1382,43 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { let str_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); let idx = *base.add(8 + 2 * 8).cast::(); - let max = Nat::from_ptr(nat_ptr) + let max = Nat::from_ptr(nat_ptr.cast()) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); let constant = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidUnivVarIndex { idx, max, constant } }, 5 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::MissingAddress(decode_ixon_address(addr_ptr)) + DecompileError::MissingAddress(decode_ixon_address(addr_ptr.cast())) }, 6 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - 
DecompileError::MissingMetadata(decode_ixon_address(addr_ptr)) + DecompileError::MissingMetadata(decode_ixon_address(addr_ptr.cast())) }, 7 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::BlobNotFound(decode_ixon_address(addr_ptr)) + DecompileError::BlobNotFound(decode_ixon_address(addr_ptr.cast())) }, 8 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let addr = decode_ixon_address(addr_ptr); + let addr = decode_ixon_address(addr_ptr.cast()); let expected = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::BadBlobFormat { addr, expected } }, 9 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let msg = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::BadConstantFormat { msg } }, 10 => { let se_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::Serialize(decode_serialize_error(se_ptr)) + DecompileError::Serialize(decode_serialize_error(se_ptr.cast())) }, _ => unreachable!("Invalid DecompileError tag: {}", tag), } @@ -1443,34 +1439,34 @@ pub fn build_compile_error(err: &CompileError) -> *mut c_void { match err { CompileError::MissingConstant { name } => { let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(name)); - obj + lean_ctor_set(obj, 0, build_lean_string(name).cast()); + obj.cast() }, CompileError::MissingAddress(addr) => { let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr)); - obj + lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); + obj.cast() }, CompileError::InvalidMutualBlock { reason } => { let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(reason)); - obj + lean_ctor_set(obj, 0, build_lean_string(reason).cast()); + obj.cast() }, CompileError::UnsupportedExpr { desc } => { let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, 
build_lean_string(desc)); - obj + lean_ctor_set(obj, 0, build_lean_string(desc).cast()); + obj.cast() }, CompileError::UnknownUnivParam { curr, param } => { let obj = lean_alloc_ctor(4, 2, 0); - lean_ctor_set(obj, 0, build_lean_string(curr)); - lean_ctor_set(obj, 1, build_lean_string(param)); - obj + lean_ctor_set(obj, 0, build_lean_string(curr).cast()); + lean_ctor_set(obj, 1, build_lean_string(param).cast()); + obj.cast() }, CompileError::Serialize(se) => { let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_serialize_error(se)); - obj + lean_ctor_set(obj, 0, build_serialize_error(se).cast()); + obj.cast() }, } } @@ -1484,37 +1480,35 @@ pub fn decode_compile_error(ptr: *const c_void) -> CompileError { 0 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let name = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); CompileError::MissingConstant { name } }, 1 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - CompileError::MissingAddress(decode_ixon_address(addr_ptr)) + CompileError::MissingAddress(decode_ixon_address(addr_ptr.cast())) }, 2 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let reason = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); CompileError::InvalidMutualBlock { reason } }, 3 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let desc = - as_ref_unsafe::(str_ptr.cast()).as_string().clone(); + lean_obj_to_string(str_ptr.cast()).clone(); CompileError::UnsupportedExpr { desc } }, 4 => { let str0 = lean_ctor_get(ptr as *mut _, 0); let str1 = lean_ctor_get(ptr as *mut _, 1); - let curr = - as_ref_unsafe::(str0.cast()).as_string().clone(); - let param = - as_ref_unsafe::(str1.cast()).as_string().clone(); + let curr = lean_obj_to_string(str0 as *const _); + let param = lean_obj_to_string(str1 as *const _); CompileError::UnknownUnivParam { curr, param } }, 5 => { let se_ptr = lean_ctor_get(ptr as *mut _, 0); - 
CompileError::Serialize(decode_serialize_error(se_ptr)) + CompileError::Serialize(decode_serialize_error(se_ptr.cast())) }, _ => unreachable!("Invalid CompileError tag: {}", tag), } @@ -1576,14 +1570,14 @@ pub extern "C" fn rs_decompile_env(raw_env_ptr: *const c_void) -> *mut c_void { let name_obj = build_name(&mut cache, name); let info_obj = build_constant_info(&mut cache, info); let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, info_obj); + lean_ctor_set(pair, 0, name_obj.cast()); + lean_ctor_set(pair, 1, info_obj.cast()); lean_array_set_core(arr, i, pair); } // Except.ok (tag 1) let obj = lean_alloc_ctor(1, 1, 0); lean_ctor_set(obj, 0, arr); - obj + obj.cast() } }, Err(e) => { @@ -1591,8 +1585,8 @@ pub extern "C" fn rs_decompile_env(raw_env_ptr: *const c_void) -> *mut c_void { unsafe { let err_obj = build_decompile_error(&e); let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, err_obj); - obj + lean_ctor_set(obj, 0, err_obj.cast()); + obj.cast() } }, } diff --git a/src/lean/ffi/graph.rs b/src/lean/ffi/graph.rs index 5258c35d..3764cb77 100644 --- a/src/lean/ffi/graph.rs +++ b/src/lean/ffi/graph.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use super::ffi_io_guard; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; -use crate::lean::{ +use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_set, lean_io_result_mk_ok, }; @@ -28,16 +28,16 @@ pub fn build_ref_graph_array( let refs_arr = lean_alloc_array(ref_set.len(), ref_set.len()); for (j, ref_name) in ref_set.iter().enumerate() { let ref_name_obj = build_name(cache, ref_name); - lean_array_set_core(refs_arr, j, ref_name_obj); + lean_array_set_core(refs_arr, j, ref_name_obj.cast()); } let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); + lean_ctor_set(pair, 0, name_obj.cast()); lean_ctor_set(pair, 1, refs_arr); lean_array_set_core(arr, i, pair); } - arr + arr.cast() } } @@ -54,8 +54,8 
@@ pub fn build_condensed_blocks( let name_obj = build_name(cache, name); let low_link_obj = build_name(cache, low_link); let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, low_link_obj); + lean_ctor_set(pair, 0, name_obj.cast()); + lean_ctor_set(pair, 1, low_link_obj.cast()); lean_array_set_core(low_links_arr, i, pair); } @@ -67,10 +67,10 @@ pub fn build_condensed_blocks( let block_names_arr = lean_alloc_array(block_set.len(), block_set.len()); for (j, block_name) in block_set.iter().enumerate() { let block_name_obj = build_name(cache, block_name); - lean_array_set_core(block_names_arr, j, block_name_obj); + lean_array_set_core(block_names_arr, j, block_name_obj.cast()); } let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); + lean_ctor_set(pair, 0, name_obj.cast()); lean_ctor_set(pair, 1, block_names_arr); lean_array_set_core(blocks_arr, i, pair); } @@ -83,10 +83,10 @@ pub fn build_condensed_blocks( let refs_arr = lean_alloc_array(ref_set.len(), ref_set.len()); for (j, ref_name) in ref_set.iter().enumerate() { let ref_name_obj = build_name(cache, ref_name); - lean_array_set_core(refs_arr, j, ref_name_obj); + lean_array_set_core(refs_arr, j, ref_name_obj.cast()); } let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); + lean_ctor_set(pair, 0, name_obj.cast()); lean_ctor_set(pair, 1, refs_arr); lean_array_set_core(block_refs_arr, i, pair); } @@ -96,7 +96,7 @@ pub fn build_condensed_blocks( lean_ctor_set(result, 0, low_links_arr); lean_ctor_set(result, 1, blocks_arr); lean_ctor_set(result, 2, block_refs_arr); - result + result.cast() } } @@ -115,7 +115,7 @@ pub extern "C" fn rs_build_ref_graph( let ref_graph = build_ref_graph(&rust_env); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_ref_graph_array(&mut cache, &ref_graph.out_refs); - unsafe { lean_io_result_mk_ok(result) } + unsafe { lean_io_result_mk_ok(result.cast()) }.cast() })) } @@ -131,6 
+131,6 @@ pub extern "C" fn rs_compute_sccs( let condensed = compute_sccs(&ref_graph.out_refs); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_condensed_blocks(&mut cache, &condensed); - unsafe { lean_io_result_mk_ok(result) } + unsafe { lean_io_result_mk_ok(result.cast()) }.cast() })) } diff --git a/src/lean/ffi/iroh.rs b/src/lean/ffi/iroh.rs deleted file mode 100644 index 23b5d758..00000000 --- a/src/lean/ffi/iroh.rs +++ /dev/null @@ -1,2 +0,0 @@ -// Iroh FFI types have been removed. -// The iroh server/client modules now build Lean objects directly. diff --git a/src/lean/ffi/ix/address.rs b/src/lean/ffi/ix/address.rs index 9b35abf8..9f77d120 100644 --- a/src/lean/ffi/ix/address.rs +++ b/src/lean/ffi/ix/address.rs @@ -5,7 +5,7 @@ use std::ffi::c_void; use crate::lean::{ - as_ref_unsafe, lean_alloc_sarray, lean_sarray_cptr, sarray::LeanSArrayObject, + lean::{lean_alloc_sarray, lean_sarray_cptr}, lean_sarray_data, }; /// Build a Ix.Address from a blake3::Hash. 
@@ -16,7 +16,7 @@ pub fn build_address(hash: &blake3::Hash) -> *mut c_void { let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); let data_ptr = lean_sarray_cptr(ba); std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - ba // Due to unboxing, ByteArray IS the Address + ba.cast() // Due to unboxing, ByteArray IS the Address } } @@ -29,13 +29,12 @@ pub extern "C" fn rs_roundtrip_ix_address( unsafe { // Address is a single-field struct { hash : ByteArray } // Due to unboxing, addr_ptr IS the ByteArray directly - let ba: &LeanSArrayObject = as_ref_unsafe(addr_ptr.cast()); - let bytes = ba.data(); + let bytes = lean_sarray_data(addr_ptr); // Rebuild ByteArray - this IS the Address due to unboxing let new_ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); let data_ptr = lean_sarray_cptr(new_ba); std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - new_ba + new_ba.cast() } } diff --git a/src/lean/ffi/ix/constant.rs b/src/lean/ffi/ix/constant.rs index 19ebc7cf..71e8a1b6 100644 --- a/src/lean/ffi/ix/constant.rs +++ b/src/lean/ffi/ix/constant.rs @@ -17,13 +17,12 @@ use crate::ix::env::{ DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; -use crate::lean::array::LeanArrayObject; use crate::lean::nat::Nat; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, - lean_is_scalar, lean_obj_tag, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_ctor_set_uint8, lean_obj_tag, }; +use crate::lean::{lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_is_scalar}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -44,10 +43,10 @@ pub fn build_constant_val( let type_obj = build_expr(cache, &cv.typ); let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, 
name_obj); - lean_ctor_set(obj, 1, level_params_obj); - lean_ctor_set(obj, 2, type_obj); - obj + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, level_params_obj.cast()); + lean_ctor_set(obj, 2, type_obj.cast()); + obj.cast() } } @@ -68,7 +67,7 @@ pub fn build_reducibility_hints(hints: &ReducibilityHints) -> *mut c_void { // Set the uint32 at offset 0 in the scalar area let ptr = obj.cast::(); *(ptr.add(8).cast::()) = *h; - obj + obj.cast() }, } } @@ -86,12 +85,12 @@ pub fn build_constant_info( // AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } let cnst_obj = build_constant_val(cache, &v.cnst); let axiom_val = lean_alloc_ctor(0, 1, 1); - lean_ctor_set(axiom_val, 0, cnst_obj); + lean_ctor_set(axiom_val, 0, cnst_obj.cast()); lean_ctor_set_uint8(axiom_val, 8, v.is_unsafe as u8); let obj = lean_alloc_ctor(0, 1, 0); lean_ctor_set(obj, 0, axiom_val); - obj + obj.cast() }, // | defnInfo (v : DefinitionVal) -- tag 1 ConstantInfo::DefnInfo(v) => { @@ -109,15 +108,15 @@ pub fn build_constant_info( }; let defn_val = lean_alloc_ctor(0, 4, 1); // 4 obj fields, 1 scalar byte - lean_ctor_set(defn_val, 0, cnst_obj); - lean_ctor_set(defn_val, 1, value_obj); - lean_ctor_set(defn_val, 2, hints_obj); - lean_ctor_set(defn_val, 3, all_obj); + lean_ctor_set(defn_val, 0, cnst_obj.cast()); + lean_ctor_set(defn_val, 1, value_obj.cast()); + lean_ctor_set(defn_val, 2, hints_obj.cast()); + lean_ctor_set(defn_val, 3, all_obj.cast()); lean_ctor_set_uint8(defn_val, 4 * 8, safety_byte); let obj = lean_alloc_ctor(1, 1, 0); lean_ctor_set(obj, 0, defn_val); - obj + obj.cast() }, // | thmInfo (v : TheoremVal) -- tag 2 ConstantInfo::ThmInfo(v) => { @@ -127,13 +126,13 @@ pub fn build_constant_info( let all_obj = build_name_array(cache, &v.all); let thm_val = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(thm_val, 0, cnst_obj); - lean_ctor_set(thm_val, 1, value_obj); - lean_ctor_set(thm_val, 2, all_obj); + lean_ctor_set(thm_val, 0, cnst_obj.cast()); + lean_ctor_set(thm_val, 1, 
value_obj.cast()); + lean_ctor_set(thm_val, 2, all_obj.cast()); let obj = lean_alloc_ctor(2, 1, 0); lean_ctor_set(obj, 0, thm_val); - obj + obj.cast() }, // | opaqueInfo (v : OpaqueVal) -- tag 3 ConstantInfo::OpaqueInfo(v) => { @@ -143,14 +142,14 @@ pub fn build_constant_info( let all_obj = build_name_array(cache, &v.all); let opaque_val = lean_alloc_ctor(0, 3, 1); - lean_ctor_set(opaque_val, 0, cnst_obj); - lean_ctor_set(opaque_val, 1, value_obj); - lean_ctor_set(opaque_val, 2, all_obj); + lean_ctor_set(opaque_val, 0, cnst_obj.cast()); + lean_ctor_set(opaque_val, 1, value_obj.cast()); + lean_ctor_set(opaque_val, 2, all_obj.cast()); lean_ctor_set_uint8(opaque_val, 3 * 8, v.is_unsafe as u8); let obj = lean_alloc_ctor(3, 1, 0); lean_ctor_set(obj, 0, opaque_val); - obj + obj.cast() }, // | quotInfo (v : QuotVal) -- tag 4 ConstantInfo::QuotInfo(v) => { @@ -166,12 +165,12 @@ pub fn build_constant_info( }; let quot_val = lean_alloc_ctor(0, 1, 1); // 1 obj field, 1 scalar byte - lean_ctor_set(quot_val, 0, cnst_obj); + lean_ctor_set(quot_val, 0, cnst_obj.cast()); lean_ctor_set_uint8(quot_val, 8, kind_byte); let obj = lean_alloc_ctor(4, 1, 0); lean_ctor_set(obj, 0, quot_val); - obj + obj.cast() }, // | inductInfo (v : InductiveVal) -- tag 5 ConstantInfo::InductInfo(v) => { @@ -185,19 +184,19 @@ pub fn build_constant_info( // 6 object fields, 3 scalar bytes for bools let induct_val = lean_alloc_ctor(0, 6, 3); - lean_ctor_set(induct_val, 0, cnst_obj); - lean_ctor_set(induct_val, 1, num_params_obj); - lean_ctor_set(induct_val, 2, num_indices_obj); - lean_ctor_set(induct_val, 3, all_obj); - lean_ctor_set(induct_val, 4, ctors_obj); - lean_ctor_set(induct_val, 5, num_nested_obj); + lean_ctor_set(induct_val, 0, cnst_obj.cast()); + lean_ctor_set(induct_val, 1, num_params_obj.cast()); + lean_ctor_set(induct_val, 2, num_indices_obj.cast()); + lean_ctor_set(induct_val, 3, all_obj.cast()); + lean_ctor_set(induct_val, 4, ctors_obj.cast()); + lean_ctor_set(induct_val, 5, 
num_nested_obj.cast()); lean_ctor_set_uint8(induct_val, 6 * 8, v.is_rec as u8); lean_ctor_set_uint8(induct_val, 6 * 8 + 1, v.is_unsafe as u8); lean_ctor_set_uint8(induct_val, 6 * 8 + 2, v.is_reflexive as u8); let obj = lean_alloc_ctor(5, 1, 0); lean_ctor_set(obj, 0, induct_val); - obj + obj.cast() }, // | ctorInfo (v : ConstructorVal) -- tag 6 ConstantInfo::CtorInfo(v) => { @@ -210,16 +209,16 @@ pub fn build_constant_info( // 5 object fields, 1 scalar byte for bool let ctor_val = lean_alloc_ctor(0, 5, 1); - lean_ctor_set(ctor_val, 0, cnst_obj); - lean_ctor_set(ctor_val, 1, induct_obj); - lean_ctor_set(ctor_val, 2, cidx_obj); - lean_ctor_set(ctor_val, 3, num_params_obj); - lean_ctor_set(ctor_val, 4, num_fields_obj); + lean_ctor_set(ctor_val, 0, cnst_obj.cast()); + lean_ctor_set(ctor_val, 1, induct_obj.cast()); + lean_ctor_set(ctor_val, 2, cidx_obj.cast()); + lean_ctor_set(ctor_val, 3, num_params_obj.cast()); + lean_ctor_set(ctor_val, 4, num_fields_obj.cast()); lean_ctor_set_uint8(ctor_val, 5 * 8, v.is_unsafe as u8); let obj = lean_alloc_ctor(6, 1, 0); lean_ctor_set(obj, 0, ctor_val); - obj + obj.cast() }, // | recInfo (v : RecursorVal) -- tag 7 ConstantInfo::RecInfo(v) => { @@ -234,19 +233,19 @@ pub fn build_constant_info( // 7 object fields, 2 scalar bytes for bools let rec_val = lean_alloc_ctor(0, 7, 2); - lean_ctor_set(rec_val, 0, cnst_obj); - lean_ctor_set(rec_val, 1, all_obj); - lean_ctor_set(rec_val, 2, num_params_obj); - lean_ctor_set(rec_val, 3, num_indices_obj); - lean_ctor_set(rec_val, 4, num_motives_obj); - lean_ctor_set(rec_val, 5, num_minors_obj); - lean_ctor_set(rec_val, 6, rules_obj); + lean_ctor_set(rec_val, 0, cnst_obj.cast()); + lean_ctor_set(rec_val, 1, all_obj.cast()); + lean_ctor_set(rec_val, 2, num_params_obj.cast()); + lean_ctor_set(rec_val, 3, num_indices_obj.cast()); + lean_ctor_set(rec_val, 4, num_motives_obj.cast()); + lean_ctor_set(rec_val, 5, num_minors_obj.cast()); + lean_ctor_set(rec_val, 6, rules_obj.cast()); 
lean_ctor_set_uint8(rec_val, 7 * 8, v.k as u8); lean_ctor_set_uint8(rec_val, 7 * 8 + 1, v.is_unsafe as u8); let obj = lean_alloc_ctor(7, 1, 0); lean_ctor_set(obj, 0, rec_val); - obj + obj.cast() }, } } @@ -266,13 +265,13 @@ fn build_recursor_rules( let rhs_obj = build_expr(cache, &rule.rhs); let rule_obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(rule_obj, 0, ctor_obj); - lean_ctor_set(rule_obj, 1, n_fields_obj); - lean_ctor_set(rule_obj, 2, rhs_obj); + lean_ctor_set(rule_obj, 0, ctor_obj.cast()); + lean_ctor_set(rule_obj, 1, n_fields_obj.cast()); + lean_ctor_set(rule_obj, 2, rhs_obj.cast()); lean_array_set_core(arr, i, rule_obj); } - arr + arr.cast() } } @@ -288,14 +287,12 @@ pub fn decode_constant_val(ptr: *const c_void) -> ConstantVal { let level_params_ptr = lean_ctor_get(ptr as *mut _, 1); let type_ptr = lean_ctor_get(ptr as *mut _, 2); - let name = decode_ix_name(name_ptr); + let name = decode_ix_name(name_ptr.cast()); - let level_params_obj: &LeanArrayObject = - as_ref_unsafe(level_params_ptr.cast()); let level_params: Vec = - level_params_obj.data().iter().map(|&p| decode_ix_name(p)).collect(); + lean_array_data(level_params_ptr.cast()).iter().map(|&p| decode_ix_name(p)).collect(); - let typ = decode_ix_expr(type_ptr); + let typ = decode_ix_expr(type_ptr.cast()); ConstantVal { name, level_params, typ } } @@ -345,9 +342,9 @@ fn decode_recursor_rule(ptr: *const c_void) -> RecursorRule { let rhs_ptr = lean_ctor_get(ptr as *mut _, 2); RecursorRule { - ctor: decode_ix_name(ctor_ptr), - n_fields: Nat::from_ptr(n_fields_ptr), - rhs: decode_ix_expr(rhs_ptr), + ctor: decode_ix_name(ctor_ptr.cast()), + n_fields: Nat::from_ptr(n_fields_ptr.cast()), + rhs: decode_ix_expr(rhs_ptr.cast()), } } } @@ -362,13 +359,11 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { 0 => { // axiomInfo: AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } // Structure: 1 obj field (cnst), 1 scalar byte (isUnsafe) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - 
let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_unsafe = ctor.get_scalar_u8(1, 0) != 0; + let cnst_ptr = lean_ctor_get(inner_ptr, 0); + let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 1, 0) != 0; ConstantInfo::AxiomInfo(AxiomVal { - cnst: decode_constant_val(cnst_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), is_unsafe, }) }, @@ -376,15 +371,13 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { // defnInfo: DefinitionVal = { cnst, value, hints, safety, all } // NOTE: safety (DefinitionSafety) is a small enum and is stored as a SCALAR field // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let value_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let hints_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 3); // all is at index 3, not 4! + let cnst_ptr = lean_ctor_get(inner_ptr, 0); + let value_ptr = lean_ctor_get(inner_ptr, 1); + let hints_ptr = lean_ctor_get(inner_ptr, 2); + let all_ptr = lean_ctor_get(inner_ptr, 3); // all is at index 3, not 4! 
// safety is a scalar at offset 4*8 = 32 bytes from start of object fields - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let safety_byte = ctor.get_scalar_u8(4, 0); // 4 obj fields, offset 0 in scalar area + let safety_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 4, 0); // 4 obj fields, offset 0 in scalar area let safety = match safety_byte { 0 => DefinitionSafety::Unsafe, 1 => DefinitionSafety::Safe, @@ -393,51 +386,47 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }; ConstantInfo::DefnInfo(DefinitionVal { - cnst: decode_constant_val(cnst_ptr), - value: decode_ix_expr(value_ptr), - hints: decode_reducibility_hints(hints_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), + value: decode_ix_expr(value_ptr.cast()), + hints: decode_reducibility_hints(hints_ptr.cast()), safety, - all: decode_name_array(all_ptr), + all: decode_name_array(all_ptr.cast()), }) }, 2 => { // thmInfo: TheoremVal = { cnst, value, all } - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let value_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 2); + let cnst_ptr = lean_ctor_get(inner_ptr, 0); + let value_ptr = lean_ctor_get(inner_ptr, 1); + let all_ptr = lean_ctor_get(inner_ptr, 2); ConstantInfo::ThmInfo(TheoremVal { - cnst: decode_constant_val(cnst_ptr), - value: decode_ix_expr(value_ptr), - all: decode_name_array(all_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), + value: decode_ix_expr(value_ptr.cast()), + all: decode_name_array(all_ptr.cast()), }) }, 3 => { // opaqueInfo: OpaqueVal = { cnst, value, isUnsafe, all } // Structure: 3 obj fields (cnst, value, all), 1 scalar byte (isUnsafe) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let value_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_unsafe = 
ctor.get_scalar_u8(3, 0) != 0; + let cnst_ptr = lean_ctor_get(inner_ptr, 0); + let value_ptr = lean_ctor_get(inner_ptr, 1); + let all_ptr = lean_ctor_get(inner_ptr, 2); + let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 3, 0) != 0; ConstantInfo::OpaqueInfo(OpaqueVal { - cnst: decode_constant_val(cnst_ptr), - value: decode_ix_expr(value_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), + value: decode_ix_expr(value_ptr.cast()), is_unsafe, - all: decode_name_array(all_ptr), + all: decode_name_array(all_ptr.cast()), }) }, 4 => { // quotInfo: QuotVal = { cnst, kind } // NOTE: QuotKind is a small enum (4 0-field ctors), stored as SCALAR // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); + let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let kind_byte = ctor.get_scalar_u8(1, 0); // 1 obj field, offset 0 in scalar area + let kind_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 1, 0); // 1 obj field, offset 0 in scalar area let kind = match kind_byte { 0 => QuotKind::Type, 1 => QuotKind::Ctor, @@ -447,33 +436,31 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }; ConstantInfo::QuotInfo(QuotVal { - cnst: decode_constant_val(cnst_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), kind, }) }, 5 => { // inductInfo: InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } // 6 obj fields, 3 scalar bytes - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let num_params_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let num_indices_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 3); - let ctors_ptr = lean_ctor_get(inner_ptr as *mut _, 4); - let num_nested_ptr = lean_ctor_get(inner_ptr as *mut _, 5); - - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_rec = 
ctor.get_scalar_u8(6, 0) != 0; - let is_unsafe = ctor.get_scalar_u8(6, 1) != 0; - let is_reflexive = ctor.get_scalar_u8(6, 2) != 0; + let cnst_ptr = lean_ctor_get(inner_ptr, 0); + let num_params_ptr = lean_ctor_get(inner_ptr, 1); + let num_indices_ptr = lean_ctor_get(inner_ptr, 2); + let all_ptr = lean_ctor_get(inner_ptr, 3); + let ctors_ptr = lean_ctor_get(inner_ptr, 4); + let num_nested_ptr = lean_ctor_get(inner_ptr, 5); + + let is_rec = lean_ctor_scalar_u8(inner_ptr.cast(), 6, 0) != 0; + let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 6, 1) != 0; + let is_reflexive = lean_ctor_scalar_u8(inner_ptr.cast(), 6, 2) != 0; ConstantInfo::InductInfo(InductiveVal { - cnst: decode_constant_val(cnst_ptr), - num_params: Nat::from_ptr(num_params_ptr), - num_indices: Nat::from_ptr(num_indices_ptr), - all: decode_name_array(all_ptr), - ctors: decode_name_array(ctors_ptr), - num_nested: Nat::from_ptr(num_nested_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), + num_params: Nat::from_ptr(num_params_ptr.cast()), + num_indices: Nat::from_ptr(num_indices_ptr.cast()), + all: decode_name_array(all_ptr.cast()), + ctors: decode_name_array(ctors_ptr.cast()), + num_nested: Nat::from_ptr(num_nested_ptr.cast()), is_rec, is_unsafe, is_reflexive, @@ -482,52 +469,47 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { 6 => { // ctorInfo: ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } // 5 obj fields, 1 scalar byte - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let induct_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let cidx_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let num_params_ptr = lean_ctor_get(inner_ptr as *mut _, 3); - let num_fields_ptr = lean_ctor_get(inner_ptr as *mut _, 4); + let cnst_ptr = lean_ctor_get(inner_ptr, 0); + let induct_ptr = lean_ctor_get(inner_ptr, 1); + let cidx_ptr = lean_ctor_get(inner_ptr, 2); + let num_params_ptr = lean_ctor_get(inner_ptr, 3); + let num_fields_ptr = lean_ctor_get(inner_ptr, 4); - 
let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let is_unsafe = ctor.get_scalar_u8(5, 0) != 0; + let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 5, 0) != 0; ConstantInfo::CtorInfo(ConstructorVal { - cnst: decode_constant_val(cnst_ptr), - induct: decode_ix_name(induct_ptr), - cidx: Nat::from_ptr(cidx_ptr), - num_params: Nat::from_ptr(num_params_ptr), - num_fields: Nat::from_ptr(num_fields_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), + induct: decode_ix_name(induct_ptr.cast()), + cidx: Nat::from_ptr(cidx_ptr.cast()), + num_params: Nat::from_ptr(num_params_ptr.cast()), + num_fields: Nat::from_ptr(num_fields_ptr.cast()), is_unsafe, }) }, 7 => { // recInfo: RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } // 7 obj fields, 2 scalar bytes - let cnst_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let all_ptr = lean_ctor_get(inner_ptr as *mut _, 1); - let num_params_ptr = lean_ctor_get(inner_ptr as *mut _, 2); - let num_indices_ptr = lean_ctor_get(inner_ptr as *mut _, 3); - let num_motives_ptr = lean_ctor_get(inner_ptr as *mut _, 4); - let num_minors_ptr = lean_ctor_get(inner_ptr as *mut _, 5); - let rules_ptr = lean_ctor_get(inner_ptr as *mut _, 6); - - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(inner_ptr.cast()); - let k = ctor.get_scalar_u8(7, 0) != 0; - let is_unsafe = ctor.get_scalar_u8(7, 1) != 0; - - let rules_obj: &LeanArrayObject = as_ref_unsafe(rules_ptr.cast()); + let cnst_ptr = lean_ctor_get(inner_ptr, 0); + let all_ptr = lean_ctor_get(inner_ptr, 1); + let num_params_ptr = lean_ctor_get(inner_ptr, 2); + let num_indices_ptr = lean_ctor_get(inner_ptr, 3); + let num_motives_ptr = lean_ctor_get(inner_ptr, 4); + let num_minors_ptr = lean_ctor_get(inner_ptr, 5); + let rules_ptr = lean_ctor_get(inner_ptr, 6); + + let k = lean_ctor_scalar_u8(inner_ptr.cast(), 7, 0) != 0; + let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 7, 1) != 0; + let rules: Vec = - 
rules_obj.data().iter().map(|&p| decode_recursor_rule(p)).collect(); + lean_array_data(rules_ptr.cast()).iter().map(|&p| decode_recursor_rule(p)).collect(); ConstantInfo::RecInfo(RecursorVal { - cnst: decode_constant_val(cnst_ptr), - all: decode_name_array(all_ptr), - num_params: Nat::from_ptr(num_params_ptr), - num_indices: Nat::from_ptr(num_indices_ptr), - num_motives: Nat::from_ptr(num_motives_ptr), - num_minors: Nat::from_ptr(num_minors_ptr), + cnst: decode_constant_val(cnst_ptr.cast()), + all: decode_name_array(all_ptr.cast()), + num_params: Nat::from_ptr(num_params_ptr.cast()), + num_indices: Nat::from_ptr(num_indices_ptr.cast()), + num_motives: Nat::from_ptr(num_motives_ptr.cast()), + num_minors: Nat::from_ptr(num_minors_ptr.cast()), rules, k, is_unsafe, diff --git a/src/lean/ffi/ix/data.rs b/src/lean/ffi/ix/data.rs index e195c74e..d2c5c77a 100644 --- a/src/lean/ffi/ix/data.rs +++ b/src/lean/ffi/ix/data.rs @@ -5,14 +5,12 @@ use std::ffi::c_void; use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, }; -use crate::lean::array::LeanArrayObject; use crate::lean::nat::Nat; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_is_scalar, - lean_mk_string, lean_obj_tag, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_ctor_set_uint8, lean_mk_string, lean_obj_tag, }; +use crate::lean::{lean_array_data, lean_ctor_scalar_u8, lean_is_scalar, lean_obj_to_string}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -24,13 +22,13 @@ pub fn build_int(int: &Int) -> *mut c_void { match int { Int::OfNat(n) => { let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj + lean_ctor_set(obj, 0, build_nat(n).cast()); + obj.cast() }, Int::NegSucc(n) => { let obj = 
lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj + lean_ctor_set(obj, 0, build_nat(n).cast()); + obj.cast() }, } } @@ -42,9 +40,9 @@ pub fn build_substring(ss: &Substring) -> *mut c_void { let s_cstr = crate::lean::safe_cstring(ss.str.as_str()); let obj = lean_alloc_ctor(0, 3, 0); lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - lean_ctor_set(obj, 1, build_nat(&ss.start_pos)); - lean_ctor_set(obj, 2, build_nat(&ss.stop_pos)); - obj + lean_ctor_set(obj, 1, build_nat(&ss.start_pos).cast()); + lean_ctor_set(obj, 2, build_nat(&ss.stop_pos).cast()); + obj.cast() } } @@ -55,22 +53,22 @@ pub fn build_source_info(si: &SourceInfo) -> *mut c_void { // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0 SourceInfo::Original(leading, pos, trailing, end_pos) => { let obj = lean_alloc_ctor(0, 4, 0); - lean_ctor_set(obj, 0, build_substring(leading)); - lean_ctor_set(obj, 1, build_nat(pos)); - lean_ctor_set(obj, 2, build_substring(trailing)); - lean_ctor_set(obj, 3, build_nat(end_pos)); - obj + lean_ctor_set(obj, 0, build_substring(leading).cast()); + lean_ctor_set(obj, 1, build_nat(pos).cast()); + lean_ctor_set(obj, 2, build_substring(trailing).cast()); + lean_ctor_set(obj, 3, build_nat(end_pos).cast()); + obj.cast() }, // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1 SourceInfo::Synthetic(pos, end_pos, canonical) => { let obj = lean_alloc_ctor(1, 2, 1); - lean_ctor_set(obj, 0, build_nat(pos)); - lean_ctor_set(obj, 1, build_nat(end_pos)); + lean_ctor_set(obj, 0, build_nat(pos).cast()); + lean_ctor_set(obj, 1, build_nat(end_pos).cast()); lean_ctor_set_uint8(obj, 2 * 8, *canonical as u8); - obj + obj.cast() }, // | none -- tag 2 - SourceInfo::None => lean_alloc_ctor(2, 0, 0), + SourceInfo::None => lean_alloc_ctor(2, 0, 0).cast(), } } } @@ -85,17 +83,17 @@ pub fn build_syntax_preresolved( // | namespace (name : Name) -- tag 0 SyntaxPreresolved::Namespace(name) => { let obj = 
lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_name(cache, name)); - obj + lean_ctor_set(obj, 0, build_name(cache, name).cast()); + obj.cast() }, // | decl (name : Name) (aliases : Array String) -- tag 1 SyntaxPreresolved::Decl(name, aliases) => { let name_obj = build_name(cache, name); let aliases_obj = build_string_array(aliases); let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, aliases_obj); - obj + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, aliases_obj.cast()); + obj.cast() }, } } @@ -109,7 +107,7 @@ pub fn build_string_array(strings: &[String]) -> *mut c_void { let s_cstr = crate::lean::safe_cstring(s.as_str()); lean_array_set_core(arr, i, lean_mk_string(s_cstr.as_ptr())); } - arr + arr.cast() } } @@ -118,26 +116,26 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> *mut c_void { unsafe { match syn { // | missing -- tag 0 - Syntax::Missing => lean_alloc_ctor(0, 0, 0), + Syntax::Missing => lean_alloc_ctor(0, 0, 0).cast(), // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 Syntax::Node(info, kind, args) => { let info_obj = build_source_info(info); let kind_obj = build_name(cache, kind); let args_obj = build_syntax_array(cache, args); let obj = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(obj, 0, info_obj); - lean_ctor_set(obj, 1, kind_obj); - lean_ctor_set(obj, 2, args_obj); - obj + lean_ctor_set(obj, 0, info_obj.cast()); + lean_ctor_set(obj, 1, kind_obj.cast()); + lean_ctor_set(obj, 2, args_obj.cast()); + obj.cast() }, // | atom (info : SourceInfo) (val : String) -- tag 2 Syntax::Atom(info, val) => { let info_obj = build_source_info(info); let val_cstr = crate::lean::safe_cstring(val.as_str()); let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, info_obj); + lean_ctor_set(obj, 0, info_obj.cast()); lean_ctor_set(obj, 1, lean_mk_string(val_cstr.as_ptr())); - obj + obj.cast() }, // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) 
(preresolved : Array SyntaxPreresolved) -- tag 3 Syntax::Ident(info, raw_val, val, preresolved) => { @@ -147,11 +145,11 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> *mut c_void { let preresolved_obj = build_syntax_preresolved_array(cache, preresolved); let obj = lean_alloc_ctor(3, 4, 0); - lean_ctor_set(obj, 0, info_obj); - lean_ctor_set(obj, 1, raw_val_obj); - lean_ctor_set(obj, 2, val_obj); - lean_ctor_set(obj, 3, preresolved_obj); - obj + lean_ctor_set(obj, 0, info_obj.cast()); + lean_ctor_set(obj, 1, raw_val_obj.cast()); + lean_ctor_set(obj, 2, val_obj.cast()); + lean_ctor_set(obj, 3, preresolved_obj.cast()); + obj.cast() }, } } @@ -166,9 +164,9 @@ pub fn build_syntax_array( let arr = lean_alloc_array(items.len(), items.len()); for (i, item) in items.iter().enumerate() { let item_obj = build_syntax(cache, item); - lean_array_set_core(arr, i, item_obj); + lean_array_set_core(arr, i, item_obj.cast()); } - arr + arr.cast() } } @@ -181,9 +179,9 @@ pub fn build_syntax_preresolved_array( let arr = lean_alloc_array(items.len(), items.len()); for (i, item) in items.iter().enumerate() { let item_obj = build_syntax_preresolved(cache, item); - lean_array_set_core(arr, i, item_obj); + lean_array_set_core(arr, i, item_obj.cast()); } - arr + arr.cast() } } @@ -198,33 +196,33 @@ pub fn build_data_value( let s_cstr = crate::lean::safe_cstring(s.as_str()); let obj = lean_alloc_ctor(0, 1, 0); lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - obj + obj.cast() }, DataValue::OfBool(b) => { // 0 object fields, 1 scalar byte let obj = lean_alloc_ctor(1, 0, 1); lean_ctor_set_uint8(obj, 0, *b as u8); - obj + obj.cast() }, DataValue::OfName(n) => { let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, build_name(cache, n)); - obj + lean_ctor_set(obj, 0, build_name(cache, n).cast()); + obj.cast() }, DataValue::OfNat(n) => { let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj + lean_ctor_set(obj, 0, build_nat(n).cast()); + 
obj.cast() }, DataValue::OfInt(i) => { let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, build_int(i)); - obj + lean_ctor_set(obj, 0, build_int(i).cast()); + obj.cast() }, DataValue::OfSyntax(syn) => { let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_syntax(cache, syn)); - obj + lean_ctor_set(obj, 0, build_syntax(cache, syn).cast()); + obj.cast() }, } } @@ -242,11 +240,11 @@ pub fn build_kvmap( let dv_obj = build_data_value(cache, dv); // Prod (Name × DataValue) let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, dv_obj); + lean_ctor_set(pair, 0, name_obj.cast()); + lean_ctor_set(pair, 1, dv_obj.cast()); lean_array_set_core(arr, i, pair); } - arr + arr.cast() } } @@ -260,7 +258,7 @@ pub fn decode_ix_int(ptr: *const c_void) -> Int { unsafe { let tag = lean_obj_tag(ptr as *mut _); let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); + let nat = Nat::from_ptr(nat_ptr.cast()); match tag { 0 => Int::OfNat(nat), 1 => Int::NegSucc(nat), @@ -278,32 +276,29 @@ pub fn decode_data_value(ptr: *const c_void) -> DataValue { 0 => { // ofString: 1 object field let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_obj: &LeanStringObject = as_ref_unsafe(inner_ptr.cast()); - DataValue::OfString(str_obj.as_string()) + DataValue::OfString(lean_obj_to_string(inner_ptr as *const _)) }, 1 => { // ofBool: 0 object fields, 1 scalar byte - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let b = ctor.get_scalar_u8(0, 0) != 0; + let b = lean_ctor_scalar_u8(ptr, 0, 0) != 0; DataValue::OfBool(b) }, 2 => { // ofName: 1 object field let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfName(decode_ix_name(inner_ptr)) + DataValue::OfName(decode_ix_name(inner_ptr.cast())) }, 3 => { // ofNat: 1 object field let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfNat(Nat::from_ptr(inner_ptr)) + DataValue::OfNat(Nat::from_ptr(inner_ptr.cast())) }, 4 
=> { // ofInt: 1 object field let inner_ptr = lean_ctor_get(ptr as *mut _, 0); let int_tag = lean_obj_tag(inner_ptr as *mut _); let nat_ptr = lean_ctor_get(inner_ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); + let nat = Nat::from_ptr(nat_ptr.cast()); match int_tag { 0 => DataValue::OfInt(Int::OfNat(nat)), 1 => DataValue::OfInt(Int::NegSucc(nat)), @@ -313,7 +308,7 @@ pub fn decode_data_value(ptr: *const c_void) -> DataValue { 5 => { // ofSyntax: 1 object field let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfSyntax(decode_ix_syntax(inner_ptr).into()) + DataValue::OfSyntax(decode_ix_syntax(inner_ptr.cast()).into()) }, _ => panic!("Invalid DataValue tag: {}", tag), } @@ -335,11 +330,10 @@ pub fn decode_ix_syntax(ptr: *const c_void) -> Syntax { let kind_ptr = lean_ctor_get(ptr as *mut _, 1); let args_ptr = lean_ctor_get(ptr as *mut _, 2); - let info = decode_ix_source_info(info_ptr); - let kind = decode_ix_name(kind_ptr); - let args_obj: &LeanArrayObject = as_ref_unsafe(args_ptr.cast()); + let info = decode_ix_source_info(info_ptr.cast()); + let kind = decode_ix_name(kind_ptr.cast()); let args: Vec = - args_obj.data().iter().map(|&p| decode_ix_syntax(p)).collect(); + lean_array_data(args_ptr.cast()).iter().map(|&p| decode_ix_syntax(p)).collect(); Syntax::Node(info, kind, args) }, @@ -348,10 +342,8 @@ pub fn decode_ix_syntax(ptr: *const c_void) -> Syntax { let info_ptr = lean_ctor_get(ptr as *mut _, 0); let val_ptr = lean_ctor_get(ptr as *mut _, 1); - let info = decode_ix_source_info(info_ptr); - let val_obj: &LeanStringObject = as_ref_unsafe(val_ptr.cast()); - - Syntax::Atom(info, val_obj.as_string()) + let info = decode_ix_source_info(info_ptr.cast()); + Syntax::Atom(info, lean_obj_to_string(val_ptr.cast())) }, 3 => { // ident: info, rawVal, val, preresolved @@ -360,13 +352,10 @@ pub fn decode_ix_syntax(ptr: *const c_void) -> Syntax { let val_ptr = lean_ctor_get(ptr as *mut _, 2); let preresolved_ptr = lean_ctor_get(ptr as *mut _, 3); - let 
info = decode_ix_source_info(info_ptr); - let raw_val = decode_substring(raw_val_ptr); - let val = decode_ix_name(val_ptr); - let preresolved_obj: &LeanArrayObject = - as_ref_unsafe(preresolved_ptr.cast()); - let preresolved: Vec = preresolved_obj - .data() + let info = decode_ix_source_info(info_ptr.cast()); + let raw_val = decode_substring(raw_val_ptr.cast()); + let val = decode_ix_name(val_ptr.cast()); + let preresolved: Vec = lean_array_data(preresolved_ptr.cast()) .iter() .map(|&p| decode_syntax_preresolved(p)) .collect(); @@ -394,10 +383,10 @@ pub fn decode_ix_source_info(ptr: *const c_void) -> SourceInfo { let end_pos_ptr = lean_ctor_get(ptr as *mut _, 3); SourceInfo::Original( - decode_substring(leading_ptr), - Nat::from_ptr(pos_ptr), - decode_substring(trailing_ptr), - Nat::from_ptr(end_pos_ptr), + decode_substring(leading_ptr.cast()), + Nat::from_ptr(pos_ptr.cast()), + decode_substring(trailing_ptr.cast()), + Nat::from_ptr(end_pos_ptr.cast()), ) }, 1 => { @@ -405,13 +394,11 @@ pub fn decode_ix_source_info(ptr: *const c_void) -> SourceInfo { let pos_ptr = lean_ctor_get(ptr as *mut _, 0); let end_pos_ptr = lean_ctor_get(ptr as *mut _, 1); - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let canonical = ctor.get_scalar_u8(2, 0) != 0; + let canonical = lean_ctor_scalar_u8(ptr, 2, 0) != 0; SourceInfo::Synthetic( - Nat::from_ptr(pos_ptr), - Nat::from_ptr(end_pos_ptr), + Nat::from_ptr(pos_ptr.cast()), + Nat::from_ptr(end_pos_ptr.cast()), canonical, ) }, @@ -428,11 +415,10 @@ pub fn decode_substring(ptr: *const c_void) -> Substring { let start_ptr = lean_ctor_get(ptr as *mut _, 1); let stop_ptr = lean_ctor_get(ptr as *mut _, 2); - let str_obj: &LeanStringObject = as_ref_unsafe(str_ptr.cast()); Substring { - str: str_obj.as_string(), - start_pos: Nat::from_ptr(start_ptr), - stop_pos: Nat::from_ptr(stop_ptr), + str: lean_obj_to_string(str_ptr.cast()), + start_pos: Nat::from_ptr(start_ptr.cast()), + stop_pos: 
Nat::from_ptr(stop_ptr.cast()), } } } @@ -445,22 +431,17 @@ pub fn decode_syntax_preresolved(ptr: *const c_void) -> SyntaxPreresolved { 0 => { // namespace let name_ptr = lean_ctor_get(ptr as *mut _, 0); - SyntaxPreresolved::Namespace(decode_ix_name(name_ptr)) + SyntaxPreresolved::Namespace(decode_ix_name(name_ptr.cast())) }, 1 => { // decl let name_ptr = lean_ctor_get(ptr as *mut _, 0); let aliases_ptr = lean_ctor_get(ptr as *mut _, 1); - let name = decode_ix_name(name_ptr); - let aliases_obj: &LeanArrayObject = as_ref_unsafe(aliases_ptr.cast()); - let aliases: Vec = aliases_obj - .data() + let name = decode_ix_name(name_ptr.cast()); + let aliases: Vec = lean_array_data(aliases_ptr.cast()) .iter() - .map(|&p| { - let s: &LeanStringObject = as_ref_unsafe(p.cast()); - s.as_string() - }) + .map(|&p| lean_obj_to_string(p)) .collect(); SyntaxPreresolved::Decl(name, aliases) diff --git a/src/lean/ffi/ix/env.rs b/src/lean/ffi/ix/env.rs index 38776728..dae44e18 100644 --- a/src/lean/ffi/ix/env.rs +++ b/src/lean/ffi/ix/env.rs @@ -5,11 +5,11 @@ use std::ffi::c_void; use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; -use crate::lean::array::LeanArrayObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_is_scalar, lean_obj_tag, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_obj_tag, }; +use crate::lean::{lean_array_data, lean_box_fn, lean_is_scalar}; use super::super::builder::LeanBuildCache; use super::constant::{build_constant_info, decode_constant_info}; @@ -37,7 +37,7 @@ pub fn build_hashmap_from_pairs( // Create array of AssocLists (initially all nil = boxed 0) let buckets = lean_alloc_array(bucket_count, bucket_count); for i in 0..bucket_count { - lean_array_set_core(buckets, i, lean_box_fn(0)); // nil + lean_array_set_core(buckets, i, lean_box_fn(0).cast()); // nil } // Insert 
entries @@ -46,14 +46,13 @@ pub fn build_hashmap_from_pairs( usize::try_from(hash).expect("hash overflows usize") % bucket_count; // Get current bucket (AssocList) - let buckets_arr = buckets.cast::(); - let current_tail = (*buckets_arr).data()[bucket_idx]; + let current_tail = lean_array_data(buckets.cast())[bucket_idx]; // cons (key : α) (value : β) (tail : AssocList α β) -- tag 1 let cons = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(cons, 0, key_obj); - lean_ctor_set(cons, 1, val_obj); - lean_ctor_set(cons, 2, current_tail as *mut c_void); + lean_ctor_set(cons, 0, key_obj.cast()); + lean_ctor_set(cons, 1, val_obj.cast()); + lean_ctor_set(cons, 2, current_tail as *mut _); lean_array_set_core(buckets, bucket_idx, cons); } @@ -62,15 +61,15 @@ pub fn build_hashmap_from_pairs( // Due to unboxing, this IS the HashMap directly // Field 0 = size, Field 1 = buckets (2 object fields, no scalars) let size_obj = if size <= (usize::MAX >> 1) { - lean_box_fn(size) + lean_box_fn(size).cast() } else { - crate::lean::lean_uint64_to_nat(size as u64) + crate::lean::lean::lean_uint64_to_nat(size as u64) }; let raw = lean_alloc_ctor(0, 2, 0); lean_ctor_set(raw, 0, size_obj); lean_ctor_set(raw, 1, buckets); - raw + raw.cast() } } @@ -100,12 +99,12 @@ pub fn build_raw_environment( let val_obj = build_constant_info(cache, info); // Build pair (Name × ConstantInfo) let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj); - lean_ctor_set(pair, 1, val_obj); + lean_ctor_set(pair, 0, key_obj.cast()); + lean_ctor_set(pair, 1, val_obj.cast()); lean_array_set_core(consts_arr, i, pair); } - consts_arr + consts_arr.cast() } } @@ -143,8 +142,8 @@ where let value_ptr = lean_ctor_get(current as *mut _, 1); let tail_ptr = lean_ctor_get(current as *mut _, 2); - result.push((decode_key(key_ptr), decode_val(value_ptr))); - current = tail_ptr; + result.push((decode_key(key_ptr.cast()), decode_val(value_ptr.cast()))); + current = tail_ptr.cast(); } } @@ -172,10 +171,8 @@ where let _size_ptr 
= lean_ctor_get(map_ptr as *mut _, 0); // unused but needed for layout let buckets_ptr = lean_ctor_get(map_ptr as *mut _, 1); - let buckets_obj: &LeanArrayObject = as_ref_unsafe(buckets_ptr.cast()); - let mut pairs = Vec::new(); - for &bucket_ptr in buckets_obj.data() { + for &bucket_ptr in lean_array_data(buckets_ptr.cast()) { let bucket_pairs = decode_assoc_list(bucket_ptr, decode_key, decode_val); pairs.extend(bucket_pairs); } @@ -213,14 +210,13 @@ pub fn decode_ix_raw_environment( unsafe { // RawEnvironment is a single-field structure that may be unboxed // Try treating ptr as the array directly first - let arr_obj: &LeanArrayObject = as_ref_unsafe(ptr.cast()); let mut consts: FxHashMap = FxHashMap::default(); - for &pair_ptr in arr_obj.data() { + for &pair_ptr in lean_array_data(ptr) { let name_ptr = lean_ctor_get(pair_ptr as *mut _, 0); let info_ptr = lean_ctor_get(pair_ptr as *mut _, 1); - let name = decode_ix_name(name_ptr); - let info = decode_constant_info(info_ptr); + let name = decode_ix_name(name_ptr.cast()); + let info = decode_constant_info(info_ptr.cast()); consts.insert(name, info); } @@ -234,14 +230,14 @@ pub fn decode_ix_raw_environment_vec( ptr: *const c_void, ) -> Vec<(Name, ConstantInfo)> { unsafe { - let arr_obj: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - let mut consts = Vec::with_capacity(arr_obj.data().len()); + let data = lean_array_data(ptr); + let mut consts = Vec::with_capacity(data.len()); - for &pair_ptr in arr_obj.data() { + for &pair_ptr in data { let name_ptr = lean_ctor_get(pair_ptr as *mut _, 0); let info_ptr = lean_ctor_get(pair_ptr as *mut _, 1); - let name = decode_ix_name(name_ptr); - let info = decode_constant_info(info_ptr); + let name = decode_ix_name(name_ptr.cast()); + let info = decode_constant_info(info_ptr.cast()); consts.push((name, info)); } @@ -260,11 +256,11 @@ pub fn build_raw_environment_from_vec( let key_obj = build_name(cache, name); let val_obj = build_constant_info(cache, info); let pair = 
lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj); - lean_ctor_set(pair, 1, val_obj); + lean_ctor_set(pair, 0, key_obj.cast()); + lean_ctor_set(pair, 1, val_obj.cast()); lean_array_set_core(consts_arr, i, pair); } - consts_arr + consts_arr.cast() } } diff --git a/src/lean/ffi/ix/expr.rs b/src/lean/ffi/ix/expr.rs index 598d5a77..f986a6c1 100644 --- a/src/lean/ffi/ix/expr.rs +++ b/src/lean/ffi/ix/expr.rs @@ -19,14 +19,12 @@ use std::ffi::c_void; use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; -use crate::lean::array::LeanArrayObject; use crate::lean::nat::Nat; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_inc, - lean_mk_string, lean_obj_tag, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_ctor_set_uint8, lean_inc, lean_mk_string, lean_obj_tag, }; +use crate::lean::{lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_obj_to_string}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -40,7 +38,7 @@ use super::name::{build_name, decode_ix_name}; pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { let hash = *expr.get_hash(); if let Some(&cached) = cache.exprs.get(&hash) { - unsafe { lean_inc(cached) }; + unsafe { lean_inc(cached.cast()) }; return cached; } @@ -48,45 +46,45 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { match expr.as_data() { ExprData::Bvar(idx, h) => { let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, build_nat(idx)); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, build_nat(idx).cast()); + lean_ctor_set(obj, 1, build_address(h).cast()); + obj.cast() }, ExprData::Fvar(name, h) => { let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, 
build_name(cache, name)); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, build_name(cache, name).cast()); + lean_ctor_set(obj, 1, build_address(h).cast()); + obj.cast() }, ExprData::Mvar(name, h) => { let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, build_name(cache, name)); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, build_name(cache, name).cast()); + lean_ctor_set(obj, 1, build_address(h).cast()); + obj.cast() }, ExprData::Sort(level, h) => { let obj = lean_alloc_ctor(3, 2, 0); - lean_ctor_set(obj, 0, build_level(cache, level)); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, build_level(cache, level).cast()); + lean_ctor_set(obj, 1, build_address(h).cast()); + obj.cast() }, ExprData::Const(name, levels, h) => { let name_obj = build_name(cache, name); let levels_obj = build_level_array(cache, levels); let obj = lean_alloc_ctor(4, 3, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, levels_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, levels_obj.cast()); + lean_ctor_set(obj, 2, build_address(h).cast()); + obj.cast() }, ExprData::App(fn_expr, arg_expr, h) => { let fn_obj = build_expr(cache, fn_expr); let arg_obj = build_expr(cache, arg_expr); let obj = lean_alloc_ctor(5, 3, 0); - lean_ctor_set(obj, 0, fn_obj); - lean_ctor_set(obj, 1, arg_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj + lean_ctor_set(obj, 0, fn_obj.cast()); + lean_ctor_set(obj, 1, arg_obj.cast()); + lean_ctor_set(obj, 2, build_address(h).cast()); + obj.cast() }, ExprData::Lam(name, ty, body, bi, h) => { let name_obj = build_name(cache, name); @@ -95,12 +93,12 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { let hash_obj = build_address(h); // 4 object fields, 1 scalar byte for BinderInfo let obj = lean_alloc_ctor(6, 4, 1); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, 
ty_obj); - lean_ctor_set(obj, 2, body_obj); - lean_ctor_set(obj, 3, hash_obj); + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, ty_obj.cast()); + lean_ctor_set(obj, 2, body_obj.cast()); + lean_ctor_set(obj, 3, hash_obj.cast()); lean_ctor_set_uint8(obj, 4 * 8, binder_info_to_u8(bi)); - obj + obj.cast() }, ExprData::ForallE(name, ty, body, bi, h) => { let name_obj = build_name(cache, name); @@ -108,12 +106,12 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { let body_obj = build_expr(cache, body); let hash_obj = build_address(h); let obj = lean_alloc_ctor(7, 4, 1); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, ty_obj); - lean_ctor_set(obj, 2, body_obj); - lean_ctor_set(obj, 3, hash_obj); + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, ty_obj.cast()); + lean_ctor_set(obj, 2, body_obj.cast()); + lean_ctor_set(obj, 3, hash_obj.cast()); lean_ctor_set_uint8(obj, 4 * 8, binder_info_to_u8(bi)); - obj + obj.cast() }, ExprData::LetE(name, ty, val, body, non_dep, h) => { let name_obj = build_name(cache, name); @@ -123,40 +121,40 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { let hash_obj = build_address(h); // 5 object fields, 1 scalar byte for Bool let obj = lean_alloc_ctor(8, 5, 1); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, ty_obj); - lean_ctor_set(obj, 2, val_obj); - lean_ctor_set(obj, 3, body_obj); - lean_ctor_set(obj, 4, hash_obj); + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, ty_obj.cast()); + lean_ctor_set(obj, 2, val_obj.cast()); + lean_ctor_set(obj, 3, body_obj.cast()); + lean_ctor_set(obj, 4, hash_obj.cast()); lean_ctor_set_uint8(obj, 5 * 8, *non_dep as u8); - obj + obj.cast() }, ExprData::Lit(lit, h) => { let lit_obj = build_literal(lit); let obj = lean_alloc_ctor(9, 2, 0); - lean_ctor_set(obj, 0, lit_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, lit_obj.cast()); + lean_ctor_set(obj, 1, 
build_address(h).cast()); + obj.cast() }, ExprData::Mdata(md, inner, h) => { let md_obj = build_mdata_array(cache, md); let inner_obj = build_expr(cache, inner); let obj = lean_alloc_ctor(10, 3, 0); - lean_ctor_set(obj, 0, md_obj); - lean_ctor_set(obj, 1, inner_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj + lean_ctor_set(obj, 0, md_obj.cast()); + lean_ctor_set(obj, 1, inner_obj.cast()); + lean_ctor_set(obj, 2, build_address(h).cast()); + obj.cast() }, ExprData::Proj(type_name, idx, struct_expr, h) => { let name_obj = build_name(cache, type_name); let idx_obj = build_nat(idx); let struct_obj = build_expr(cache, struct_expr); let obj = lean_alloc_ctor(11, 4, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, idx_obj); - lean_ctor_set(obj, 2, struct_obj); - lean_ctor_set(obj, 3, build_address(h)); - obj + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, idx_obj.cast()); + lean_ctor_set(obj, 2, struct_obj.cast()); + lean_ctor_set(obj, 3, build_address(h).cast()); + obj.cast() }, } }; @@ -174,9 +172,9 @@ fn build_mdata_array( let arr = lean_alloc_array(md.len(), md.len()); for (i, (name, dv)) in md.iter().enumerate() { let pair = build_name_datavalue_pair(cache, name, dv); - lean_array_set_core(arr, i, pair); + lean_array_set_core(arr, i, pair.cast()); } - arr + arr.cast() } } @@ -190,9 +188,9 @@ fn build_name_datavalue_pair( let name_obj = build_name(cache, name); let dv_obj = build_data_value(cache, dv); let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj); - lean_ctor_set(pair, 1, dv_obj); - pair + lean_ctor_set(pair, 0, name_obj.cast()); + lean_ctor_set(pair, 1, dv_obj.cast()); + pair.cast() } } @@ -202,14 +200,14 @@ pub fn build_literal(lit: &Literal) -> *mut c_void { match lit { Literal::NatVal(n) => { let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_nat(n)); - obj + lean_ctor_set(obj, 0, build_nat(n).cast()); + obj.cast() }, Literal::StrVal(s) => { let s_cstr = 
crate::lean::safe_cstring(s.as_str()); let obj = lean_alloc_ctor(1, 1, 0); lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - obj + obj.cast() }, } } @@ -239,25 +237,25 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { 0 => { // bvar let idx_ptr = lean_ctor_get(ptr as *mut _, 0); - let idx = Nat::from_ptr(idx_ptr); + let idx = Nat::from_ptr(idx_ptr.cast()); Expr::bvar(idx) }, 1 => { // fvar let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = decode_ix_name(name_ptr); + let name = decode_ix_name(name_ptr.cast()); Expr::fvar(name) }, 2 => { // mvar let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = decode_ix_name(name_ptr); + let name = decode_ix_name(name_ptr.cast()); Expr::mvar(name) }, 3 => { // sort let level_ptr = lean_ctor_get(ptr as *mut _, 0); - let level = decode_ix_level(level_ptr); + let level = decode_ix_level(level_ptr.cast()); Expr::sort(level) }, 4 => { @@ -265,10 +263,9 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let name_ptr = lean_ctor_get(ptr as *mut _, 0); let levels_ptr = lean_ctor_get(ptr as *mut _, 1); - let name = decode_ix_name(name_ptr); - let levels_obj: &LeanArrayObject = as_ref_unsafe(levels_ptr.cast()); + let name = decode_ix_name(name_ptr.cast()); let levels: Vec = - levels_obj.data().iter().map(|&p| decode_ix_level(p)).collect(); + lean_array_data(levels_ptr.cast()).iter().map(|&p| decode_ix_level(p)).collect(); Expr::cnst(name, levels) }, @@ -276,8 +273,8 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { // app let fn_ptr = lean_ctor_get(ptr as *mut _, 0); let arg_ptr = lean_ctor_get(ptr as *mut _, 1); - let fn_expr = decode_ix_expr(fn_ptr); - let arg_expr = decode_ix_expr(arg_ptr); + let fn_expr = decode_ix_expr(fn_ptr.cast()); + let arg_expr = decode_ix_expr(arg_ptr.cast()); Expr::app(fn_expr, arg_expr) }, 6 => { @@ -288,14 +285,12 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { // hash at field 3 // bi is a scalar byte at offset 4*8 - let name = decode_ix_name(name_ptr); - let 
ty = decode_ix_expr(ty_ptr); - let body = decode_ix_expr(body_ptr); + let name = decode_ix_name(name_ptr.cast()); + let ty = decode_ix_expr(ty_ptr.cast()); + let body = decode_ix_expr(body_ptr.cast()); // Read BinderInfo scalar (4 obj fields: name, ty, body, hash) - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let bi_byte = ctor.get_scalar_u8(4, 0); + let bi_byte = lean_ctor_scalar_u8(ptr, 4, 0); let bi = decode_binder_info(bi_byte); Expr::lam(name, ty, body, bi) @@ -306,14 +301,12 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let ty_ptr = lean_ctor_get(ptr as *mut _, 1); let body_ptr = lean_ctor_get(ptr as *mut _, 2); - let name = decode_ix_name(name_ptr); - let ty = decode_ix_expr(ty_ptr); - let body = decode_ix_expr(body_ptr); + let name = decode_ix_name(name_ptr.cast()); + let ty = decode_ix_expr(ty_ptr.cast()); + let body = decode_ix_expr(body_ptr.cast()); // 4 obj fields: name, ty, body, hash - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let bi_byte = ctor.get_scalar_u8(4, 0); + let bi_byte = lean_ctor_scalar_u8(ptr, 4, 0); let bi = decode_binder_info(bi_byte); Expr::all(name, ty, body, bi) @@ -327,22 +320,20 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { // hash at field 4 // nonDep is scalar byte after 5 obj fields - let name = decode_ix_name(name_ptr); - let ty = decode_ix_expr(ty_ptr); - let val = decode_ix_expr(val_ptr); - let body = decode_ix_expr(body_ptr); + let name = decode_ix_name(name_ptr.cast()); + let ty = decode_ix_expr(ty_ptr.cast()); + let val = decode_ix_expr(val_ptr.cast()); + let body = decode_ix_expr(body_ptr.cast()); // 5 obj fields: name, ty, val, body, hash - let ctor: &crate::lean::ctor::LeanCtorObject = - as_ref_unsafe(ptr.cast()); - let non_dep = ctor.get_scalar_u8(5, 0) != 0; + let non_dep = lean_ctor_scalar_u8(ptr, 5, 0) != 0; Expr::letE(name, ty, val, body, non_dep) }, 9 => { // lit let lit_ptr = lean_ctor_get(ptr as *mut _, 0); - let lit = 
decode_literal(lit_ptr); + let lit = decode_literal(lit_ptr.cast()); Expr::lit(lit) }, 10 => { @@ -350,11 +341,10 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let data_ptr = lean_ctor_get(ptr as *mut _, 0); let expr_ptr = lean_ctor_get(ptr as *mut _, 1); - let data_obj: &LeanArrayObject = as_ref_unsafe(data_ptr.cast()); let data: Vec<(Name, DataValue)> = - data_obj.data().iter().map(|&p| decode_name_data_value(p)).collect(); + lean_array_data(data_ptr.cast()).iter().map(|&p| decode_name_data_value(p)).collect(); - let inner = decode_ix_expr(expr_ptr); + let inner = decode_ix_expr(expr_ptr.cast()); Expr::mdata(data, inner) }, 11 => { @@ -363,9 +353,9 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let idx_ptr = lean_ctor_get(ptr as *mut _, 1); let struct_ptr = lean_ctor_get(ptr as *mut _, 2); - let type_name = decode_ix_name(type_name_ptr); - let idx = Nat::from_ptr(idx_ptr); - let struct_expr = decode_ix_expr(struct_ptr); + let type_name = decode_ix_name(type_name_ptr.cast()); + let idx = Nat::from_ptr(idx_ptr.cast()); + let struct_expr = decode_ix_expr(struct_ptr.cast()); Expr::proj(type_name, idx, struct_expr) }, @@ -382,14 +372,13 @@ pub fn decode_literal(ptr: *const c_void) -> Literal { 0 => { // natVal let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); + let nat = Nat::from_ptr(nat_ptr.cast()); Literal::NatVal(nat) }, 1 => { // strVal let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_obj: &LeanStringObject = as_ref_unsafe(str_ptr.cast()); - Literal::StrVal(str_obj.as_string()) + Literal::StrVal(lean_obj_to_string(str_ptr as *const _)) }, _ => panic!("Invalid Literal tag: {}", tag), } @@ -403,8 +392,8 @@ fn decode_name_data_value(ptr: *const c_void) -> (Name, DataValue) { let name_ptr = lean_ctor_get(ptr as *mut _, 0); let dv_ptr = lean_ctor_get(ptr as *mut _, 1); - let name = decode_ix_name(name_ptr); - let dv = decode_data_value(dv_ptr); + let name = decode_ix_name(name_ptr.cast()); + let dv = 
decode_data_value(dv_ptr.cast()); (name, dv) } diff --git a/src/lean/ffi/ix/level.rs b/src/lean/ffi/ix/level.rs index cc139a78..026e597a 100644 --- a/src/lean/ffi/ix/level.rs +++ b/src/lean/ffi/ix/level.rs @@ -11,7 +11,7 @@ use std::ffi::c_void; use crate::ix::env::{Level, LevelData}; -use crate::lean::{ +use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_inc, lean_obj_tag, }; @@ -25,7 +25,7 @@ use super::name::{build_name, decode_ix_name}; pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> *mut c_void { let hash = *level.get_hash(); if let Some(&cached) = cache.levels.get(&hash) { - unsafe { lean_inc(cached) }; + unsafe { lean_inc(cached.cast()) }; return cached; } @@ -33,47 +33,47 @@ pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> *mut c_void { match level.as_data() { LevelData::Zero(h) => { let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_address(h)); - obj + lean_ctor_set(obj, 0, build_address(h).cast()); + obj.cast() }, LevelData::Succ(x, h) => { let x_obj = build_level(cache, x); let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, x_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, x_obj.cast()); + lean_ctor_set(obj, 1, build_address(h).cast()); + obj.cast() }, LevelData::Max(x, y, h) => { let x_obj = build_level(cache, x); let y_obj = build_level(cache, y); let obj = lean_alloc_ctor(2, 3, 0); - lean_ctor_set(obj, 0, x_obj); - lean_ctor_set(obj, 1, y_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj + lean_ctor_set(obj, 0, x_obj.cast()); + lean_ctor_set(obj, 1, y_obj.cast()); + lean_ctor_set(obj, 2, build_address(h).cast()); + obj.cast() }, LevelData::Imax(x, y, h) => { let x_obj = build_level(cache, x); let y_obj = build_level(cache, y); let obj = lean_alloc_ctor(3, 3, 0); - lean_ctor_set(obj, 0, x_obj); - lean_ctor_set(obj, 1, y_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj + lean_ctor_set(obj, 
0, x_obj.cast()); + lean_ctor_set(obj, 1, y_obj.cast()); + lean_ctor_set(obj, 2, build_address(h).cast()); + obj.cast() }, LevelData::Param(n, h) => { let n_obj = build_name(cache, n); let obj = lean_alloc_ctor(4, 2, 0); - lean_ctor_set(obj, 0, n_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, n_obj.cast()); + lean_ctor_set(obj, 1, build_address(h).cast()); + obj.cast() }, LevelData::Mvar(n, h) => { let n_obj = build_name(cache, n); let obj = lean_alloc_ctor(5, 2, 0); - lean_ctor_set(obj, 0, n_obj); - lean_ctor_set(obj, 1, build_address(h)); - obj + lean_ctor_set(obj, 0, n_obj.cast()); + lean_ctor_set(obj, 1, build_address(h).cast()); + obj.cast() }, } }; @@ -91,9 +91,9 @@ pub fn build_level_array( let arr = lean_alloc_array(levels.len(), levels.len()); for (i, level) in levels.iter().enumerate() { let level_obj = build_level(cache, level); - lean_array_set_core(arr, i, level_obj); + lean_array_set_core(arr, i, level_obj.cast()); } - arr + arr.cast() } } @@ -105,31 +105,31 @@ pub fn decode_ix_level(ptr: *const c_void) -> Level { 0 => Level::zero(), 1 => { let x_ptr = lean_ctor_get(ptr as *mut _, 0); - let x = decode_ix_level(x_ptr); + let x = decode_ix_level(x_ptr.cast()); Level::succ(x) }, 2 => { let x_ptr = lean_ctor_get(ptr as *mut _, 0); let y_ptr = lean_ctor_get(ptr as *mut _, 1); - let x = decode_ix_level(x_ptr); - let y = decode_ix_level(y_ptr); + let x = decode_ix_level(x_ptr.cast()); + let y = decode_ix_level(y_ptr.cast()); Level::max(x, y) }, 3 => { let x_ptr = lean_ctor_get(ptr as *mut _, 0); let y_ptr = lean_ctor_get(ptr as *mut _, 1); - let x = decode_ix_level(x_ptr); - let y = decode_ix_level(y_ptr); + let x = decode_ix_level(x_ptr.cast()); + let y = decode_ix_level(y_ptr.cast()); Level::imax(x, y) }, 4 => { let n_ptr = lean_ctor_get(ptr as *mut _, 0); - let n = decode_ix_name(n_ptr); + let n = decode_ix_name(n_ptr.cast()); Level::param(n) }, 5 => { let n_ptr = lean_ctor_get(ptr as *mut _, 0); - let n = 
decode_ix_name(n_ptr); + let n = decode_ix_name(n_ptr.cast()); Level::mvar(n) }, _ => panic!("Invalid Ix.Level tag: {}", tag), @@ -139,9 +139,7 @@ pub fn decode_ix_level(ptr: *const c_void) -> Level { /// Decode Array of Levels from Lean pointer. pub fn decode_level_array(ptr: *const c_void) -> Vec { - let arr_obj: &crate::lean::array::LeanArrayObject = - crate::lean::as_ref_unsafe(ptr.cast()); - arr_obj.data().iter().map(|&p| decode_ix_level(p)).collect() + crate::lean::lean_array_to_vec(ptr, decode_ix_level) } /// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. diff --git a/src/lean/ffi/ix/name.rs b/src/lean/ffi/ix/name.rs index 052606eb..eef4dc77 100644 --- a/src/lean/ffi/ix/name.rs +++ b/src/lean/ffi/ix/name.rs @@ -9,11 +9,11 @@ use std::ffi::c_void; use crate::ix::env::{Name, NameData}; use crate::lean::nat::Nat; -use crate::lean::string::LeanStringObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_inc, lean_mk_string, lean_obj_tag, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_inc, lean_mk_string, lean_obj_tag, }; +use crate::lean::lean_obj_to_string; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -24,7 +24,7 @@ use super::address::build_address; pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> *mut c_void { let hash = name.get_hash(); if let Some(&cached) = cache.names.get(hash) { - unsafe { lean_inc(cached) }; + unsafe { lean_inc(cached.cast()) }; return cached; } @@ -33,28 +33,28 @@ pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> *mut c_void { NameData::Anonymous(h) => { // anonymous: (hash : Address) let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_address(h)); - obj + lean_ctor_set(obj, 0, build_address(h).cast()); + obj.cast() }, NameData::Str(parent, s, h) => { // str: (parent : Name) (s 
: String) (hash : Address) let parent_obj = build_name(cache, parent); let s_cstr = crate::lean::safe_cstring(s.as_str()); let obj = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(obj, 0, parent_obj); + lean_ctor_set(obj, 0, parent_obj.cast()); lean_ctor_set(obj, 1, lean_mk_string(s_cstr.as_ptr())); - lean_ctor_set(obj, 2, build_address(h)); - obj + lean_ctor_set(obj, 2, build_address(h).cast()); + obj.cast() }, NameData::Num(parent, n, h) => { // num: (parent : Name) (i : Nat) (hash : Address) let parent_obj = build_name(cache, parent); let n_obj = build_nat(n); let obj = lean_alloc_ctor(2, 3, 0); - lean_ctor_set(obj, 0, parent_obj); - lean_ctor_set(obj, 1, n_obj); - lean_ctor_set(obj, 2, build_address(h)); - obj + lean_ctor_set(obj, 0, parent_obj.cast()); + lean_ctor_set(obj, 1, n_obj.cast()); + lean_ctor_set(obj, 2, build_address(h).cast()); + obj.cast() }, } }; @@ -72,9 +72,9 @@ pub fn build_name_array( let arr = lean_alloc_array(names.len(), names.len()); for (i, name) in names.iter().enumerate() { let name_obj = build_name(cache, name); - lean_array_set_core(arr, i, name_obj); + lean_array_set_core(arr, i, name_obj.cast()); } - arr + arr.cast() } } @@ -93,9 +93,8 @@ pub fn decode_ix_name(ptr: *const c_void) -> Name { let s_ptr = lean_ctor_get(ptr as *mut _, 1); // hash at field 2 is ignored - Rust recomputes it - let parent = decode_ix_name(parent_ptr); - let s_obj: &LeanStringObject = as_ref_unsafe(s_ptr.cast()); - let s = s_obj.as_string(); + let parent = decode_ix_name(parent_ptr.cast()); + let s = lean_obj_to_string(s_ptr as *const _); Name::str(parent, s) }, @@ -105,8 +104,8 @@ pub fn decode_ix_name(ptr: *const c_void) -> Name { let i_ptr = lean_ctor_get(ptr as *mut _, 1); // hash at field 2 is ignored - let parent = decode_ix_name(parent_ptr); - let i = Nat::from_ptr(i_ptr); + let parent = decode_ix_name(parent_ptr.cast()); + let i = Nat::from_ptr(i_ptr.cast()); Name::num(parent, i) }, @@ -117,8 +116,7 @@ pub fn decode_ix_name(ptr: *const c_void) -> Name { 
/// Decode Array of Names from Lean pointer. pub fn decode_name_array(ptr: *const c_void) -> Vec { - let arr_obj: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr_obj.data().iter().map(|&p| decode_ix_name(p)).collect() + crate::lean::lean_array_to_vec(ptr, decode_ix_name) } /// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. diff --git a/src/lean/ffi/ixon/compare.rs b/src/lean/ffi/ixon/compare.rs index 59232b22..f526e0d9 100644 --- a/src/lean/ffi/ixon/compare.rs +++ b/src/lean/ffi/ixon/compare.rs @@ -7,8 +7,8 @@ use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::{lean_alloc_ctor, lean_ctor_set}; +use crate::lean::lean::{lean_alloc_ctor, lean_ctor_set}; +use crate::lean::lean_sarray_data; use super::super::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, @@ -23,7 +23,7 @@ pub struct RustBlockEnv { #[unsafe(no_mangle)] pub extern "C" fn rs_compare_expr_compilation( lean_expr_ptr: *const c_void, - lean_output: &LeanSArrayObject, + lean_output: *const c_void, univ_ctx_size: u64, ) -> bool { // Decode Lean.Expr to Rust's representation @@ -58,7 +58,7 @@ pub extern "C" fn rs_compare_expr_compilation( put_expr(&rust_expr, &mut rust_bytes); // Compare byte-for-byte - let lean_bytes = lean_output.data(); + let lean_bytes = lean_sarray_data(lean_output); rust_bytes == lean_bytes } @@ -72,9 +72,9 @@ fn build_block_compare_result( ) -> *mut c_void { unsafe { if matched { - lean_alloc_ctor(0, 0, 0) // match + lean_alloc_ctor(0, 0, 0).cast() // match } else if not_found { - lean_alloc_ctor(2, 0, 0) // notFound + lean_alloc_ctor(2, 0, 0).cast() // notFound } else { // mismatch let obj = lean_alloc_ctor(1, 0, 24); @@ -82,7 +82,7 @@ fn build_block_compare_result( *base.add(8).cast::() = lean_size; *base.add(16).cast::() 
= rust_size; *base.add(24).cast::() = first_diff_offset; - obj + obj.cast() } } } @@ -95,11 +95,11 @@ fn build_block_compare_detail( ) -> *mut c_void { unsafe { let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, result); + lean_ctor_set(obj, 0, result.cast()); let base = obj.cast::(); *base.add(16).cast::() = lean_sharing_len; *base.add(24).cast::() = rust_sharing_len; - obj + obj.cast() } } @@ -113,14 +113,14 @@ fn build_block_compare_detail( pub unsafe extern "C" fn rs_compare_block_v2( rust_env: *const RustBlockEnv, lowlink_name: *const c_void, - lean_bytes: &LeanSArrayObject, + lean_bytes: *const c_void, lean_sharing_len: u64, ) -> *mut c_void { let global_cache = GlobalCache::default(); let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let lean_data = lean_bytes.data(); + let lean_data = lean_sarray_data(lean_bytes); // Look up Rust's compiled block let (rust_bytes, rust_sharing_len) = match rust_env.blocks.get(&name) { diff --git a/src/lean/ffi/ixon/constant.rs b/src/lean/ffi/ixon/constant.rs index e7692759..22e96ef5 100644 --- a/src/lean/ffi/ixon/constant.rs +++ b/src/lean/ffi/ixon/constant.rs @@ -16,12 +16,11 @@ use crate::ix::ixon::constant::{ Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, RecursorRule as IxonRecursorRule, }; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_obj_tag, - lean_sarray_cptr, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_set_core, + lean_ctor_get, lean_ctor_set, lean_obj_tag, lean_sarray_cptr, }; +use crate::lean::{lean_array_to_vec, lean_sarray_data}; use super::expr::{ build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, @@ -35,7 +34,7 @@ pub fn build_address_from_ixon(addr: &Address) -> *mut c_void { let ba = lean_alloc_sarray(1, 32, 32); let data_ptr = 
lean_sarray_cptr(ba); std::ptr::copy_nonoverlapping(addr.as_bytes().as_ptr(), data_ptr, 32); - ba + ba.cast() } } @@ -45,9 +44,9 @@ pub fn build_address_array(addrs: &[Address]) -> *mut c_void { let arr = lean_alloc_array(addrs.len(), addrs.len()); for (i, addr) in addrs.iter().enumerate() { let addr_obj = build_address_from_ixon(addr); - lean_array_set_core(arr, i, addr_obj); + lean_array_set_core(arr, i, addr_obj.cast()); } - arr + arr.cast() } } @@ -60,8 +59,8 @@ pub fn build_ixon_definition(def: &IxonDefinition) -> *mut c_void { let value_obj = build_ixon_expr(&def.value); // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) let obj = lean_alloc_ctor(0, 2, 16); - lean_ctor_set(obj, 0, typ_obj); - lean_ctor_set(obj, 1, value_obj); + lean_ctor_set(obj, 0, typ_obj.cast()); + lean_ctor_set(obj, 1, value_obj.cast()); let base = obj.cast::(); let scalar_base = base.add(2 * 8 + 8); // offset 24 @@ -81,7 +80,7 @@ pub fn build_ixon_definition(def: &IxonDefinition) -> *mut c_void { crate::ix::env::DefinitionSafety::Partial => 2, }; *scalar_base.add(9) = safety_val; - obj + obj.cast() } } @@ -91,10 +90,10 @@ pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> *mut c_void { let rhs_obj = build_ixon_expr(&rule.rhs); // 1 obj field, 8 scalar bytes let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, rhs_obj); + lean_ctor_set(obj, 0, rhs_obj.cast()); let base = obj.cast::(); *base.add(8 + 8).cast::() = rule.fields; - obj + obj.cast() } } @@ -107,11 +106,11 @@ pub fn build_ixon_recursor(rec: &IxonRecursor) -> *mut c_void { let rules_arr = lean_alloc_array(rec.rules.len(), rec.rules.len()); for (i, rule) in rec.rules.iter().enumerate() { let rule_obj = build_ixon_recursor_rule(rule); - lean_array_set_core(rules_arr, i, rule_obj); + lean_array_set_core(rules_arr, i, rule_obj.cast()); } // 2 obj fields (typ, rules), 48 scalar bytes (5×8 + 1 + 1 + 6 padding) let obj = lean_alloc_ctor(0, 2, 48); - lean_ctor_set(obj, 0, typ_obj); + 
lean_ctor_set(obj, 0, typ_obj.cast()); lean_ctor_set(obj, 1, rules_arr); let base = obj.cast::(); let scalar_base = base.add(2 * 8 + 8); @@ -124,7 +123,7 @@ pub fn build_ixon_recursor(rec: &IxonRecursor) -> *mut c_void { // bool fields last *scalar_base.add(40) = if rec.k { 1 } else { 0 }; *scalar_base.add(41) = if rec.is_unsafe { 1 } else { 0 }; - obj + obj.cast() } } @@ -135,14 +134,14 @@ pub fn build_ixon_axiom(ax: &IxonAxiom) -> *mut c_void { let typ_obj = build_ixon_expr(&ax.typ); // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, typ_obj); + lean_ctor_set(obj, 0, typ_obj.cast()); let base = obj.cast::(); let scalar_base = base.add(8 + 8); // lvls at offset 0 *scalar_base.cast::() = ax.lvls; // isUnsafe at offset 8 *scalar_base.add(8) = if ax.is_unsafe { 1 } else { 0 }; - obj + obj.cast() } } @@ -154,7 +153,7 @@ pub fn build_ixon_quotient(quot: &IxonQuotient) -> *mut c_void { let typ_obj = build_ixon_expr(".typ); // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7)) let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, typ_obj); + lean_ctor_set(obj, 0, typ_obj.cast()); let base = obj.cast::(); let scalar_base = base.add(8 + 8); // lvls at offset 0 @@ -167,7 +166,7 @@ pub fn build_ixon_quotient(quot: &IxonQuotient) -> *mut c_void { crate::ix::env::QuotKind::Ind => 3, }; *scalar_base.add(8) = kind_val; - obj + obj.cast() } } @@ -178,7 +177,7 @@ pub fn build_ixon_constructor(ctor: &IxonConstructor) -> *mut c_void { let typ_obj = build_ixon_expr(&ctor.typ); // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) let obj = lean_alloc_ctor(0, 1, 40); - lean_ctor_set(obj, 0, typ_obj); + lean_ctor_set(obj, 0, typ_obj.cast()); let base = obj.cast::(); let scalar_base = base.add(8 + 8); // u64 fields first @@ -188,7 +187,7 @@ pub fn build_ixon_constructor(ctor: &IxonConstructor) -> *mut c_void { *scalar_base.add(24).cast::() = ctor.fields; // bool field last 
*scalar_base.add(32) = if ctor.is_unsafe { 1 } else { 0 }; - obj + obj.cast() } } @@ -201,11 +200,11 @@ pub fn build_ixon_inductive(ind: &IxonInductive) -> *mut c_void { let ctors_arr = lean_alloc_array(ind.ctors.len(), ind.ctors.len()); for (i, ctor) in ind.ctors.iter().enumerate() { let ctor_obj = build_ixon_constructor(ctor); - lean_array_set_core(ctors_arr, i, ctor_obj); + lean_array_set_core(ctors_arr, i, ctor_obj.cast()); } // 2 obj fields, 40 scalar bytes (4×8 + 3 + 5 padding) let obj = lean_alloc_ctor(0, 2, 40); - lean_ctor_set(obj, 0, typ_obj); + lean_ctor_set(obj, 0, typ_obj.cast()); lean_ctor_set(obj, 1, ctors_arr); let base = obj.cast::(); let scalar_base = base.add(2 * 8 + 8); @@ -218,7 +217,7 @@ pub fn build_ixon_inductive(ind: &IxonInductive) -> *mut c_void { *scalar_base.add(32) = if ind.recr { 1 } else { 0 }; *scalar_base.add(33) = if ind.refl { 1 } else { 0 }; *scalar_base.add(34) = if ind.is_unsafe { 1 } else { 0 }; - obj + obj.cast() } } @@ -227,10 +226,10 @@ pub fn build_inductive_proj(proj: &InductiveProj) -> *mut c_void { unsafe { let block_obj = build_address_from_ixon(&proj.block); let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj); + lean_ctor_set(obj, 0, block_obj.cast()); let base = obj.cast::(); *base.add(8 + 8).cast::() = proj.idx; - obj + obj.cast() } } @@ -239,11 +238,11 @@ pub fn build_constructor_proj(proj: &ConstructorProj) -> *mut c_void { unsafe { let block_obj = build_address_from_ixon(&proj.block); let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, block_obj); + lean_ctor_set(obj, 0, block_obj.cast()); let base = obj.cast::(); *base.add(8 + 8).cast::() = proj.idx; *base.add(8 + 16).cast::() = proj.cidx; - obj + obj.cast() } } @@ -252,10 +251,10 @@ pub fn build_recursor_proj(proj: &RecursorProj) -> *mut c_void { unsafe { let block_obj = build_address_from_ixon(&proj.block); let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj); + lean_ctor_set(obj, 0, block_obj.cast()); let base = 
obj.cast::(); *base.add(8 + 8).cast::() = proj.idx; - obj + obj.cast() } } @@ -264,10 +263,10 @@ pub fn build_definition_proj(proj: &DefinitionProj) -> *mut c_void { unsafe { let block_obj = build_address_from_ixon(&proj.block); let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj); + lean_ctor_set(obj, 0, block_obj.cast()); let base = obj.cast::(); *base.add(8 + 8).cast::() = proj.idx; - obj + obj.cast() } } @@ -278,20 +277,20 @@ pub fn build_mut_const(mc: &MutConst) -> *mut c_void { MutConst::Defn(def) => { let def_obj = build_ixon_definition(def); let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, def_obj); - obj + lean_ctor_set(obj, 0, def_obj.cast()); + obj.cast() }, MutConst::Indc(ind) => { let ind_obj = build_ixon_inductive(ind); let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, ind_obj); - obj + lean_ctor_set(obj, 0, ind_obj.cast()); + obj.cast() }, MutConst::Recr(rec) => { let rec_obj = build_ixon_recursor(rec); let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, rec_obj); - obj + lean_ctor_set(obj, 0, rec_obj.cast()); + obj.cast() }, } } @@ -304,60 +303,60 @@ pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> *mut c_void { IxonConstantInfo::Defn(def) => { let def_obj = build_ixon_definition(def); let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, def_obj); - obj + lean_ctor_set(obj, 0, def_obj.cast()); + obj.cast() }, IxonConstantInfo::Recr(rec) => { let rec_obj = build_ixon_recursor(rec); let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, rec_obj); - obj + lean_ctor_set(obj, 0, rec_obj.cast()); + obj.cast() }, IxonConstantInfo::Axio(ax) => { let ax_obj = build_ixon_axiom(ax); let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, ax_obj); - obj + lean_ctor_set(obj, 0, ax_obj.cast()); + obj.cast() }, IxonConstantInfo::Quot(quot) => { let quot_obj = build_ixon_quotient(quot); let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, quot_obj); - obj + lean_ctor_set(obj, 0, 
quot_obj.cast()); + obj.cast() }, IxonConstantInfo::CPrj(proj) => { let proj_obj = build_constructor_proj(proj); let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj + lean_ctor_set(obj, 0, proj_obj.cast()); + obj.cast() }, IxonConstantInfo::RPrj(proj) => { let proj_obj = build_recursor_proj(proj); let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj + lean_ctor_set(obj, 0, proj_obj.cast()); + obj.cast() }, IxonConstantInfo::IPrj(proj) => { let proj_obj = build_inductive_proj(proj); let obj = lean_alloc_ctor(6, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj + lean_ctor_set(obj, 0, proj_obj.cast()); + obj.cast() }, IxonConstantInfo::DPrj(proj) => { let proj_obj = build_definition_proj(proj); let obj = lean_alloc_ctor(7, 1, 0); - lean_ctor_set(obj, 0, proj_obj); - obj + lean_ctor_set(obj, 0, proj_obj.cast()); + obj.cast() }, IxonConstantInfo::Muts(muts) => { let arr = lean_alloc_array(muts.len(), muts.len()); for (i, mc) in muts.iter().enumerate() { let mc_obj = build_mut_const(mc); - lean_array_set_core(arr, i, mc_obj); + lean_array_set_core(arr, i, mc_obj.cast()); } let obj = lean_alloc_ctor(8, 1, 0); lean_ctor_set(obj, 0, arr); - obj + obj.cast() }, } } @@ -371,11 +370,11 @@ pub fn build_ixon_constant(constant: &IxonConstant) -> *mut c_void { let refs_obj = build_address_array(&constant.refs); let univs_obj = build_ixon_univ_array(&constant.univs); let obj = lean_alloc_ctor(0, 4, 0); - lean_ctor_set(obj, 0, info_obj); - lean_ctor_set(obj, 1, sharing_obj); - lean_ctor_set(obj, 2, refs_obj); - lean_ctor_set(obj, 3, univs_obj); - obj + lean_ctor_set(obj, 0, info_obj.cast()); + lean_ctor_set(obj, 1, sharing_obj.cast()); + lean_ctor_set(obj, 2, refs_obj.cast()); + lean_ctor_set(obj, 3, univs_obj.cast()); + obj.cast() } } @@ -385,15 +384,13 @@ pub fn build_ixon_constant(constant: &IxonConstant) -> *mut c_void { /// Decode a ByteArray (Address) to Address. 
pub fn decode_ixon_address(ptr: *const c_void) -> Address { - let ba: &LeanSArrayObject = as_ref_unsafe(ptr.cast()); - let bytes = ba.data(); + let bytes = lean_sarray_data(ptr); Address::from_slice(&bytes[..32]).expect("Address should be 32 bytes") } /// Decode Array Address. pub fn decode_ixon_address_array(ptr: *const c_void) -> Vec
{ - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(decode_ixon_address) + lean_array_to_vec(ptr, decode_ixon_address) } /// Decode Ixon.Definition. @@ -401,8 +398,8 @@ pub fn decode_ixon_address_array(ptr: *const c_void) -> Vec
{ /// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) pub fn decode_ixon_definition(ptr: *const c_void) -> IxonDefinition { unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let value_ptr = lean_ctor_get(ptr.cast_mut(), 1); + let typ_ptr = lean_ctor_get(ptr as *mut _, 0); + let value_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); // Scalars start after header (8) + 2 obj fields (16) = offset 24 @@ -431,8 +428,8 @@ pub fn decode_ixon_definition(ptr: *const c_void) -> IxonDefinition { kind, safety, lvls, - typ: Arc::new(decode_ixon_expr(typ_ptr)), - value: Arc::new(decode_ixon_expr(value_ptr)), + typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), + value: Arc::new(decode_ixon_expr(value_ptr.cast())), } } } @@ -440,10 +437,10 @@ pub fn decode_ixon_definition(ptr: *const c_void) -> IxonDefinition { /// Decode Ixon.RecursorRule. pub fn decode_ixon_recursor_rule(ptr: *const c_void) -> IxonRecursorRule { unsafe { - let rhs_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let rhs_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let fields = *base.add(8 + 8).cast::(); - IxonRecursorRule { fields, rhs: Arc::new(decode_ixon_expr(rhs_ptr)) } + IxonRecursorRule { fields, rhs: Arc::new(decode_ixon_expr(rhs_ptr.cast())) } } } @@ -451,8 +448,8 @@ pub fn decode_ixon_recursor_rule(ptr: *const c_void) -> IxonRecursorRule { /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) pub fn decode_ixon_recursor(ptr: *const c_void) -> IxonRecursor { unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let rules_ptr = lean_ctor_get(ptr.cast_mut(), 1); + let typ_ptr = lean_ctor_get(ptr as *mut _, 0); + let rules_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); let scalar_base = base.add(2 * 8 + 8); // u64 fields first @@ -465,9 +462,7 @@ pub fn decode_ixon_recursor(ptr: *const c_void) -> IxonRecursor { let k = *scalar_base.add(40) != 0; 
let is_unsafe = *scalar_base.add(41) != 0; - let rules_arr: &crate::lean::array::LeanArrayObject = - as_ref_unsafe(rules_ptr.cast()); - let rules = rules_arr.to_vec(decode_ixon_recursor_rule); + let rules = lean_array_to_vec(rules_ptr.cast(), decode_ixon_recursor_rule); IxonRecursor { k, @@ -477,7 +472,7 @@ pub fn decode_ixon_recursor(ptr: *const c_void) -> IxonRecursor { indices, motives, minors, - typ: Arc::new(decode_ixon_expr(typ_ptr)), + typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), rules, } } @@ -487,14 +482,14 @@ pub fn decode_ixon_recursor(ptr: *const c_void) -> IxonRecursor { /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) pub fn decode_ixon_axiom(ptr: *const c_void) -> IxonAxiom { unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let typ_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let scalar_base = base.add(8 + 8); // lvls at offset 0 let lvls = *scalar_base.cast::(); // isUnsafe at offset 8 let is_unsafe = *scalar_base.add(8) != 0; - IxonAxiom { is_unsafe, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr)) } + IxonAxiom { is_unsafe, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr.cast())) } } } @@ -503,7 +498,7 @@ pub fn decode_ixon_axiom(ptr: *const c_void) -> IxonAxiom { pub fn decode_ixon_quotient(ptr: *const c_void) -> IxonQuotient { unsafe { // typ is the only object field (at index 0) - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let typ_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let scalar_base = base.add(8 + 8); // lvls at offset 0 @@ -517,7 +512,7 @@ pub fn decode_ixon_quotient(ptr: *const c_void) -> IxonQuotient { 3 => crate::ix::env::QuotKind::Ind, _ => panic!("Invalid QuotKind: {}", kind_val), }; - IxonQuotient { kind, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr)) } + IxonQuotient { kind, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr.cast())) } } } @@ -525,7 +520,7 @@ pub fn decode_ixon_quotient(ptr: *const c_void) -> IxonQuotient { /// Scalars ordered by size: lvls(8) + 
cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) pub fn decode_ixon_constructor(ptr: *const c_void) -> IxonConstructor { unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let typ_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let scalar_base = base.add(8 + 8); // u64 fields first @@ -541,7 +536,7 @@ pub fn decode_ixon_constructor(ptr: *const c_void) -> IxonConstructor { cidx, params, fields, - typ: Arc::new(decode_ixon_expr(typ_ptr)), + typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), } } } @@ -550,8 +545,8 @@ pub fn decode_ixon_constructor(ptr: *const c_void) -> IxonConstructor { /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) pub fn decode_ixon_inductive(ptr: *const c_void) -> IxonInductive { unsafe { - let typ_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let ctors_ptr = lean_ctor_get(ptr.cast_mut(), 1); + let typ_ptr = lean_ctor_get(ptr as *mut _, 0); + let ctors_ptr = lean_ctor_get(ptr as *mut _, 1); let base = ptr.cast::(); let scalar_base = base.add(2 * 8 + 8); // u64 fields first @@ -564,9 +559,7 @@ pub fn decode_ixon_inductive(ptr: *const c_void) -> IxonInductive { let refl = *scalar_base.add(33) != 0; let is_unsafe = *scalar_base.add(34) != 0; - let ctors_arr: &crate::lean::array::LeanArrayObject = - as_ref_unsafe(ctors_ptr.cast()); - let ctors = ctors_arr.to_vec(decode_ixon_constructor); + let ctors = lean_array_to_vec(ctors_ptr.cast(), decode_ixon_constructor); IxonInductive { recr, @@ -576,7 +569,7 @@ pub fn decode_ixon_inductive(ptr: *const c_void) -> IxonInductive { params, indices, nested, - typ: Arc::new(decode_ixon_expr(typ_ptr)), + typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), ctors, } } @@ -585,53 +578,53 @@ pub fn decode_ixon_inductive(ptr: *const c_void) -> IxonInductive { /// Decode Ixon.InductiveProj. 
pub fn decode_ixon_inductive_proj(ptr: *const c_void) -> InductiveProj { unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let block_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let idx = *base.add(8 + 8).cast::(); - InductiveProj { idx, block: decode_ixon_address(block_ptr) } + InductiveProj { idx, block: decode_ixon_address(block_ptr.cast()) } } } /// Decode Ixon.ConstructorProj. pub fn decode_ixon_constructor_proj(ptr: *const c_void) -> ConstructorProj { unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let block_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let idx = *base.add(8 + 8).cast::(); let cidx = *base.add(8 + 16).cast::(); - ConstructorProj { idx, cidx, block: decode_ixon_address(block_ptr) } + ConstructorProj { idx, cidx, block: decode_ixon_address(block_ptr.cast()) } } } /// Decode Ixon.RecursorProj. pub fn decode_ixon_recursor_proj(ptr: *const c_void) -> RecursorProj { unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let block_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let idx = *base.add(8 + 8).cast::(); - RecursorProj { idx, block: decode_ixon_address(block_ptr) } + RecursorProj { idx, block: decode_ixon_address(block_ptr.cast()) } } } /// Decode Ixon.DefinitionProj. pub fn decode_ixon_definition_proj(ptr: *const c_void) -> DefinitionProj { unsafe { - let block_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let block_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let idx = *base.add(8 + 8).cast::(); - DefinitionProj { idx, block: decode_ixon_address(block_ptr) } + DefinitionProj { idx, block: decode_ixon_address(block_ptr.cast()) } } } /// Decode Ixon.MutConst. 
pub fn decode_ixon_mut_const(ptr: *const c_void) -> MutConst { unsafe { - let tag = lean_obj_tag(ptr.cast_mut()); - let inner_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let tag = lean_obj_tag(ptr as *mut _); + let inner_ptr = lean_ctor_get(ptr as *mut _, 0); match tag { - 0 => MutConst::Defn(decode_ixon_definition(inner_ptr)), - 1 => MutConst::Indc(decode_ixon_inductive(inner_ptr)), - 2 => MutConst::Recr(decode_ixon_recursor(inner_ptr)), + 0 => MutConst::Defn(decode_ixon_definition(inner_ptr.cast())), + 1 => MutConst::Indc(decode_ixon_inductive(inner_ptr.cast())), + 2 => MutConst::Recr(decode_ixon_recursor(inner_ptr.cast())), _ => panic!("Invalid Ixon.MutConst tag: {}", tag), } } @@ -640,21 +633,19 @@ pub fn decode_ixon_mut_const(ptr: *const c_void) -> MutConst { /// Decode Ixon.ConstantInfo. pub fn decode_ixon_constant_info(ptr: *const c_void) -> IxonConstantInfo { unsafe { - let tag = lean_obj_tag(ptr.cast_mut()); - let inner_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let tag = lean_obj_tag(ptr as *mut _); + let inner_ptr = lean_ctor_get(ptr as *mut _, 0); match tag { - 0 => IxonConstantInfo::Defn(decode_ixon_definition(inner_ptr)), - 1 => IxonConstantInfo::Recr(decode_ixon_recursor(inner_ptr)), - 2 => IxonConstantInfo::Axio(decode_ixon_axiom(inner_ptr)), - 3 => IxonConstantInfo::Quot(decode_ixon_quotient(inner_ptr)), - 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner_ptr)), - 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj(inner_ptr)), - 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj(inner_ptr)), - 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner_ptr)), + 0 => IxonConstantInfo::Defn(decode_ixon_definition(inner_ptr.cast())), + 1 => IxonConstantInfo::Recr(decode_ixon_recursor(inner_ptr.cast())), + 2 => IxonConstantInfo::Axio(decode_ixon_axiom(inner_ptr.cast())), + 3 => IxonConstantInfo::Quot(decode_ixon_quotient(inner_ptr.cast())), + 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner_ptr.cast())), + 5 => 
IxonConstantInfo::RPrj(decode_ixon_recursor_proj(inner_ptr.cast())), + 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj(inner_ptr.cast())), + 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner_ptr.cast())), 8 => { - let muts_arr: &crate::lean::array::LeanArrayObject = - as_ref_unsafe(inner_ptr.cast()); - let muts = muts_arr.to_vec(decode_ixon_mut_const); + let muts = lean_array_to_vec(inner_ptr.cast(), decode_ixon_mut_const); IxonConstantInfo::Muts(muts) }, _ => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), @@ -665,16 +656,16 @@ pub fn decode_ixon_constant_info(ptr: *const c_void) -> IxonConstantInfo { /// Decode Ixon.Constant. pub fn decode_ixon_constant(ptr: *const c_void) -> IxonConstant { unsafe { - let info_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let sharing_ptr = lean_ctor_get(ptr.cast_mut(), 1); - let refs_ptr = lean_ctor_get(ptr.cast_mut(), 2); - let univs_ptr = lean_ctor_get(ptr.cast_mut(), 3); + let info_ptr = lean_ctor_get(ptr as *mut _, 0); + let sharing_ptr = lean_ctor_get(ptr as *mut _, 1); + let refs_ptr = lean_ctor_get(ptr as *mut _, 2); + let univs_ptr = lean_ctor_get(ptr as *mut _, 3); IxonConstant { - info: decode_ixon_constant_info(info_ptr), - sharing: decode_ixon_expr_array(sharing_ptr), - refs: decode_ixon_address_array(refs_ptr), - univs: decode_ixon_univ_array(univs_ptr), + info: decode_ixon_constant_info(info_ptr.cast()), + sharing: decode_ixon_expr_array(sharing_ptr.cast()), + refs: decode_ixon_address_array(refs_ptr.cast()), + univs: decode_ixon_univ_array(univs_ptr.cast()), } } } diff --git a/src/lean/ffi/ixon/env.rs b/src/lean/ffi/ixon/env.rs index 68781735..5a0305ed 100644 --- a/src/lean/ffi/ixon/env.rs +++ b/src/lean/ffi/ixon/env.rs @@ -11,13 +11,11 @@ use crate::ix::ixon::comm::Comm; use crate::ix::ixon::constant::Constant as IxonConstant; use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; -use crate::lean::array::LeanArrayObject; -use 
crate::lean::sarray::LeanSArrayObject; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_mk_string, - lean_sarray_cptr, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_set_core, + lean_ctor_get, lean_ctor_set, lean_mk_string, lean_sarray_cptr, }; +use crate::lean::{lean_array_to_vec, lean_sarray_data}; use super::constant::{ build_address_from_ixon, build_ixon_constant, decode_ixon_address, @@ -44,8 +42,8 @@ pub fn decode_comm(ptr: *const c_void) -> DecodedComm { let secret_ptr = lean_ctor_get(ptr as *mut _, 0); let payload_ptr = lean_ctor_get(ptr as *mut _, 1); DecodedComm { - secret: decode_ixon_address(secret_ptr), - payload: decode_ixon_address(payload_ptr), + secret: decode_ixon_address(secret_ptr.cast()), + payload: decode_ixon_address(payload_ptr.cast()), } } } @@ -56,9 +54,9 @@ pub fn build_comm(comm: &DecodedComm) -> *mut c_void { let secret_obj = build_address_from_ixon(&comm.secret); let payload_obj = build_address_from_ixon(&comm.payload); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, secret_obj); - lean_ctor_set(obj, 1, payload_obj); - obj + lean_ctor_set(obj, 0, secret_obj.cast()); + lean_ctor_set(obj, 1, payload_obj.cast()); + obj.cast() } } @@ -78,8 +76,8 @@ pub fn decode_raw_const(ptr: *const c_void) -> DecodedRawConst { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); let const_ptr = lean_ctor_get(ptr as *mut _, 1); DecodedRawConst { - addr: decode_ixon_address(addr_ptr), - constant: decode_ixon_constant(const_ptr), + addr: decode_ixon_address(addr_ptr.cast()), + constant: decode_ixon_constant(const_ptr.cast()), } } } @@ -90,9 +88,9 @@ pub fn build_raw_const(rc: &DecodedRawConst) -> *mut c_void { let addr_obj = build_address_from_ixon(&rc.addr); let const_obj = build_ixon_constant(&rc.constant); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, 
const_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + lean_ctor_set(obj, 1, const_obj.cast()); + obj.cast() } } @@ -114,9 +112,9 @@ pub fn decode_raw_named(ptr: *const c_void) -> DecodedRawNamed { let addr_ptr = lean_ctor_get(ptr as *mut _, 1); let meta_ptr = lean_ctor_get(ptr as *mut _, 2); DecodedRawNamed { - name: decode_ix_name(name_ptr), - addr: decode_ixon_address(addr_ptr), - const_meta: decode_constant_meta(meta_ptr), + name: decode_ix_name(name_ptr.cast()), + addr: decode_ixon_address(addr_ptr.cast()), + const_meta: decode_constant_meta(meta_ptr.cast()), } } } @@ -131,10 +129,10 @@ pub fn build_raw_named( let addr_obj = build_address_from_ixon(&rn.addr); let meta_obj = build_constant_meta(&rn.const_meta); let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj); - lean_ctor_set(obj, 1, addr_obj); - lean_ctor_set(obj, 2, meta_obj); - obj + lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 1, addr_obj.cast()); + lean_ctor_set(obj, 2, meta_obj.cast()); + obj.cast() } } @@ -153,10 +151,9 @@ pub fn decode_raw_blob(ptr: *const c_void) -> DecodedRawBlob { unsafe { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); let bytes_ptr = lean_ctor_get(ptr as *mut _, 1); - let bytes_arr: &LeanSArrayObject = as_ref_unsafe(bytes_ptr.cast()); DecodedRawBlob { - addr: decode_ixon_address(addr_ptr), - bytes: bytes_arr.data().to_vec(), + addr: decode_ixon_address(addr_ptr.cast()), + bytes: lean_sarray_data(bytes_ptr.cast()).to_vec(), } } } @@ -172,9 +169,9 @@ pub fn build_raw_blob(rb: &DecodedRawBlob) -> *mut c_void { std::ptr::copy_nonoverlapping(rb.bytes.as_ptr(), data_ptr, len); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); + lean_ctor_set(obj, 0, addr_obj.cast()); lean_ctor_set(obj, 1, bytes_obj); - obj + obj.cast() } } @@ -194,8 +191,8 @@ pub fn decode_raw_comm(ptr: *const c_void) -> DecodedRawComm { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); let comm_ptr = lean_ctor_get(ptr as *mut _, 1); DecodedRawComm { - addr: 
decode_ixon_address(addr_ptr), - comm: decode_comm(comm_ptr), + addr: decode_ixon_address(addr_ptr.cast()), + comm: decode_comm(comm_ptr.cast()), } } } @@ -206,9 +203,9 @@ pub fn build_raw_comm(rc: &DecodedRawComm) -> *mut c_void { let addr_obj = build_address_from_ixon(&rc.addr); let comm_obj = build_comm(&rc.comm); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, comm_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + lean_ctor_set(obj, 1, comm_obj.cast()); + obj.cast() } } @@ -228,8 +225,8 @@ pub fn decode_raw_name_entry(ptr: *const c_void) -> DecodedRawNameEntry { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); let name_ptr = lean_ctor_get(ptr as *mut _, 1); DecodedRawNameEntry { - addr: decode_ixon_address(addr_ptr), - name: decode_ix_name(name_ptr), + addr: decode_ixon_address(addr_ptr.cast()), + name: decode_ix_name(name_ptr.cast()), } } } @@ -244,9 +241,9 @@ pub fn build_raw_name_entry( let addr_obj = build_address_from_ixon(addr); let name_obj = build_name(cache, name); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, name_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + lean_ctor_set(obj, 1, name_obj.cast()); + obj.cast() } } @@ -272,18 +269,12 @@ pub fn decode_raw_env(ptr: *const c_void) -> DecodedRawEnv { let comms_ptr = lean_ctor_get(ptr as *mut _, 3); let names_ptr = lean_ctor_get(ptr as *mut _, 4); - let consts_arr: &LeanArrayObject = as_ref_unsafe(consts_ptr.cast()); - let named_arr: &LeanArrayObject = as_ref_unsafe(named_ptr.cast()); - let blobs_arr: &LeanArrayObject = as_ref_unsafe(blobs_ptr.cast()); - let comms_arr: &LeanArrayObject = as_ref_unsafe(comms_ptr.cast()); - let names_arr: &LeanArrayObject = as_ref_unsafe(names_ptr.cast()); - DecodedRawEnv { - consts: consts_arr.to_vec(decode_raw_const), - named: named_arr.to_vec(decode_raw_named), - blobs: blobs_arr.to_vec(decode_raw_blob), - comms: comms_arr.to_vec(decode_raw_comm), - names: 
names_arr.to_vec(decode_raw_name_entry), + consts: lean_array_to_vec(consts_ptr.cast(), decode_raw_const), + named: lean_array_to_vec(named_ptr.cast(), decode_raw_named), + blobs: lean_array_to_vec(blobs_ptr.cast(), decode_raw_blob), + comms: lean_array_to_vec(comms_ptr.cast(), decode_raw_comm), + names: lean_array_to_vec(names_ptr.cast(), decode_raw_name_entry), } } } @@ -297,35 +288,35 @@ pub fn build_raw_env(env: &DecodedRawEnv) -> *mut c_void { let consts_arr = lean_alloc_array(env.consts.len(), env.consts.len()); for (i, rc) in env.consts.iter().enumerate() { let obj = build_raw_const(rc); - lean_array_set_core(consts_arr, i, obj); + lean_array_set_core(consts_arr, i, obj.cast()); } // Build named array let named_arr = lean_alloc_array(env.named.len(), env.named.len()); for (i, rn) in env.named.iter().enumerate() { let obj = build_raw_named(&mut cache, rn); - lean_array_set_core(named_arr, i, obj); + lean_array_set_core(named_arr, i, obj.cast()); } // Build blobs array let blobs_arr = lean_alloc_array(env.blobs.len(), env.blobs.len()); for (i, rb) in env.blobs.iter().enumerate() { let obj = build_raw_blob(rb); - lean_array_set_core(blobs_arr, i, obj); + lean_array_set_core(blobs_arr, i, obj.cast()); } // Build comms array let comms_arr = lean_alloc_array(env.comms.len(), env.comms.len()); for (i, rc) in env.comms.iter().enumerate() { let obj = build_raw_comm(rc); - lean_array_set_core(comms_arr, i, obj); + lean_array_set_core(comms_arr, i, obj.cast()); } // Build names array let names_arr = lean_alloc_array(env.names.len(), env.names.len()); for (i, rn) in env.names.iter().enumerate() { let obj = build_raw_name_entry(&mut cache, &rn.addr, &rn.name); - lean_array_set_core(names_arr, i, obj); + lean_array_set_core(names_arr, i, obj.cast()); } // Build RawEnv structure @@ -335,7 +326,7 @@ pub fn build_raw_env(env: &DecodedRawEnv) -> *mut c_void { lean_ctor_set(obj, 2, blobs_arr); lean_ctor_set(obj, 3, comms_arr); lean_ctor_set(obj, 4, names_arr); - obj + 
obj.cast() } } @@ -432,7 +423,7 @@ pub extern "C" fn rs_ser_env(raw_env_ptr: *const c_void) -> *mut c_void { lean_sarray_cptr(ba), buf.len(), ); - ba + ba.cast() } } @@ -443,8 +434,7 @@ pub extern "C" fn rs_ser_env(raw_env_ptr: *const c_void) -> *mut c_void { /// FFI: Deserialize ByteArray → Except String Ixon.RawEnv via Rust's Env.get. Pure. #[unsafe(no_mangle)] pub extern "C" fn rs_des_env(bytes_ptr: *const c_void) -> *mut c_void { - let bytes_arr: &LeanSArrayObject = as_ref_unsafe(bytes_ptr.cast()); - let data = bytes_arr.data(); + let data = lean_sarray_data(bytes_ptr); let mut slice: &[u8] = data; match IxonEnv::get(&mut slice) { Ok(env) => { @@ -453,8 +443,8 @@ pub extern "C" fn rs_des_env(bytes_ptr: *const c_void) -> *mut c_void { // Except.ok (tag 1) unsafe { let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, raw_env); - obj + lean_ctor_set(obj, 0, raw_env.cast()); + obj.cast() } }, Err(e) => { @@ -467,7 +457,7 @@ pub extern "C" fn rs_des_env(bytes_ptr: *const c_void) -> *mut c_void { let lean_str = lean_mk_string(msg.as_ptr()); let obj = lean_alloc_ctor(0, 1, 0); lean_ctor_set(obj, 0, lean_str); - obj + obj.cast() } }, } diff --git a/src/lean/ffi/ixon/expr.rs b/src/lean/ffi/ixon/expr.rs index 060d91b2..2b5a5d75 100644 --- a/src/lean/ffi/ixon/expr.rs +++ b/src/lean/ffi/ixon/expr.rs @@ -4,9 +4,9 @@ use std::ffi::c_void; use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_obj_tag, +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_obj_tag, }; use crate::lean_unbox; @@ -18,13 +18,13 @@ pub fn build_ixon_expr(expr: &IxonExpr) -> *mut c_void { let obj = lean_alloc_ctor(0, 0, 8); let base = obj.cast::(); *base.add(8).cast::() = *idx; - obj + obj.cast() }, IxonExpr::Var(idx) => { let obj = lean_alloc_ctor(1, 0, 8); let base = obj.cast::(); 
*base.add(8).cast::() = *idx; - obj + obj.cast() }, IxonExpr::Ref(ref_idx, univ_idxs) => { let arr = lean_alloc_array(univ_idxs.len(), univ_idxs.len()); @@ -39,7 +39,7 @@ pub fn build_ixon_expr(expr: &IxonExpr) -> *mut c_void { lean_ctor_set(obj, 0, arr); let base = obj.cast::(); *base.add(8 + 8).cast::() = *ref_idx; - obj + obj.cast() }, IxonExpr::Rec(rec_idx, univ_idxs) => { let arr = lean_alloc_array(univ_idxs.len(), univ_idxs.len()); @@ -53,70 +53,70 @@ pub fn build_ixon_expr(expr: &IxonExpr) -> *mut c_void { lean_ctor_set(obj, 0, arr); let base = obj.cast::(); *base.add(8 + 8).cast::() = *rec_idx; - obj + obj.cast() }, IxonExpr::Prj(type_ref_idx, field_idx, val) => { let val_obj = build_ixon_expr(val); let obj = lean_alloc_ctor(4, 1, 16); - lean_ctor_set(obj, 0, val_obj); + lean_ctor_set(obj, 0, val_obj.cast()); let base = obj.cast::(); *base.add(8 + 8).cast::() = *type_ref_idx; *base.add(8 + 16).cast::() = *field_idx; - obj + obj.cast() }, IxonExpr::Str(ref_idx) => { let obj = lean_alloc_ctor(5, 0, 8); let base = obj.cast::(); *base.add(8).cast::() = *ref_idx; - obj + obj.cast() }, IxonExpr::Nat(ref_idx) => { let obj = lean_alloc_ctor(6, 0, 8); let base = obj.cast::(); *base.add(8).cast::() = *ref_idx; - obj + obj.cast() }, IxonExpr::App(fun, arg) => { let fun_obj = build_ixon_expr(fun); let arg_obj = build_ixon_expr(arg); let obj = lean_alloc_ctor(7, 2, 0); - lean_ctor_set(obj, 0, fun_obj); - lean_ctor_set(obj, 1, arg_obj); - obj + lean_ctor_set(obj, 0, fun_obj.cast()); + lean_ctor_set(obj, 1, arg_obj.cast()); + obj.cast() }, IxonExpr::Lam(ty, body) => { let ty_obj = build_ixon_expr(ty); let body_obj = build_ixon_expr(body); let obj = lean_alloc_ctor(8, 2, 0); - lean_ctor_set(obj, 0, ty_obj); - lean_ctor_set(obj, 1, body_obj); - obj + lean_ctor_set(obj, 0, ty_obj.cast()); + lean_ctor_set(obj, 1, body_obj.cast()); + obj.cast() }, IxonExpr::All(ty, body) => { let ty_obj = build_ixon_expr(ty); let body_obj = build_ixon_expr(body); let obj = lean_alloc_ctor(9, 
2, 0); - lean_ctor_set(obj, 0, ty_obj); - lean_ctor_set(obj, 1, body_obj); - obj + lean_ctor_set(obj, 0, ty_obj.cast()); + lean_ctor_set(obj, 1, body_obj.cast()); + obj.cast() }, IxonExpr::Let(non_dep, ty, val, body) => { let ty_obj = build_ixon_expr(ty); let val_obj = build_ixon_expr(val); let body_obj = build_ixon_expr(body); let obj = lean_alloc_ctor(10, 3, 1); - lean_ctor_set(obj, 0, ty_obj); - lean_ctor_set(obj, 1, val_obj); - lean_ctor_set(obj, 2, body_obj); + lean_ctor_set(obj, 0, ty_obj.cast()); + lean_ctor_set(obj, 1, val_obj.cast()); + lean_ctor_set(obj, 2, body_obj.cast()); let base = obj.cast::(); *base.add(3 * 8 + 8) = if *non_dep { 1 } else { 0 }; - obj + obj.cast() }, IxonExpr::Share(idx) => { let obj = lean_alloc_ctor(11, 0, 8); let base = obj.cast::(); *base.add(8).cast::() = *idx; - obj + obj.cast() }, } } @@ -128,9 +128,9 @@ pub fn build_ixon_expr_array(exprs: &[Arc]) -> *mut c_void { let arr = lean_alloc_array(exprs.len(), exprs.len()); for (i, expr) in exprs.iter().enumerate() { let expr_obj = build_ixon_expr(expr); - lean_array_set_core(arr, i, expr_obj); + lean_array_set_core(arr, i, expr_obj.cast()); } - arr + arr.cast() } } @@ -145,8 +145,7 @@ pub fn build_ixon_expr_array(exprs: &[Arc]) -> *mut c_void { fn decode_u64_array(ptr: *const c_void) -> Vec { use crate::lean::lean_is_scalar; - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|elem| { + crate::lean::lean_array_data(ptr).iter().map(|&elem| { if lean_is_scalar(elem) { // Small scalar value lean_unbox!(u64, elem) @@ -157,13 +156,13 @@ fn decode_u64_array(ptr: *const c_void) -> Vec { *base.add(8).cast::() } } - }) + }).collect() } /// Decode Ixon.Expr (12 constructors). 
pub fn decode_ixon_expr(ptr: *const c_void) -> IxonExpr { unsafe { - let tag = lean_obj_tag(ptr.cast_mut()); + let tag = lean_obj_tag(ptr as *mut _); match tag { 0 => { // sort (idx : UInt64) @@ -179,30 +178,30 @@ pub fn decode_ixon_expr(ptr: *const c_void) -> IxonExpr { }, 2 => { // ref (refIdx : UInt64) (univIdxs : Array UInt64) - let arr_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let arr_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let ref_idx = *base.add(8 + 8).cast::(); - let univ_idxs = decode_u64_array(arr_ptr); + let univ_idxs = decode_u64_array(arr_ptr.cast()); IxonExpr::Ref(ref_idx, univ_idxs) }, 3 => { // recur (recIdx : UInt64) (univIdxs : Array UInt64) - let arr_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let arr_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let rec_idx = *base.add(8 + 8).cast::(); - let univ_idxs = decode_u64_array(arr_ptr); + let univ_idxs = decode_u64_array(arr_ptr.cast()); IxonExpr::Rec(rec_idx, univ_idxs) }, 4 => { // prj (typeRefIdx : UInt64) (fieldIdx : UInt64) (val : Expr) - let val_ptr = lean_ctor_get(ptr.cast_mut(), 0); + let val_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let type_ref_idx = *base.add(8 + 8).cast::(); let field_idx = *base.add(8 + 16).cast::(); IxonExpr::Prj( type_ref_idx, field_idx, - Arc::new(decode_ixon_expr(val_ptr)), + Arc::new(decode_ixon_expr(val_ptr.cast())), ) }, 5 => { @@ -219,43 +218,43 @@ pub fn decode_ixon_expr(ptr: *const c_void) -> IxonExpr { }, 7 => { // app (f a : Expr) - let f_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let a_ptr = lean_ctor_get(ptr.cast_mut(), 1); + let f_ptr = lean_ctor_get(ptr as *mut _, 0); + let a_ptr = lean_ctor_get(ptr as *mut _, 1); IxonExpr::App( - Arc::new(decode_ixon_expr(f_ptr)), - Arc::new(decode_ixon_expr(a_ptr)), + Arc::new(decode_ixon_expr(f_ptr.cast())), + Arc::new(decode_ixon_expr(a_ptr.cast())), ) }, 8 => { // lam (ty body : Expr) - let ty_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let body_ptr = 
lean_ctor_get(ptr.cast_mut(), 1); + let ty_ptr = lean_ctor_get(ptr as *mut _, 0); + let body_ptr = lean_ctor_get(ptr as *mut _, 1); IxonExpr::Lam( - Arc::new(decode_ixon_expr(ty_ptr)), - Arc::new(decode_ixon_expr(body_ptr)), + Arc::new(decode_ixon_expr(ty_ptr.cast())), + Arc::new(decode_ixon_expr(body_ptr.cast())), ) }, 9 => { // all (ty body : Expr) - let ty_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let body_ptr = lean_ctor_get(ptr.cast_mut(), 1); + let ty_ptr = lean_ctor_get(ptr as *mut _, 0); + let body_ptr = lean_ctor_get(ptr as *mut _, 1); IxonExpr::All( - Arc::new(decode_ixon_expr(ty_ptr)), - Arc::new(decode_ixon_expr(body_ptr)), + Arc::new(decode_ixon_expr(ty_ptr.cast())), + Arc::new(decode_ixon_expr(body_ptr.cast())), ) }, 10 => { // letE (nonDep : Bool) (ty val body : Expr) - let ty_ptr = lean_ctor_get(ptr.cast_mut(), 0); - let val_ptr = lean_ctor_get(ptr.cast_mut(), 1); - let body_ptr = lean_ctor_get(ptr.cast_mut(), 2); + let ty_ptr = lean_ctor_get(ptr as *mut _, 0); + let val_ptr = lean_ctor_get(ptr as *mut _, 1); + let body_ptr = lean_ctor_get(ptr as *mut _, 2); let base = ptr.cast::(); let non_dep = *base.add(3 * 8 + 8) != 0; IxonExpr::Let( non_dep, - Arc::new(decode_ixon_expr(ty_ptr)), - Arc::new(decode_ixon_expr(val_ptr)), - Arc::new(decode_ixon_expr(body_ptr)), + Arc::new(decode_ixon_expr(ty_ptr.cast())), + Arc::new(decode_ixon_expr(val_ptr.cast())), + Arc::new(decode_ixon_expr(body_ptr.cast())), ) }, 11 => { @@ -271,8 +270,7 @@ pub fn decode_ixon_expr(ptr: *const c_void) -> IxonExpr { /// Decode Array Ixon.Expr. 
pub fn decode_ixon_expr_array(ptr: *const c_void) -> Vec> { - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|e| Arc::new(decode_ixon_expr(e))) + crate::lean::lean_array_data(ptr).iter().map(|&e| Arc::new(decode_ixon_expr(e))).collect() } // ============================================================================= diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs index dafe11f7..7e3a123a 100644 --- a/src/lean/ffi/ixon/meta.rs +++ b/src/lean/ffi/ixon/meta.rs @@ -11,12 +11,13 @@ use crate::ix::ixon::env::Named; use crate::ix::ixon::metadata::{ ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, }; -use crate::lean::array::LeanArrayObject; -use crate::lean::ctor::LeanCtorObject; +use crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_ctor_set_uint64, lean_ctor_set_uint8, lean_obj_tag, +}; use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, - lean_ctor_set_uint64, lean_is_scalar, lean_obj_tag, + lean_array_data, lean_array_to_vec, lean_box_fn, lean_ctor_scalar_u64, + lean_ctor_scalar_u8, lean_is_scalar, }; use super::constant::{ @@ -38,37 +39,37 @@ pub fn build_ixon_data_value(dv: &IxonDataValue) -> *mut c_void { IxonDataValue::OfString(addr) => { let addr_obj = build_address_from_ixon(addr); let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + obj.cast() }, IxonDataValue::OfBool(b) => { let obj = lean_alloc_ctor(1, 0, 1); lean_ctor_set_uint8(obj, 0, if *b { 1 } else { 0 }); - obj + obj.cast() }, IxonDataValue::OfName(addr) => { let addr_obj = build_address_from_ixon(addr); let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + obj.cast() }, IxonDataValue::OfNat(addr) => { let addr_obj = 
build_address_from_ixon(addr); let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + obj.cast() }, IxonDataValue::OfInt(addr) => { let addr_obj = build_address_from_ixon(addr); let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + obj.cast() }, IxonDataValue::OfSyntax(addr) => { let addr_obj = build_address_from_ixon(addr); let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, addr_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + obj.cast() }, } } @@ -81,28 +82,27 @@ pub fn decode_ixon_data_value(ptr: *const c_void) -> IxonDataValue { match tag { 0 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfString(decode_ixon_address(addr_ptr)) + IxonDataValue::OfString(decode_ixon_address(addr_ptr.cast())) }, 1 => { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let b = ctor.get_scalar_u8(0, 0) != 0; + let b = lean_ctor_scalar_u8(ptr, 0, 0) != 0; IxonDataValue::OfBool(b) }, 2 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfName(decode_ixon_address(addr_ptr)) + IxonDataValue::OfName(decode_ixon_address(addr_ptr.cast())) }, 3 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfNat(decode_ixon_address(addr_ptr)) + IxonDataValue::OfNat(decode_ixon_address(addr_ptr.cast())) }, 4 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfInt(decode_ixon_address(addr_ptr)) + IxonDataValue::OfInt(decode_ixon_address(addr_ptr.cast())) }, 5 => { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfSyntax(decode_ixon_address(addr_ptr)) + IxonDataValue::OfSyntax(decode_ixon_address(addr_ptr.cast())) }, _ => panic!("Invalid Ixon.DataValue tag: {}", tag), } @@ -121,11 +121,11 @@ pub fn build_ixon_kvmap(kvmap: &KVMap) -> *mut c_void { let addr_obj = build_address_from_ixon(addr); let dv_obj = build_ixon_data_value(dv); let pair = 
lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, addr_obj); - lean_ctor_set(pair, 1, dv_obj); + lean_ctor_set(pair, 0, addr_obj.cast()); + lean_ctor_set(pair, 1, dv_obj.cast()); lean_array_set_core(arr, i, pair); } - arr + arr.cast() } } @@ -135,26 +135,24 @@ pub fn build_kvmap_array(kvmaps: &[KVMap]) -> *mut c_void { let arr = lean_alloc_array(kvmaps.len(), kvmaps.len()); for (i, kvmap) in kvmaps.iter().enumerate() { let kvmap_obj = build_ixon_kvmap(kvmap); - lean_array_set_core(arr, i, kvmap_obj); + lean_array_set_core(arr, i, kvmap_obj.cast()); } - arr + arr.cast() } } /// Decode KVMap (Array (Address × DataValue)). pub fn decode_ixon_kvmap(ptr: *const c_void) -> KVMap { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|pair| unsafe { + lean_array_data(ptr).iter().map(|&pair| unsafe { let addr_ptr = lean_ctor_get(pair as *mut _, 0); let dv_ptr = lean_ctor_get(pair as *mut _, 1); - (decode_ixon_address(addr_ptr), decode_ixon_data_value(dv_ptr)) - }) + (decode_ixon_address(addr_ptr.cast()), decode_ixon_data_value(dv_ptr.cast())) + }).collect() } /// Decode Array KVMap. fn decode_kvmap_array(ptr: *const c_void) -> Vec { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(decode_ixon_kvmap) + lean_array_to_vec(ptr, decode_ixon_kvmap) } // ============================================================================= @@ -163,8 +161,7 @@ fn decode_kvmap_array(ptr: *const c_void) -> Vec { /// Decode Array Address. fn decode_address_array(ptr: *const c_void) -> Vec
{ - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(decode_ixon_address) + lean_array_to_vec(ptr, decode_ixon_address) } /// Build Array UInt64. @@ -173,16 +170,15 @@ fn build_u64_array(vals: &[u64]) -> *mut c_void { let arr = lean_alloc_array(vals.len(), vals.len()); for (i, &v) in vals.iter().enumerate() { let obj = crate::lean::lean_box_u64(v); - lean_array_set_core(arr, i, obj); + lean_array_set_core(arr, i, obj.cast()); } - arr + arr.cast() } } /// Decode Array UInt64. fn decode_u64_array(ptr: *const c_void) -> Vec { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(crate::lean::lean_unbox_u64) + lean_array_to_vec(ptr, crate::lean::lean_unbox_u64) } // ============================================================================= @@ -210,52 +206,52 @@ pub fn build_expr_meta_data(node: &ExprMetaData) -> *mut c_void { let obj = lean_alloc_ctor(1, 0, 16); lean_ctor_set_uint64(obj, 0, children[0]); lean_ctor_set_uint64(obj, 8, children[1]); - obj + obj.cast() }, ExprMetaData::Binder { name, info, children } => { // Tag 2, 1 obj field (name), scalar: 2× u64 + u8 (info) // Lean ABI sorts scalars by size descending: [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] let obj = lean_alloc_ctor(2, 1, 17); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); lean_ctor_set_uint64(obj, 8, children[0]); lean_ctor_set_uint64(obj, 8 + 8, children[1]); lean_ctor_set_uint8(obj, 8 + 16, binder_info_to_u8(info)); - obj + obj.cast() }, ExprMetaData::LetBinder { name, children } => { // Tag 3, 1 obj field (name), 24 scalar bytes (3× u64) let obj = lean_alloc_ctor(3, 1, 24); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); lean_ctor_set_uint64(obj, 8, children[0]); lean_ctor_set_uint64(obj, 8 + 8, children[1]); lean_ctor_set_uint64(obj, 8 + 16, children[2]); - obj + obj.cast() }, ExprMetaData::Ref { name } 
=> { // Tag 4, 1 obj field (name), 0 scalar bytes let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - obj + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); + obj.cast() }, ExprMetaData::Prj { struct_name, child } => { // Tag 5, 1 obj field (structName), 8 scalar bytes (1× u64) let obj = lean_alloc_ctor(5, 1, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(struct_name)); + lean_ctor_set(obj, 0, build_address_from_ixon(struct_name).cast()); lean_ctor_set_uint64(obj, 8, *child); - obj + obj.cast() }, ExprMetaData::Mdata { mdata, child } => { // Tag 6, 1 obj field (mdata: Array KVMap), 8 scalar bytes (1× u64) let mdata_obj = build_kvmap_array(mdata); let obj = lean_alloc_ctor(6, 1, 8); - lean_ctor_set(obj, 0, mdata_obj); + lean_ctor_set(obj, 0, mdata_obj.cast()); lean_ctor_set_uint64(obj, 8, *child); - obj + obj.cast() }, } } @@ -271,12 +267,11 @@ pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { return ExprMetaData::Leaf; } let tag = lean_obj_tag(ptr as *mut _); - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); match tag { 1 => { // app: 0 obj fields, 2× u64 scalar - let fun_ = ctor.get_scalar_u64(0, 0); - let arg = ctor.get_scalar_u64(0, 8); + let fun_ = lean_ctor_scalar_u64(ptr, 0, 0); + let arg = lean_ctor_scalar_u64(ptr, 0, 8); ExprMetaData::App { children: [fun_, arg] } }, @@ -284,9 +279,9 @@ pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_child = ctor.get_scalar_u64(1, 0); - let body_child = ctor.get_scalar_u64(1, 8); - let info_byte = ctor.get_scalar_u8(1, 16); + let ty_child = lean_ctor_scalar_u64(ptr, 1, 0); + let body_child = lean_ctor_scalar_u64(ptr, 1, 8); + let info_byte = lean_ctor_scalar_u8(ptr, 1, 16); let info = match info_byte { 0 => BinderInfo::Default, 1 => 
BinderInfo::Implicit, @@ -295,7 +290,7 @@ pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { _ => panic!("Invalid BinderInfo tag: {}", info_byte), }; ExprMetaData::Binder { - name: decode_ixon_address(name_ptr), + name: decode_ixon_address(name_ptr.cast()), info, children: [ty_child, body_child], } @@ -304,11 +299,11 @@ pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { 3 => { // letBinder: 1 obj field (name), 3× u64 scalar let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_child = ctor.get_scalar_u64(1, 0); - let val_child = ctor.get_scalar_u64(1, 8); - let body_child = ctor.get_scalar_u64(1, 16); + let ty_child = lean_ctor_scalar_u64(ptr, 1, 0); + let val_child = lean_ctor_scalar_u64(ptr, 1, 8); + let body_child = lean_ctor_scalar_u64(ptr, 1, 16); ExprMetaData::LetBinder { - name: decode_ixon_address(name_ptr), + name: decode_ixon_address(name_ptr.cast()), children: [ty_child, val_child, body_child], } }, @@ -316,21 +311,21 @@ pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { 4 => { // ref: 1 obj field (name), 0 scalar let name_ptr = lean_ctor_get(ptr as *mut _, 0); - ExprMetaData::Ref { name: decode_ixon_address(name_ptr) } + ExprMetaData::Ref { name: decode_ixon_address(name_ptr.cast()) } }, 5 => { // prj: 1 obj field (structName), 1× u64 scalar let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let child = ctor.get_scalar_u64(1, 0); - ExprMetaData::Prj { struct_name: decode_ixon_address(name_ptr), child } + let child = lean_ctor_scalar_u64(ptr, 1, 0); + ExprMetaData::Prj { struct_name: decode_ixon_address(name_ptr.cast()), child } }, 6 => { // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar let mdata_ptr = lean_ctor_get(ptr as *mut _, 0); - let child = ctor.get_scalar_u64(1, 0); - ExprMetaData::Mdata { mdata: decode_kvmap_array(mdata_ptr), child } + let child = lean_ctor_scalar_u64(ptr, 1, 0); + ExprMetaData::Mdata { mdata: decode_kvmap_array(mdata_ptr.cast()), child } }, _ => panic!("Invalid 
Ixon.ExprMetaData tag: {}", tag), @@ -349,17 +344,16 @@ pub fn build_expr_meta_arena(arena: &ExprMeta) -> *mut c_void { unsafe { let arr = lean_alloc_array(arena.nodes.len(), arena.nodes.len()); for (i, node) in arena.nodes.iter().enumerate() { - lean_array_set_core(arr, i, build_expr_meta_data(node)); + lean_array_set_core(arr, i, build_expr_meta_data(node).cast()); } - arr + arr.cast() } } /// Decode Ixon.ExprMetaArena from Lean pointer. /// Single-field struct is unboxed — ptr IS the Array directly. pub fn decode_expr_meta_arena(ptr: *const c_void) -> ExprMeta { - let arr: &LeanArrayObject = as_ref_unsafe(ptr.cast()); - ExprMeta { nodes: arr.to_vec(decode_expr_meta_data) } + ExprMeta { nodes: lean_array_to_vec(ptr, decode_expr_meta_data) } } // ============================================================================= @@ -393,55 +387,55 @@ pub fn build_constant_meta(meta: &ConstantMeta) -> *mut c_void { value_root, } => { let obj = lean_alloc_ctor(1, 6, 16); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_reducibility_hints(hints)); - lean_ctor_set(obj, 3, build_address_array(all)); - lean_ctor_set(obj, 4, build_address_array(ctx)); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena)); + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); + lean_ctor_set(obj, 1, build_address_array(lvls).cast()); + lean_ctor_set(obj, 2, build_reducibility_hints(hints).cast()); + lean_ctor_set(obj, 3, build_address_array(all).cast()); + lean_ctor_set(obj, 4, build_address_array(ctx).cast()); + lean_ctor_set(obj, 5, build_expr_meta_arena(arena).cast()); lean_ctor_set_uint64(obj, 6 * 8, *type_root); lean_ctor_set_uint64(obj, 6 * 8 + 8, *value_root); - obj + obj.cast() }, ConstantMeta::Axio { name, lvls, arena, type_root } => { let obj = lean_alloc_ctor(2, 3, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - 
lean_ctor_set(obj, 2, build_expr_meta_arena(arena)); + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); + lean_ctor_set(obj, 1, build_address_array(lvls).cast()); + lean_ctor_set(obj, 2, build_expr_meta_arena(arena).cast()); lean_ctor_set_uint64(obj, 3 * 8, *type_root); - obj + obj.cast() }, ConstantMeta::Quot { name, lvls, arena, type_root } => { let obj = lean_alloc_ctor(3, 3, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_expr_meta_arena(arena)); + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); + lean_ctor_set(obj, 1, build_address_array(lvls).cast()); + lean_ctor_set(obj, 2, build_expr_meta_arena(arena).cast()); lean_ctor_set_uint64(obj, 3 * 8, *type_root); - obj + obj.cast() }, ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { let obj = lean_alloc_ctor(4, 6, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_address_array(ctors)); - lean_ctor_set(obj, 3, build_address_array(all)); - lean_ctor_set(obj, 4, build_address_array(ctx)); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena)); + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); + lean_ctor_set(obj, 1, build_address_array(lvls).cast()); + lean_ctor_set(obj, 2, build_address_array(ctors).cast()); + lean_ctor_set(obj, 3, build_address_array(all).cast()); + lean_ctor_set(obj, 4, build_address_array(ctx).cast()); + lean_ctor_set(obj, 5, build_expr_meta_arena(arena).cast()); lean_ctor_set_uint64(obj, 6 * 8, *type_root); - obj + obj.cast() }, ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { let obj = lean_alloc_ctor(5, 4, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_address_from_ixon(induct)); - lean_ctor_set(obj, 3, build_expr_meta_arena(arena)); + 
lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); + lean_ctor_set(obj, 1, build_address_array(lvls).cast()); + lean_ctor_set(obj, 2, build_address_from_ixon(induct).cast()); + lean_ctor_set(obj, 3, build_expr_meta_arena(arena).cast()); lean_ctor_set_uint64(obj, 4 * 8, *type_root); - obj + obj.cast() }, ConstantMeta::Rec { @@ -455,15 +449,15 @@ pub fn build_constant_meta(meta: &ConstantMeta) -> *mut c_void { rule_roots, } => { let obj = lean_alloc_ctor(6, 7, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name)); - lean_ctor_set(obj, 1, build_address_array(lvls)); - lean_ctor_set(obj, 2, build_address_array(rules)); - lean_ctor_set(obj, 3, build_address_array(all)); - lean_ctor_set(obj, 4, build_address_array(ctx)); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena)); - lean_ctor_set(obj, 6, build_u64_array(rule_roots)); + lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); + lean_ctor_set(obj, 1, build_address_array(lvls).cast()); + lean_ctor_set(obj, 2, build_address_array(rules).cast()); + lean_ctor_set(obj, 3, build_address_array(all).cast()); + lean_ctor_set(obj, 4, build_address_array(ctx).cast()); + lean_ctor_set(obj, 5, build_expr_meta_arena(arena).cast()); + lean_ctor_set(obj, 6, build_u64_array(rule_roots).cast()); lean_ctor_set_uint64(obj, 7 * 8, *type_root); - obj + obj.cast() }, } } @@ -479,18 +473,17 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { return ConstantMeta::Empty; } let tag = lean_obj_tag(ptr as *mut _); - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); match tag { 1 => { // defn: 6 obj fields, 2× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let hints = decode_reducibility_hints(lean_ctor_get(ptr as *mut _, 2)); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3)); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4)); - let arena = 
decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5)); - let type_root = ctor.get_scalar_u64(6, 0); - let value_root = ctor.get_scalar_u64(6, 8); + let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); + let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); + let hints = decode_reducibility_hints(lean_ctor_get(ptr as *mut _, 2).cast()); + let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); + let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); + let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); + let type_root = lean_ctor_scalar_u64(ptr, 6, 0); + let value_root = lean_ctor_scalar_u64(ptr, 6, 8); ConstantMeta::Def { name, lvls, @@ -505,54 +498,54 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { 2 => { // axio: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2)); - let type_root = ctor.get_scalar_u64(3, 0); + let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); + let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); + let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); + let type_root = lean_ctor_scalar_u64(ptr, 3, 0); ConstantMeta::Axio { name, lvls, arena, type_root } }, 3 => { // quot: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2)); - let type_root = ctor.get_scalar_u64(3, 0); + let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); + let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); + let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); + let type_root = 
lean_ctor_scalar_u64(ptr, 3, 0); ConstantMeta::Quot { name, lvls, arena, type_root } }, 4 => { // indc: 6 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let ctors = decode_address_array(lean_ctor_get(ptr as *mut _, 2)); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3)); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5)); - let type_root = ctor.get_scalar_u64(6, 0); + let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); + let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); + let ctors = decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); + let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); + let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); + let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); + let type_root = lean_ctor_scalar_u64(ptr, 6, 0); ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } }, 5 => { // ctor: 4 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let induct = decode_ixon_address(lean_ctor_get(ptr as *mut _, 2)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 3)); - let type_root = ctor.get_scalar_u64(4, 0); + let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); + let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); + let induct = decode_ixon_address(lean_ctor_get(ptr as *mut _, 2).cast()); + let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 3).cast()); + let type_root = lean_ctor_scalar_u64(ptr, 4, 0); ConstantMeta::Ctor { name, lvls, induct, arena, type_root } }, 6 => { // recr: 7 obj fields, 1× u64 scalar - let name = 
decode_ixon_address(lean_ctor_get(ptr as *mut _, 0)); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1)); - let rules = decode_address_array(lean_ctor_get(ptr as *mut _, 2)); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3)); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4)); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5)); - let rule_roots = decode_u64_array(lean_ctor_get(ptr as *mut _, 6)); - let type_root = ctor.get_scalar_u64(7, 0); + let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); + let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); + let rules = decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); + let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); + let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); + let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); + let rule_roots = decode_u64_array(lean_ctor_get(ptr as *mut _, 6).cast()); + let type_root = lean_ctor_scalar_u64(ptr, 7, 0); ConstantMeta::Rec { name, lvls, @@ -580,9 +573,9 @@ pub fn build_named(addr: &Address, meta: &ConstantMeta) -> *mut c_void { let addr_obj = build_address_from_ixon(addr); let meta_obj = build_constant_meta(meta); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj); - lean_ctor_set(obj, 1, meta_obj); - obj + lean_ctor_set(obj, 0, addr_obj.cast()); + lean_ctor_set(obj, 1, meta_obj.cast()); + obj.cast() } } @@ -592,8 +585,8 @@ pub fn decode_named(ptr: *const c_void) -> Named { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); let meta_ptr = lean_ctor_get(ptr as *mut _, 1); Named { - addr: decode_ixon_address(addr_ptr), - meta: decode_constant_meta(meta_ptr), + addr: decode_ixon_address(addr_ptr.cast()), + meta: decode_constant_meta(meta_ptr.cast()), } } } @@ -604,9 +597,9 @@ pub fn build_ixon_comm(comm: &Comm) -> *mut c_void { let secret_obj = build_address_from_ixon(&comm.secret); 
let payload_obj = build_address_from_ixon(&comm.payload); let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, secret_obj); - lean_ctor_set(obj, 1, payload_obj); - obj + lean_ctor_set(obj, 0, secret_obj.cast()); + lean_ctor_set(obj, 1, payload_obj.cast()); + obj.cast() } } @@ -616,8 +609,8 @@ pub fn decode_ixon_comm(ptr: *const c_void) -> Comm { let secret_ptr = lean_ctor_get(ptr as *mut _, 0); let payload_ptr = lean_ctor_get(ptr as *mut _, 1); Comm { - secret: decode_ixon_address(secret_ptr), - payload: decode_ixon_address(payload_ptr), + secret: decode_ixon_address(secret_ptr.cast()), + payload: decode_ixon_address(payload_ptr.cast()), } } } diff --git a/src/lean/ffi/ixon/serialize.rs b/src/lean/ffi/ixon/serialize.rs index e9c7eb22..5958c8de 100644 --- a/src/lean/ffi/ixon/serialize.rs +++ b/src/lean/ffi/ixon/serialize.rs @@ -11,10 +11,10 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; -use crate::lean::array::LeanArrayObject; -use crate::lean::ctor::LeanCtorObject; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::{as_ref_unsafe, lean_is_scalar, lean_unbox_u64}; +use crate::lean::{ + lean_array_to_vec, lean_ctor_objs, lean_ctor_scalar_u64, lean_is_scalar, + lean_sarray_data, lean_tag, lean_unbox_u64, +}; use super::constant::{decode_ixon_address, decode_ixon_constant}; @@ -30,66 +30,63 @@ fn lean_ptr_to_u64(ptr: *const c_void) -> u64 { /// Decode a Lean `Ixon.Expr` to a Rust `IxonExpr`. 
pub fn lean_ptr_to_ixon_expr(ptr: *const c_void) -> Arc { assert!(!lean_is_scalar(ptr), "Ixon.Expr should not be scalar"); - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { + match lean_tag(ptr) { 0 => { - let idx = ctor.get_scalar_u64(0, 0); + let idx = lean_ctor_scalar_u64(ptr, 0, 0); Arc::new(IxonExpr::Sort(idx)) }, 1 => { - let idx = ctor.get_scalar_u64(0, 0); + let idx = lean_ctor_scalar_u64(ptr, 0, 0); Arc::new(IxonExpr::Var(idx)) }, 2 => { - let [univs_ptr] = ctor.objs(); - let ref_idx = ctor.get_scalar_u64(1, 0); - let univs_arr: &LeanArrayObject = as_ref_unsafe(univs_ptr.cast()); - let univs = univs_arr.to_vec(lean_ptr_to_u64); + let [univs_ptr] = lean_ctor_objs(ptr); + let ref_idx = lean_ctor_scalar_u64(ptr, 1, 0); + let univs = lean_array_to_vec(univs_ptr, lean_ptr_to_u64); Arc::new(IxonExpr::Ref(ref_idx, univs)) }, 3 => { - let [univs_ptr] = ctor.objs(); - let rec_idx = ctor.get_scalar_u64(1, 0); - let univs_arr: &LeanArrayObject = as_ref_unsafe(univs_ptr.cast()); - let univs = univs_arr.to_vec(lean_ptr_to_u64); + let [univs_ptr] = lean_ctor_objs(ptr); + let rec_idx = lean_ctor_scalar_u64(ptr, 1, 0); + let univs = lean_array_to_vec(univs_ptr, lean_ptr_to_u64); Arc::new(IxonExpr::Rec(rec_idx, univs)) }, 4 => { - let [val_ptr] = ctor.objs(); - let type_idx = ctor.get_scalar_u64(1, 0); - let field_idx = ctor.get_scalar_u64(1, 8); + let [val_ptr] = lean_ctor_objs(ptr); + let type_idx = lean_ctor_scalar_u64(ptr, 1, 0); + let field_idx = lean_ctor_scalar_u64(ptr, 1, 8); let val = lean_ptr_to_ixon_expr(val_ptr); Arc::new(IxonExpr::Prj(type_idx, field_idx, val)) }, 5 => { - let idx = ctor.get_scalar_u64(0, 0); + let idx = lean_ctor_scalar_u64(ptr, 0, 0); Arc::new(IxonExpr::Str(idx)) }, 6 => { - let idx = ctor.get_scalar_u64(0, 0); + let idx = lean_ctor_scalar_u64(ptr, 0, 0); Arc::new(IxonExpr::Nat(idx)) }, 7 => { - let [fun_ptr, arg_ptr] = ctor.objs(); + let [fun_ptr, arg_ptr] = lean_ctor_objs(ptr); let fun_ = 
lean_ptr_to_ixon_expr(fun_ptr); let arg = lean_ptr_to_ixon_expr(arg_ptr); Arc::new(IxonExpr::App(fun_, arg)) }, 8 => { - let [ty_ptr, body_ptr] = ctor.objs(); + let [ty_ptr, body_ptr] = lean_ctor_objs(ptr); let ty = lean_ptr_to_ixon_expr(ty_ptr); let body = lean_ptr_to_ixon_expr(body_ptr); Arc::new(IxonExpr::Lam(ty, body)) }, 9 => { - let [ty_ptr, body_ptr] = ctor.objs(); + let [ty_ptr, body_ptr] = lean_ctor_objs(ptr); let ty = lean_ptr_to_ixon_expr(ty_ptr); let body = lean_ptr_to_ixon_expr(body_ptr); Arc::new(IxonExpr::All(ty, body)) }, 10 => { - let [ty_ptr, val_ptr, body_ptr] = ctor.objs(); - let base_ptr = (ctor as *const LeanCtorObject).cast::(); + let [ty_ptr, val_ptr, body_ptr] = lean_ctor_objs(ptr); + let base_ptr = ptr.cast::(); let non_dep = unsafe { *base_ptr.add(8 + 3 * 8) } != 0; let ty = lean_ptr_to_ixon_expr(ty_ptr); let val = lean_ptr_to_ixon_expr(val_ptr); @@ -97,7 +94,7 @@ pub fn lean_ptr_to_ixon_expr(ptr: *const c_void) -> Arc { Arc::new(IxonExpr::Let(non_dep, ty, val, body)) }, 11 => { - let idx = ctor.get_scalar_u64(0, 0); + let idx = lean_ctor_scalar_u64(ptr, 0, 0); Arc::new(IxonExpr::Share(idx)) }, tag => panic!("Unknown Ixon.Expr tag: {}", tag), @@ -121,21 +118,20 @@ fn lean_ptr_to_ixon_univ(ptr: *const c_void) -> Arc { if lean_is_scalar(ptr) { return IxonUniv::zero(); } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { + match lean_tag(ptr) { 1 => { - let [inner] = ctor.objs(); + let [inner] = lean_ctor_objs(ptr); IxonUniv::succ(lean_ptr_to_ixon_univ(inner)) }, 2 => { - let [a, b] = ctor.objs(); + let [a, b] = lean_ctor_objs(ptr); IxonUniv::max(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) }, 3 => { - let [a, b] = ctor.objs(); + let [a, b] = lean_ctor_objs(ptr); IxonUniv::imax(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) }, - 4 => IxonUniv::var(ctor.get_scalar_u64(0, 0)), + 4 => IxonUniv::var(lean_ctor_scalar_u64(ptr, 0, 0)), tag => panic!("Unknown Ixon.Univ tag: {}", tag), } } @@ -144,10 +140,10 @@ 
fn lean_ptr_to_ixon_univ(ptr: *const c_void) -> Arc { #[unsafe(no_mangle)] pub extern "C" fn rs_eq_univ_serialization( univ_ptr: *const c_void, - bytes: &LeanSArrayObject, + bytes: *const c_void, ) -> bool { let univ = lean_ptr_to_ixon_univ(univ_ptr); - let bytes_data = bytes.data(); + let bytes_data = lean_sarray_data(bytes); let mut buf = Vec::with_capacity(bytes_data.len()); put_univ(&univ, &mut buf); buf == bytes_data @@ -157,10 +153,10 @@ pub extern "C" fn rs_eq_univ_serialization( #[unsafe(no_mangle)] pub extern "C" fn rs_eq_expr_serialization( expr_ptr: *const c_void, - bytes: &LeanSArrayObject, + bytes: *const c_void, ) -> bool { let expr = lean_ptr_to_ixon_expr(expr_ptr); - let bytes_data = bytes.data(); + let bytes_data = lean_sarray_data(bytes); let mut buf = Vec::with_capacity(bytes_data.len()); put_expr(&expr, &mut buf); buf == bytes_data @@ -170,10 +166,10 @@ pub extern "C" fn rs_eq_expr_serialization( #[unsafe(no_mangle)] pub extern "C" fn rs_eq_constant_serialization( constant_ptr: *const c_void, - bytes: &LeanSArrayObject, + bytes: *const c_void, ) -> bool { let constant = decode_ixon_constant(constant_ptr); - let bytes_data = bytes.data(); + let bytes_data = lean_sarray_data(bytes); let mut buf = Vec::with_capacity(bytes_data.len()); constant.put(&mut buf); buf == bytes_data @@ -184,13 +180,13 @@ pub extern "C" fn rs_eq_constant_serialization( #[unsafe(no_mangle)] pub extern "C" fn rs_eq_env_serialization( raw_env_ptr: *const c_void, - bytes: &LeanSArrayObject, + bytes: *const c_void, ) -> bool { use super::env::decode_raw_env; use crate::ix::ixon::env::Env; let decoded = decode_raw_env(raw_env_ptr); - let bytes_data = bytes.data(); + let bytes_data = lean_sarray_data(bytes); // Deserialize Lean's bytes using Rust's deserializer let rust_env = match Env::get(&mut &bytes_data[..]) { @@ -260,8 +256,7 @@ extern "C" fn rs_env_serde_roundtrip(lean_bytes_ptr: *const c_void) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let 
bytes_arr: &LeanSArrayObject = as_ref_unsafe(lean_bytes_ptr.cast()); - let lean_bytes = bytes_arr.data().to_vec(); + let lean_bytes = lean_sarray_data(lean_bytes_ptr).to_vec(); // Try to deserialize with Rust let mut slice = lean_bytes.as_slice(); @@ -307,8 +302,7 @@ extern "C" fn rs_env_serde_check(lean_bytes_ptr: *const c_void) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let bytes_arr: &LeanSArrayObject = as_ref_unsafe(lean_bytes_ptr.cast()); - let lean_bytes = bytes_arr.data().to_vec(); + let lean_bytes = lean_sarray_data(lean_bytes_ptr).to_vec(); // Try to deserialize with Rust let mut slice = lean_bytes.as_slice(); diff --git a/src/lean/ffi/ixon/sharing.rs b/src/lean/ffi/ixon/sharing.rs index 955386cb..3b55c6f0 100644 --- a/src/lean/ffi/ixon/sharing.rs +++ b/src/lean/ffi/ixon/sharing.rs @@ -8,9 +8,7 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; -use crate::lean::array::LeanArrayObject; -use crate::lean::as_ref_unsafe; -use crate::lean::sarray::LeanSArrayObject; +use crate::lean::{lean_array_to_vec, lean_sarray_set_data}; use super::expr::decode_ixon_expr_array; use super::serialize::lean_ptr_to_ixon_expr; @@ -19,8 +17,7 @@ use super::serialize::lean_ptr_to_ixon_expr; /// This helps diagnose why Lean and Rust make different sharing decisions. 
#[unsafe(no_mangle)] pub extern "C" fn rs_debug_sharing_analysis(exprs_ptr: *const c_void) { - let exprs_arr: &LeanArrayObject = as_ref_unsafe(exprs_ptr.cast()); - let exprs: Vec> = exprs_arr.to_vec(lean_ptr_to_ixon_expr); + let exprs: Vec> = lean_array_to_vec(exprs_ptr, lean_ptr_to_ixon_expr); println!("[Rust] Analyzing {} input expressions", exprs.len()); @@ -98,13 +95,8 @@ extern "C" fn rs_run_sharing_analysis( } // Write to output arrays - let sharing_out: &mut LeanSArrayObject = - unsafe { &mut *out_sharing_vec.cast() }; - sharing_out.set_data(&sharing_bytes); - - let rewritten_out: &mut LeanSArrayObject = - unsafe { &mut *out_rewritten.cast() }; - rewritten_out.set_data(&rewritten_bytes); + unsafe { lean_sarray_set_data(out_sharing_vec, &sharing_bytes) }; + unsafe { lean_sarray_set_data(out_rewritten, &rewritten_bytes) }; shared_hashes.len() as u64 } diff --git a/src/lean/ffi/ixon/univ.rs b/src/lean/ffi/ixon/univ.rs index 3558c244..d5497091 100644 --- a/src/lean/ffi/ixon/univ.rs +++ b/src/lean/ffi/ixon/univ.rs @@ -5,8 +5,11 @@ use std::sync::Arc; use crate::ix::ixon::univ::Univ as IxonUniv; use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_array_set_core, - lean_box_fn, lean_ctor_get, lean_ctor_set, lean_is_scalar, lean_obj_tag, + lean::{ + lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, + lean_ctor_set, lean_obj_tag, + }, + lean_box_fn, lean_is_scalar, }; /// Build Ixon.Univ @@ -17,30 +20,30 @@ pub fn build_ixon_univ(univ: &IxonUniv) -> *mut c_void { IxonUniv::Succ(inner) => { let inner_obj = build_ixon_univ(inner); let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, inner_obj); - obj + lean_ctor_set(obj, 0, inner_obj.cast()); + obj.cast() }, IxonUniv::Max(a, b) => { let a_obj = build_ixon_univ(a); let b_obj = build_ixon_univ(b); let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, a_obj); - lean_ctor_set(obj, 1, b_obj); - obj + lean_ctor_set(obj, 0, a_obj.cast()); + lean_ctor_set(obj, 1, 
b_obj.cast()); + obj.cast() }, IxonUniv::IMax(a, b) => { let a_obj = build_ixon_univ(a); let b_obj = build_ixon_univ(b); let obj = lean_alloc_ctor(3, 2, 0); - lean_ctor_set(obj, 0, a_obj); - lean_ctor_set(obj, 1, b_obj); - obj + lean_ctor_set(obj, 0, a_obj.cast()); + lean_ctor_set(obj, 1, b_obj.cast()); + obj.cast() }, IxonUniv::Var(idx) => { let obj = lean_alloc_ctor(4, 0, 8); let base = obj.cast::(); *base.add(8).cast::() = *idx; - obj + obj.cast() }, } } @@ -52,9 +55,9 @@ pub fn build_ixon_univ_array(univs: &[Arc]) -> *mut c_void { let arr = lean_alloc_array(univs.len(), univs.len()); for (i, univ) in univs.iter().enumerate() { let univ_obj = build_ixon_univ(univ); - lean_array_set_core(arr, i, univ_obj); + lean_array_set_core(arr, i, univ_obj.cast()); } - arr + arr.cast() } } @@ -74,27 +77,27 @@ pub fn decode_ixon_univ(ptr: *const c_void) -> IxonUniv { if lean_is_scalar(ptr) { return IxonUniv::Zero; } - let tag = lean_obj_tag(ptr as *mut _); + let tag = lean_obj_tag((ptr as *mut c_void).cast()); match tag { 0 => IxonUniv::Zero, 1 => { - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonUniv::Succ(Arc::new(decode_ixon_univ(inner_ptr))) + let inner_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 0); + IxonUniv::Succ(Arc::new(decode_ixon_univ(inner_ptr.cast()))) }, 2 => { - let a_ptr = lean_ctor_get(ptr as *mut _, 0); - let b_ptr = lean_ctor_get(ptr as *mut _, 1); + let a_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 0); + let b_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 1); IxonUniv::Max( - Arc::new(decode_ixon_univ(a_ptr)), - Arc::new(decode_ixon_univ(b_ptr)), + Arc::new(decode_ixon_univ(a_ptr.cast())), + Arc::new(decode_ixon_univ(b_ptr.cast())), ) }, 3 => { - let a_ptr = lean_ctor_get(ptr as *mut _, 0); - let b_ptr = lean_ctor_get(ptr as *mut _, 1); + let a_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 0); + let b_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 1); IxonUniv::IMax( - Arc::new(decode_ixon_univ(a_ptr)), - 
Arc::new(decode_ixon_univ(b_ptr)), + Arc::new(decode_ixon_univ(a_ptr.cast())), + Arc::new(decode_ixon_univ(b_ptr.cast())), ) }, 4 => { @@ -110,8 +113,7 @@ pub fn decode_ixon_univ(ptr: *const c_void) -> IxonUniv { /// Decode Array Ixon.Univ. pub fn decode_ixon_univ_array(ptr: *const c_void) -> Vec> { - let arr: &crate::lean::array::LeanArrayObject = as_ref_unsafe(ptr.cast()); - arr.to_vec(|u| Arc::new(decode_ixon_univ(u))) + crate::lean::lean_array_data(ptr).iter().map(|&u| Arc::new(decode_ixon_univ(u))).collect() } // ============================================================================= diff --git a/src/lean/ffi/keccak.rs b/src/lean/ffi/keccak.rs index ef52515c..df4bd7d1 100644 --- a/src/lean/ffi/keccak.rs +++ b/src/lean/ffi/keccak.rs @@ -4,9 +4,8 @@ use std::sync::OnceLock; use tiny_keccak::{Hasher, Keccak}; use crate::lean::{ - as_mut_unsafe, as_ref_unsafe, external::LeanExternalObject, - lean_alloc_external, lean_alloc_sarray, lean_register_external_class, - noop_foreach, sarray::LeanSArrayObject, + lean::{ lean_alloc_external, lean_alloc_sarray, lean_get_external_data, lean_register_external_class }, + lean_sarray_data, lean_sarray_set_data, noop_foreach, }; use super::{ExternalClassPtr, drop_raw, to_raw}; @@ -17,8 +16,8 @@ fn get_keccak_class() -> *mut c_void { KECCAK_CLASS .get_or_init(|| { ExternalClassPtr(unsafe { - lean_register_external_class(keccak_finalizer, noop_foreach) - }) + lean_register_external_class(Some(keccak_finalizer), Some(noop_foreach)) + }.cast()) }) .0 } @@ -29,37 +28,34 @@ extern "C" fn keccak_finalizer(ptr: *mut c_void) { /// `Keccak.Hasher.init : Unit → Hasher` #[unsafe(no_mangle)] -extern "C" fn c_rs_keccak256_hasher_init(_unit: *const c_void) -> *mut c_void { +extern "C" fn rs_keccak256_hasher_init(_unit: *const c_void) -> *mut c_void { let hasher = Keccak::v256(); let ptr = to_raw(hasher) as *mut c_void; - unsafe { lean_alloc_external(get_keccak_class(), ptr) } + unsafe { lean_alloc_external(get_keccak_class().cast(), ptr) 
}.cast() } /// `Keccak.Hasher.update : (hasher: Hasher) → (input: @& ByteArray) → Hasher` #[unsafe(no_mangle)] -extern "C" fn c_rs_keccak256_hasher_update( +extern "C" fn rs_keccak256_hasher_update( hasher_obj: *mut c_void, - input: &LeanSArrayObject, + input: *const c_void, ) -> *mut c_void { - let external: &LeanExternalObject = as_ref_unsafe(hasher_obj.cast()); - let hasher: &Keccak = as_ref_unsafe(external.cast_data()); + let hasher: &Keccak = unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; let mut new_hasher = hasher.clone(); - new_hasher.update(input.data()); + new_hasher.update(lean_sarray_data(input)); let ptr = to_raw(new_hasher) as *mut c_void; - unsafe { lean_alloc_external(get_keccak_class(), ptr) } + unsafe { lean_alloc_external(get_keccak_class().cast(), ptr) }.cast() } /// `Keccak.Hasher.finalize : (hasher: Hasher) → ByteArray` #[unsafe(no_mangle)] -extern "C" fn c_rs_keccak256_hasher_finalize( +extern "C" fn rs_keccak256_hasher_finalize( hasher_obj: *mut c_void, ) -> *mut c_void { - let external: &LeanExternalObject = as_ref_unsafe(hasher_obj.cast()); - let hasher: &Keccak = as_ref_unsafe(external.cast_data()); + let hasher: &Keccak = unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; let mut data = [0u8; 32]; hasher.clone().finalize(&mut data); let arr_ptr = unsafe { lean_alloc_sarray(1, 32, 32) }; - let arr: &mut LeanSArrayObject = as_mut_unsafe(arr_ptr.cast()); - arr.set_data(&data); - arr_ptr + unsafe { lean_sarray_set_data(arr_ptr.cast(), &data) }; + arr_ptr.cast() } diff --git a/src/lean/ffi/lean_env.rs b/src/lean/ffi/lean_env.rs index 3817e0e4..addc8470 100644 --- a/src/lean/ffi/lean_env.rs +++ b/src/lean/ffi/lean_env.rs @@ -30,8 +30,8 @@ use crate::{ TheoremVal, }, lean::{ - array::LeanArrayObject, as_ref_unsafe, collect_list, ctor::LeanCtorObject, - lean_is_scalar, nat::Nat, string::LeanStringObject, + collect_list, lean_array_to_vec_with, lean_ctor_objs, lean_is_scalar, + lean_obj_to_string, lean_tag, nat::Nat, 
}, lean_unbox, }; @@ -99,8 +99,7 @@ impl<'g> Cache<'g> { fn collect_list_ptrs(mut ptr: *const c_void) -> Vec<*const c_void> { let mut ptrs = Vec::new(); while !lean_is_scalar(ptr) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [head_ptr, tail_ptr] = ctor.objs(); + let [head_ptr, tail_ptr] = lean_ctor_objs(ptr); ptrs.push(head_ptr); ptr = tail_ptr; } @@ -118,14 +117,12 @@ pub fn lean_ptr_to_name(ptr: *const c_void, global: &GlobalCache) -> Name { let name = if lean_is_scalar(ptr) { Name::anon() } else { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [pre_ptr, pos_ptr] = ctor.objs(); + let [pre_ptr, pos_ptr] = lean_ctor_objs(ptr); // Recursive call - will also use global cache let pre = lean_ptr_to_name(pre_ptr, global); - match ctor.tag() { + match lean_tag(ptr) { 1 => { - let str_obj: &LeanStringObject = as_ref_unsafe(pos_ptr.cast()); - Name::str(pre, str_obj.as_string()) + Name::str(pre, lean_obj_to_string(pos_ptr)) }, 2 => Name::num(pre, Nat::from_ptr(pos_ptr)), _ => unreachable!(), @@ -143,26 +140,25 @@ fn lean_ptr_to_level(ptr: *const c_void, cache: &mut Cache<'_>) -> Level { let level = if lean_is_scalar(ptr) { Level::zero() } else { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { + match lean_tag(ptr) { 1 => { - let [u] = ctor.objs().map(|p| lean_ptr_to_level(p, cache)); + let [u] = lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_level(p, cache)); Level::succ(u) }, 2 => { - let [u, v] = ctor.objs().map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); Level::max(u, v) }, 3 => { - let [u, v] = ctor.objs().map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); Level::imax(u, v) }, 4 => { - let [name] = ctor.objs().map(|p| lean_ptr_to_name(p, cache.global)); + let [name] = lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); Level::param(name) }, 5 => { - let [name] 
= ctor.objs().map(|p| lean_ptr_to_name(p, cache.global)); + let [name] = lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); Level::mvar(name) }, _ => unreachable!(), @@ -173,10 +169,8 @@ fn lean_ptr_to_level(ptr: *const c_void, cache: &mut Cache<'_>) -> Level { } fn lean_ptr_to_substring(ptr: *const c_void) -> Substring { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [str_ptr, start_pos_ptr, stop_pos_ptr] = ctor.objs(); - let str: &LeanStringObject = as_ref_unsafe(str_ptr.cast()); - let str = str.as_string(); + let [str_ptr, start_pos_ptr, stop_pos_ptr] = lean_ctor_objs(ptr); + let str = lean_obj_to_string(str_ptr); let start_pos = Nat::from_ptr(start_pos_ptr); let stop_pos = Nat::from_ptr(stop_pos_ptr); Substring { str, start_pos, stop_pos } @@ -186,10 +180,9 @@ fn lean_ptr_to_source_info(ptr: *const c_void) -> SourceInfo { if lean_is_scalar(ptr) { return SourceInfo::None; } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { + match lean_tag(ptr) { 0 => { - let [leading_ptr, pos_ptr, trailing_ptr, end_pos_ptr] = ctor.objs(); + let [leading_ptr, pos_ptr, trailing_ptr, end_pos_ptr] = lean_ctor_objs(ptr); let leading = lean_ptr_to_substring(leading_ptr); let pos = Nat::from_ptr(pos_ptr); let trailing = lean_ptr_to_substring(trailing_ptr); @@ -197,7 +190,7 @@ fn lean_ptr_to_source_info(ptr: *const c_void) -> SourceInfo { SourceInfo::Original(leading, pos, trailing, end_pos) }, 1 => { - let [pos_ptr, end_pos_ptr, canonical_ptr] = ctor.objs(); + let [pos_ptr, end_pos_ptr, canonical_ptr] = lean_ctor_objs(ptr); let pos = Nat::from_ptr(pos_ptr); let end_pos = Nat::from_ptr(end_pos_ptr); let canonical = canonical_ptr as usize == 1; @@ -211,20 +204,16 @@ fn lean_ptr_to_syntax_preresolved( ptr: *const c_void, cache: &mut Cache<'_>, ) -> SyntaxPreresolved { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { + match lean_tag(ptr) { 0 => { - let [name_ptr] = ctor.objs(); + let [name_ptr] = 
lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, cache.global); SyntaxPreresolved::Namespace(name) }, 1 => { - let [name_ptr, fields_ptr] = ctor.objs(); + let [name_ptr, fields_ptr] = lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, cache.global); - let fields = collect_list(fields_ptr, |p| { - let str: &LeanStringObject = as_ref_unsafe(p.cast()); - str.as_string() - }); + let fields = collect_list(fields_ptr, lean_obj_to_string); SyntaxPreresolved::Decl(name, fields) }, _ => unreachable!(), @@ -235,28 +224,22 @@ fn lean_ptr_to_syntax(ptr: *const c_void, cache: &mut Cache<'_>) -> Syntax { if lean_is_scalar(ptr) { return Syntax::Missing; } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - match ctor.tag() { + match lean_tag(ptr) { 1 => { - let [info_ptr, kind_ptr, args_ptr] = ctor.objs(); + let [info_ptr, kind_ptr, args_ptr] = lean_ctor_objs(ptr); let info = lean_ptr_to_source_info(info_ptr); let kind = lean_ptr_to_name(kind_ptr, cache.global); - let args_array: &LeanArrayObject = as_ref_unsafe(args_ptr.cast()); - let args: Vec<_> = args_array - .data() - .iter() - .map(|&p| lean_ptr_to_syntax(p, cache)) - .collect(); + let args: Vec<_> = + lean_array_to_vec_with(args_ptr, lean_ptr_to_syntax, cache); Syntax::Node(info, kind, args) }, 2 => { - let [info_ptr, val_ptr] = ctor.objs(); + let [info_ptr, val_ptr] = lean_ctor_objs(ptr); let info = lean_ptr_to_source_info(info_ptr); - let val_str: &LeanStringObject = as_ref_unsafe(val_ptr.cast()); - Syntax::Atom(info, val_str.as_string()) + Syntax::Atom(info, lean_obj_to_string(val_ptr)) }, 3 => { - let [info_ptr, raw_val_ptr, val_ptr, preresolved_ptr] = ctor.objs(); + let [info_ptr, raw_val_ptr, val_ptr, preresolved_ptr] = lean_ctor_objs(ptr); let info = lean_ptr_to_source_info(info_ptr); let raw_val = lean_ptr_to_substring(raw_val_ptr); let val = lean_ptr_to_name(val_ptr, cache.global); @@ -274,24 +257,20 @@ fn lean_ptr_to_name_data_value( ptr: *const c_void, cache: &mut Cache<'_>, ) -> (Name, 
DataValue) { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [name_ptr, data_value_ptr] = ctor.objs(); + let [name_ptr, data_value_ptr] = lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, cache.global); - let data_value_ctor: &LeanCtorObject = as_ref_unsafe(data_value_ptr.cast()); - let [inner_ptr] = data_value_ctor.objs(); - let data_value = match data_value_ctor.tag() { + let [inner_ptr] = lean_ctor_objs(data_value_ptr); + let data_value = match lean_tag(data_value_ptr) { 0 => { - let str: &LeanStringObject = as_ref_unsafe(inner_ptr.cast()); - DataValue::OfString(str.as_string()) + DataValue::OfString(lean_obj_to_string(inner_ptr)) }, 1 => DataValue::OfBool(inner_ptr as usize == 1), 2 => DataValue::OfName(lean_ptr_to_name(inner_ptr, cache.global)), 3 => DataValue::OfNat(Nat::from_ptr(inner_ptr)), 4 => { - let int_ctor: &LeanCtorObject = as_ref_unsafe(inner_ptr.cast()); - let [nat_ptr] = int_ctor.objs(); + let [nat_ptr] = lean_ctor_objs(inner_ptr); let nat = Nat::from_ptr(nat_ptr); - let int = match int_ctor.tag() { + let int = match lean_tag(inner_ptr) { 0 => Int::OfNat(nat), 1 => Int::NegSucc(nat), _ => unreachable!(), @@ -308,30 +287,29 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { if let Some(cached) = cache.local.exprs.get(&ptr) { return cached.clone(); } - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let expr = match ctor.tag() { + let expr = match lean_tag(ptr) { 0 => { - let [nat_ptr, _hash_ptr] = ctor.objs(); + let [nat_ptr, _hash_ptr] = lean_ctor_objs(ptr); let nat = Nat::from_ptr(nat_ptr.cast()); Expr::bvar(nat) }, 1 => { - let [name_ptr, _hash_ptr] = ctor.objs(); + let [name_ptr, _hash_ptr] = lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, cache.global); Expr::fvar(name) }, 2 => { - let [name_ptr, _hash_ptr] = ctor.objs(); + let [name_ptr, _hash_ptr] = lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, cache.global); Expr::mvar(name) }, 3 => { - let [u_ptr, 
_hash_ptr] = ctor.objs(); + let [u_ptr, _hash_ptr] = lean_ctor_objs(ptr); let u = lean_ptr_to_level(u_ptr, cache); Expr::sort(u) }, 4 => { - let [name_ptr, levels_ptr, _hash_ptr] = ctor.objs(); + let [name_ptr, levels_ptr, _hash_ptr] = lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, cache.global); let levels = collect_list_ptrs(levels_ptr) .into_iter() @@ -340,7 +318,7 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { Expr::cnst(name, levels) }, 5 => { - let [f_ptr, a_ptr, _hash_ptr] = ctor.objs(); + let [f_ptr, a_ptr, _hash_ptr] = lean_ctor_objs(ptr); let f = lean_ptr_to_expr(f_ptr, cache); let a = lean_ptr_to_expr(a_ptr, cache); Expr::app(f, a) @@ -352,7 +330,7 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { body_ptr, _hash_ptr, binder_info_ptr, - ] = ctor.objs(); + ] = lean_ctor_objs(ptr); let binder_name = lean_ptr_to_name(binder_name_ptr, cache.global); let binder_typ = lean_ptr_to_expr(binder_typ_ptr, cache); let body = lean_ptr_to_expr(body_ptr, cache); @@ -372,7 +350,7 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { body_ptr, _hash_ptr, binder_info_ptr, - ] = ctor.objs(); + ] = lean_ctor_objs(ptr); let binder_name = lean_ptr_to_name(binder_name_ptr, cache.global); let binder_typ = lean_ptr_to_expr(binder_typ_ptr, cache); let body = lean_ptr_to_expr(body_ptr, cache); @@ -387,7 +365,7 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { }, 8 => { let [decl_name_ptr, typ_ptr, value_ptr, body_ptr, _hash_ptr, nondep_ptr] = - ctor.objs(); + lean_ctor_objs(ptr); let decl_name = lean_ptr_to_name(decl_name_ptr, cache.global); let typ = lean_ptr_to_expr(typ_ptr, cache); let value = lean_ptr_to_expr(value_ptr, cache); @@ -396,23 +374,21 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { Expr::letE(decl_name, typ, value, body, nondep) }, 9 => { - let [literal_ptr, _hash_ptr] = ctor.objs(); - let literal: 
&LeanCtorObject = as_ref_unsafe(literal_ptr.cast()); - let [inner_ptr] = literal.objs(); - match literal.tag() { + let [literal_ptr, _hash_ptr] = lean_ctor_objs(ptr); + let [inner_ptr] = lean_ctor_objs(literal_ptr); + match lean_tag(literal_ptr) { 0 => { let nat = Nat::from_ptr(inner_ptr); Expr::lit(Literal::NatVal(nat)) }, 1 => { - let str: &LeanStringObject = as_ref_unsafe(inner_ptr.cast()); - Expr::lit(Literal::StrVal(str.as_string())) + Expr::lit(Literal::StrVal(lean_obj_to_string(inner_ptr))) }, _ => unreachable!(), } }, 10 => { - let [data_ptr, expr_ptr] = ctor.objs(); + let [data_ptr, expr_ptr] = lean_ctor_objs(ptr); let kv_map: Vec<_> = collect_list_ptrs(data_ptr) .into_iter() .map(|p| lean_ptr_to_name_data_value(p, cache)) @@ -421,7 +397,7 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { Expr::mdata(kv_map, expr) }, 11 => { - let [typ_name_ptr, idx_ptr, struct_ptr] = ctor.objs(); + let [typ_name_ptr, idx_ptr, struct_ptr] = lean_ctor_objs(ptr); let typ_name = lean_ptr_to_name(typ_name_ptr, cache.global); let idx = Nat::from_ptr(idx_ptr); let struct_expr = lean_ptr_to_expr(struct_ptr, cache); @@ -437,8 +413,7 @@ fn lean_ptr_to_recursor_rule( ptr: *const c_void, cache: &mut Cache<'_>, ) -> RecursorRule { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [ctor_ptr, n_fields_ptr, rhs_ptr] = ctor.objs(); + let [ctor_ptr, n_fields_ptr, rhs_ptr] = lean_ctor_objs(ptr); let ctor = lean_ptr_to_name(ctor_ptr, cache.global); let n_fields = Nat::from_ptr(n_fields_ptr); let rhs = lean_ptr_to_expr(rhs_ptr, cache); @@ -449,8 +424,7 @@ fn lean_ptr_to_constant_val( ptr: *const c_void, cache: &mut Cache<'_>, ) -> ConstantVal { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [name_ptr, level_params_ptr, typ_ptr] = ctor.objs(); + let [name_ptr, level_params_ptr, typ_ptr] = lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, cache.global); let level_params: Vec<_> = collect_list_ptrs(level_params_ptr) .into_iter() 
@@ -464,20 +438,18 @@ pub fn lean_ptr_to_constant_info( ptr: *const c_void, cache: &mut Cache<'_>, ) -> ConstantInfo { - let ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [inner_val_ptr] = ctor.objs(); - let inner_val: &LeanCtorObject = as_ref_unsafe(inner_val_ptr.cast()); + let [inner_val_ptr] = lean_ctor_objs(ptr); - match ctor.tag() { + match lean_tag(ptr) { 0 => { - let [constant_val_ptr, is_unsafe_ptr] = inner_val.objs(); + let [constant_val_ptr, is_unsafe_ptr] = lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let is_unsafe = is_unsafe_ptr as usize == 1; ConstantInfo::AxiomInfo(AxiomVal { cnst: constant_val, is_unsafe }) }, 1 => { let [constant_val_ptr, value_ptr, hints_ptr, all_ptr, safety_ptr] = - inner_val.objs(); + lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let value = lean_ptr_to_expr(value_ptr, cache); let hints = if lean_is_scalar(hints_ptr) { @@ -487,8 +459,7 @@ pub fn lean_ptr_to_constant_info( _ => unreachable!(), } } else { - let hints_ctor: &LeanCtorObject = as_ref_unsafe(hints_ptr.cast()); - let [height_ptr] = hints_ctor.objs(); + let [height_ptr] = lean_ctor_objs(hints_ptr); ReducibilityHints::Regular(height_ptr as u32) }; let all: Vec<_> = collect_list_ptrs(all_ptr) @@ -510,7 +481,7 @@ pub fn lean_ptr_to_constant_info( }) }, 2 => { - let [constant_val_ptr, value_ptr, all_ptr] = inner_val.objs(); + let [constant_val_ptr, value_ptr, all_ptr] = lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let value = lean_ptr_to_expr(value_ptr, cache); let all: Vec<_> = collect_list_ptrs(all_ptr) @@ -521,7 +492,7 @@ pub fn lean_ptr_to_constant_info( }, 3 => { let [constant_val_ptr, value_ptr, all_ptr, is_unsafe_ptr] = - inner_val.objs(); + lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let value = lean_ptr_to_expr(value_ptr, cache); let 
all: Vec<_> = collect_list_ptrs(all_ptr) @@ -537,7 +508,7 @@ pub fn lean_ptr_to_constant_info( }) }, 4 => { - let [constant_val_ptr, kind_ptr] = inner_val.objs(); + let [constant_val_ptr, kind_ptr] = lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let kind = match kind_ptr as usize { 0 => QuotKind::Type, @@ -557,7 +528,7 @@ pub fn lean_ptr_to_constant_info( ctors_ptr, num_nested_ptr, bools_ptr, - ] = inner_val.objs(); + ] = lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let num_params = Nat::from_ptr(num_params_ptr); let num_indices = Nat::from_ptr(num_indices_ptr); @@ -592,7 +563,7 @@ pub fn lean_ptr_to_constant_info( num_params_ptr, num_fields_ptr, is_unsafe_ptr, - ] = inner_val.objs(); + ] = lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let induct = lean_ptr_to_name(induct_ptr, cache.global); let cidx = Nat::from_ptr(cidx_ptr); @@ -618,7 +589,7 @@ pub fn lean_ptr_to_constant_info( num_minors_ptr, rules_ptr, bools_ptr, - ] = inner_val.objs(); + ] = lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let all: Vec<_> = collect_list_ptrs(all_ptr) .into_iter() @@ -656,8 +627,7 @@ fn decode_name_constant_info( global: &GlobalCache, ) -> (Name, ConstantInfo) { let mut cache = Cache::new(global); - let prod_ctor: &LeanCtorObject = as_ref_unsafe(ptr.cast()); - let [name_ptr, constant_info_ptr] = prod_ctor.objs(); + let [name_ptr, constant_info_ptr] = lean_ctor_objs(ptr); let name = lean_ptr_to_name(name_ptr, global); let constant_info = lean_ptr_to_constant_info(constant_info_ptr, &mut cache); (name, constant_info) diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs index 11ed850c..aa84784e 100644 --- a/src/lean/ffi/primitives.rs +++ b/src/lean/ffi/primitives.rs @@ -8,15 +8,15 @@ use std::ffi::c_void; -use crate::lean::array::LeanArrayObject; +use 
crate::lean::lean::{ + lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_get_core, + lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_mk_string, + lean_obj_tag, lean_sarray_cptr, lean_uint64_to_nat, +}; use crate::lean::nat::Nat; -use crate::lean::sarray::LeanSArrayObject; -use crate::lean::string::LeanStringObject; use crate::lean::{ - as_ref_unsafe, lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, - lean_array_get_core, lean_array_set_core, lean_box_fn, lean_ctor_get, - lean_ctor_set, lean_is_scalar, lean_mk_string, lean_obj_tag, - lean_sarray_cptr, lean_uint64_to_nat, + lean_array_data, lean_box_fn, lean_is_scalar, lean_obj_to_string, + lean_sarray_data, }; // ============================================================================= @@ -33,7 +33,7 @@ pub fn build_nat(n: &Nat) -> *mut c_void { return lean_box_fn(val as usize); } // For larger u64 values, use lean_uint64_to_nat - return unsafe { lean_uint64_to_nat(val) }; + return unsafe { lean_uint64_to_nat(val).cast() }; } // For values larger than u64, convert to limbs and use GMP let bytes = n.to_le_bytes(); @@ -63,12 +63,11 @@ pub extern "C" fn rs_roundtrip_nat(nat_ptr: *const c_void) -> *mut c_void { #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_string(s_ptr: *const c_void) -> *mut c_void { // Decode - let s_obj: &LeanStringObject = as_ref_unsafe(s_ptr.cast()); - let s = s_obj.as_string(); + let s = lean_obj_to_string(s_ptr); // Re-encode unsafe { let cstr = crate::lean::safe_cstring(s.as_str()); - lean_mk_string(cstr.as_ptr()) + lean_mk_string(cstr.as_ptr()).cast() } } @@ -89,9 +88,8 @@ pub extern "C" fn rs_roundtrip_array_nat( arr_ptr: *const c_void, ) -> *mut c_void { // Decode array - let arr_obj: &LeanArrayObject = as_ref_unsafe(arr_ptr.cast()); let nats: Vec = - arr_obj.data().iter().map(|&p| Nat::from_ptr(p)).collect(); + lean_array_data(arr_ptr).iter().map(|&p| Nat::from_ptr(p)).collect(); // Re-encode as Lean Array build_array_nat(&nats) } @@ -100,14 +98,13 @@ 
pub extern "C" fn rs_roundtrip_array_nat( #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_bytearray(ba_ptr: *const c_void) -> *mut c_void { // Decode ByteArray (scalar array of u8) - let sarray: &LeanSArrayObject = as_ref_unsafe(ba_ptr.cast()); - let bytes = sarray.data(); + let bytes = lean_sarray_data(ba_ptr); // Re-encode unsafe { let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); let data_ptr = lean_sarray_cptr(ba); std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - ba + ba.cast() } } @@ -127,16 +124,16 @@ pub extern "C" fn rs_roundtrip_bool(bool_ptr: *const c_void) -> *mut c_void { fn build_list_nat(nats: &[Nat]) -> *mut c_void { unsafe { // Build list in reverse (cons builds from the end) - let mut list = lean_box_fn(0); // nil + let mut list = lean_box_fn(0).cast(); // nil for nat in nats.iter().rev() { let nat_obj = build_nat(nat); // cons : α → List α → List α (tag 1, 2 object fields) let cons = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(cons, 0, nat_obj); + lean_ctor_set(cons, 0, nat_obj.cast()); lean_ctor_set(cons, 1, list); list = cons; } - list + list.cast() } } @@ -146,9 +143,9 @@ fn build_array_nat(nats: &[Nat]) -> *mut c_void { let arr = lean_alloc_array(nats.len(), nats.len()); for (i, nat) in nats.iter().enumerate() { let nat_obj = build_nat(nat); - lean_array_set_core(arr, i, nat_obj); + lean_array_set_core(arr, i, nat_obj.cast()); } - arr + arr.cast() } } @@ -166,14 +163,14 @@ pub extern "C" fn rs_roundtrip_point(point_ptr: *const c_void) -> *mut c_void { let y_ptr = lean_ctor_get(point_ptr as *mut _, 1); // Decode the Nats - let x = Nat::from_ptr(x_ptr); - let y = Nat::from_ptr(y_ptr); + let x = Nat::from_ptr(x_ptr.cast()); + let y = Nat::from_ptr(y_ptr.cast()); // Re-encode as Point let point = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(point, 0, build_nat(&x)); - lean_ctor_set(point, 1, build_nat(&y)); - point + lean_ctor_set(point, 0, build_nat(&x).cast()); + lean_ctor_set(point, 1, build_nat(&y).cast()); + 
point.cast() } } @@ -192,21 +189,21 @@ fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> *mut c_void { 0 => { // leaf : Nat → NatTree let nat_ptr = lean_ctor_get(tree_ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr); + let nat = Nat::from_ptr(nat_ptr.cast()); let leaf = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(leaf, 0, build_nat(&nat)); - leaf + lean_ctor_set(leaf, 0, build_nat(&nat).cast()); + leaf.cast() }, 1 => { // node : NatTree → NatTree → NatTree let left_ptr = lean_ctor_get(tree_ptr as *mut _, 0); let right_ptr = lean_ctor_get(tree_ptr as *mut _, 1); - let left = roundtrip_nat_tree_recursive(left_ptr); - let right = roundtrip_nat_tree_recursive(right_ptr); + let left = roundtrip_nat_tree_recursive(left_ptr.cast()); + let right = roundtrip_nat_tree_recursive(right_ptr.cast()); let node = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(node, 0, left); - lean_ctor_set(node, 1, right); - node + lean_ctor_set(node, 0, left.cast()); + lean_ctor_set(node, 1, right.cast()); + node.cast() }, _ => panic!("Invalid NatTree tag: {}", tag), } @@ -234,15 +231,15 @@ fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> *mut c_void { unsafe { // Build in reverse to preserve order // AssocList.nil with 0 fields is represented as lean_box(0) - let mut list = lean_box_fn(0); + let mut list = lean_box_fn(0).cast(); for (k, v) in pairs.iter().rev() { let cons = lean_alloc_ctor(1, 3, 0); // AssocList.cons - lean_ctor_set(cons, 0, build_nat(k)); - lean_ctor_set(cons, 1, build_nat(v)); + lean_ctor_set(cons, 0, build_nat(k).cast()); + lean_ctor_set(cons, 1, build_nat(v).cast()); lean_ctor_set(cons, 2, list); list = cons; } - list + list.cast() } } @@ -259,14 +256,14 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( let size_ptr = lean_ctor_get(raw_ptr as *mut _, 0); let buckets_ptr = lean_ctor_get(raw_ptr as *mut _, 1); - let size = Nat::from_ptr(size_ptr); + let size = Nat::from_ptr(size_ptr.cast()); // Decode and rebuild buckets - let buckets_obj: &LeanArrayObject = 
as_ref_unsafe(buckets_ptr.cast()); - let num_buckets = buckets_obj.data().len(); + let buckets_data = lean_array_data(buckets_ptr.cast()); + let num_buckets = buckets_data.len(); let mut all_pairs: Vec<(Nat, Nat)> = Vec::new(); - for &bucket_ptr in buckets_obj.data() { + for &bucket_ptr in buckets_data { let pairs = decode_assoc_list_nat_nat(bucket_ptr); all_pairs.extend(pairs); } @@ -274,7 +271,7 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( // Rebuild buckets let new_buckets = lean_alloc_array(num_buckets, num_buckets); for i in 0..num_buckets { - lean_array_set_core(new_buckets, i, lean_box_fn(0)); // AssocList.nil + lean_array_set_core(new_buckets, i, lean_box_fn(0).cast()); // AssocList.nil } for (k, v) in &all_pairs { @@ -288,21 +285,20 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( #[allow(clippy::cast_possible_truncation)] let bucket_idx = (k_u64 as usize) & (num_buckets - 1); - let old_bucket = - lean_array_get_core(new_buckets, bucket_idx) as *mut c_void; + let old_bucket = lean_array_get_core(new_buckets, bucket_idx); let new_bucket = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(new_bucket, 0, build_nat(k)); - lean_ctor_set(new_bucket, 1, build_nat(v)); + lean_ctor_set(new_bucket, 0, build_nat(k).cast()); + lean_ctor_set(new_bucket, 1, build_nat(v).cast()); lean_ctor_set(new_bucket, 2, old_bucket); lean_array_set_core(new_buckets, bucket_idx, new_bucket); } // Build Raw let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, build_nat(&size)); + lean_ctor_set(raw, 0, build_nat(&size).cast()); lean_ctor_set(raw, 1, new_buckets); - raw + raw.cast() } } @@ -330,25 +326,25 @@ pub extern "C" fn rs_roundtrip_hashmap_nat_nat( let size_ptr = lean_ctor_get(map_ptr as *mut _, 0); let buckets_ptr = lean_ctor_get(map_ptr as *mut _, 1); - let size = Nat::from_ptr(size_ptr); + let size = Nat::from_ptr(size_ptr.cast()); // Decode buckets (Array of AssocLists) - let buckets_obj: &LeanArrayObject = as_ref_unsafe(buckets_ptr.cast()); + let buckets_data 
= lean_array_data(buckets_ptr.cast()); let mut pairs: Vec<(Nat, Nat)> = Vec::new(); - for &bucket_ptr in buckets_obj.data() { + for &bucket_ptr in buckets_data { // Each bucket is an AssocList let bucket_pairs = decode_assoc_list_nat_nat(bucket_ptr); pairs.extend(bucket_pairs); } // Rebuild the HashMap with the same bucket count - let num_buckets = buckets_obj.data().len(); + let num_buckets = buckets_data.len(); let new_buckets = lean_alloc_array(num_buckets, num_buckets); // Initialize all buckets to AssocList.nil (lean_box(0)) for i in 0..num_buckets { - lean_array_set_core(new_buckets, i, lean_box_fn(0)); // AssocList.nil + lean_array_set_core(new_buckets, i, lean_box_fn(0).cast()); // AssocList.nil } // Insert each pair into the appropriate bucket using Lean's hash function @@ -367,13 +363,12 @@ pub extern "C" fn rs_roundtrip_hashmap_nat_nat( let bucket_idx = (k_u64 as usize) & (num_buckets - 1); // Get current bucket AssocList - let old_bucket = - lean_array_get_core(new_buckets, bucket_idx) as *mut c_void; + let old_bucket = lean_array_get_core(new_buckets, bucket_idx); // Build AssocList.cons key value tail (tag 1, 3 fields) let new_bucket = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(new_bucket, 0, build_nat(k)); - lean_ctor_set(new_bucket, 1, build_nat(v)); + lean_ctor_set(new_bucket, 0, build_nat(k).cast()); + lean_ctor_set(new_bucket, 1, build_nat(v).cast()); lean_ctor_set(new_bucket, 2, old_bucket); lean_array_set_core(new_buckets, bucket_idx, new_bucket); @@ -382,10 +377,10 @@ pub extern "C" fn rs_roundtrip_hashmap_nat_nat( // Build Raw (ctor 0, 2 fields: size, buckets) // Due to unboxing, this IS the HashMap let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, build_nat(&size)); + lean_ctor_set(raw, 0, build_nat(&size).cast()); lean_ctor_set(raw, 1, new_buckets); - raw + raw.cast() } } @@ -413,11 +408,11 @@ pub fn decode_assoc_list_nat_nat(list_ptr: *const c_void) -> Vec<(Nat, Nat)> { let value_ptr = lean_ctor_get(current as *mut _, 1); let 
tail_ptr = lean_ctor_get(current as *mut _, 2); - let k = Nat::from_ptr(key_ptr); - let v = Nat::from_ptr(value_ptr); + let k = Nat::from_ptr(key_ptr.cast()); + let v = Nat::from_ptr(value_ptr.cast()); result.push((k, v)); - current = tail_ptr; + current = tail_ptr.cast(); } } @@ -433,12 +428,12 @@ pub fn decode_assoc_list_nat_nat(list_ptr: *const c_void) -> Vec<(Nat, Nat)> { /// This is essentially just a pointer cast - very fast. #[unsafe(no_mangle)] pub extern "C" fn rs_bytearray_to_u64_le(ba_ptr: *const c_void) -> u64 { + let data = lean_sarray_data(ba_ptr); + if data.len() < 8 { + return 0; + } unsafe { - let arr: &LeanSArrayObject = &*ba_ptr.cast::(); - if arr.data().len() < 8 { - return 0; - } - let data_ptr = lean_sarray_cptr(ba_ptr as *mut _); + let data_ptr = lean_sarray_cptr(ba_ptr.cast_mut().cast()); std::ptr::read_unaligned(data_ptr as *const u64) } } diff --git a/src/lean/ffi/unsigned.rs b/src/lean/ffi/unsigned.rs new file mode 100644 index 00000000..a947480a --- /dev/null +++ b/src/lean/ffi/unsigned.rs @@ -0,0 +1,31 @@ +use std::ffi::c_void; + +use crate::lean::{lean::lean_alloc_sarray, lean_sarray_set_data}; + +#[unsafe(no_mangle)] +extern "C" fn c_u16_to_le_bytes(v: u16) -> *mut c_void { + mk_byte_array(&v.to_le_bytes()) +} + +#[unsafe(no_mangle)] +extern "C" fn c_u32_to_le_bytes(v: u32) -> *mut c_void { + mk_byte_array(&v.to_le_bytes()) +} + +#[unsafe(no_mangle)] +extern "C" fn c_u64_to_le_bytes(v: u64) -> *mut c_void { + mk_byte_array(&v.to_le_bytes()) +} + +#[unsafe(no_mangle)] +extern "C" fn c_usize_to_le_bytes(v: usize) -> *mut c_void { + mk_byte_array(&v.to_le_bytes()) +} + +#[inline] +fn mk_byte_array(bytes: &[u8]) -> *mut c_void { + let len = bytes.len(); + let arr_ptr = unsafe { lean_alloc_sarray(1, len, len) }; + unsafe { lean_sarray_set_data(arr_ptr.cast(), bytes) }; + arr_ptr.cast() +} diff --git a/src/lean/nat.rs b/src/lean/nat.rs index 65b1c495..2ee56fad 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -10,7 +10,7 @@ use 
std::mem::MaybeUninit; use num_bigint::BigUint; use crate::{ - lean::{as_ref_unsafe, lean_is_scalar, object::LeanObject}, + lean::{as_ref_unsafe, lean_is_scalar}, lean_unbox, }; @@ -74,7 +74,7 @@ impl Nat { /// ``` #[repr(C)] struct MpzObject { - m_header: LeanObject, + _header: [u8; 8], m_value: Mpz, } @@ -102,7 +102,8 @@ impl Mpz { // GMP interop for building Lean Nat objects from limbs // ============================================================================= -use super::{lean_box_fn, lean_uint64_to_nat}; +use super::lean::lean_uint64_to_nat; +use super::lean_box_fn; /// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1 const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64; @@ -137,7 +138,7 @@ pub fn lean_nat_from_limbs(num_limbs: usize, limbs: *const u64) -> *mut c_void { return lean_box_fn(first as usize); } if num_limbs == 1 { - return unsafe { lean_uint64_to_nat(first) }; + return unsafe { lean_uint64_to_nat(first).cast() }; } // Multi-limb: use GMP unsafe { diff --git a/src/lean/object.rs b/src/lean/object.rs deleted file mode 100644 index 3aca5245..00000000 --- a/src/lean/object.rs +++ /dev/null @@ -1,30 +0,0 @@ -/// ```c -/// typedef struct { -/// int m_rc; -/// unsigned m_cs_sz:16; -/// unsigned m_other:8; -/// unsigned m_tag:8; -/// } lean_object; -/// ``` -#[repr(C)] -pub struct LeanObject { - m_rc: i32, - packed_bits: u32, -} - -impl LeanObject { - #[inline] - pub fn m_cs_sz(&self) -> u16 { - (self.packed_bits & 0xFFFF) as u16 - } - - #[inline] - pub fn m_other(&self) -> u8 { - ((self.packed_bits >> 16) & 0xFF) as u8 - } - - #[inline] - pub fn m_tag(&self) -> u8 { - ((self.packed_bits >> 24) & 0xFF) as u8 - } -} diff --git a/src/lean/sarray.rs b/src/lean/sarray.rs deleted file mode 100644 index b3b5789e..00000000 --- a/src/lean/sarray.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Lean scalar array (`ByteArray`) object layout. 
- -use super::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// size_t m_size; -/// size_t m_capacity; -/// uint8_t m_data[]; -/// } lean_sarray_object; -/// ``` -#[repr(C)] -pub struct LeanSArrayObject { - m_header: LeanObject, - m_size: usize, - m_capacity: usize, - m_data: CArray, -} - -impl LeanSArrayObject { - #[inline] - pub fn data(&self) -> &[u8] { - self.m_data.slice(self.m_size) - } - - #[inline] - pub fn data_mut(&mut self) -> &mut [u8] { - self.m_data.slice_mut(self.m_size) - } - - pub fn set_data(&mut self, data: &[u8]) { - assert!(self.m_capacity >= data.len()); - self.m_data.copy_from_slice(data); - self.m_size = data.len(); - } -} diff --git a/src/lean/string.rs b/src/lean/string.rs deleted file mode 100644 index 40bd415f..00000000 --- a/src/lean/string.rs +++ /dev/null @@ -1,27 +0,0 @@ -use crate::lean::{CArray, object::LeanObject}; - -/// ```c -/// typedef struct { -/// lean_object m_header; -/// size_t m_size; /* byte length including '\0' terminator */ -/// size_t m_capacity; -/// size_t m_length; /* UTF8 length */ -/// char m_data[]; -/// } lean_string_object; -/// ``` -#[repr(C)] -pub struct LeanStringObject { - m_header: LeanObject, - m_size: usize, - m_capacity: usize, - m_length: usize, - m_data: CArray, -} - -impl LeanStringObject { - #[inline] - pub fn as_string(&self) -> String { - let bytes = self.m_data.slice(self.m_size - 1); // Ignore the last '\0' - unsafe { String::from_utf8_unchecked(bytes.to_vec()) } - } -} diff --git a/src/sha256.rs b/src/sha256.rs index 61d1c142..5f5c6255 100644 --- a/src/sha256.rs +++ b/src/sha256.rs @@ -1,17 +1,16 @@ use sha2::{Digest, Sha256}; use std::ffi::c_void; -use crate::lean::{as_mut_unsafe, lean_alloc_sarray, sarray::LeanSArrayObject}; +use crate::lean::{lean::lean_alloc_sarray, lean_sarray_data, lean_sarray_set_data}; #[unsafe(no_mangle)] -extern "C" fn rs_sha256(bytes: &LeanSArrayObject) -> *mut c_void { +extern "C" fn rs_sha256(bytes: *const c_void) -> 
*mut c_void { let mut hasher = Sha256::new(); - hasher.update(bytes.data()); + hasher.update(lean_sarray_data(bytes)); let digest = hasher.finalize(); let digest_slice = digest.as_slice(); assert_eq!(digest_slice.len(), 32); let arr_ptr = unsafe { lean_alloc_sarray(1, 32, 32) }; - let arr: &mut LeanSArrayObject = as_mut_unsafe(arr_ptr.cast()); - arr.set_data(digest_slice); - arr_ptr + unsafe { lean_sarray_set_data(arr_ptr.cast(), digest_slice) }; + arr_ptr.cast() } From a39288c03ba3975b7960cb7e5b5e5d7ec5aecbc2 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Fri, 27 Feb 2026 18:00:29 -0500 Subject: [PATCH 03/27] Fmt & clippy --- .github/workflows/ci.yml | 30 +++++++--------- build.rs | 8 ++--- lakefile.lean | 1 + src/iroh/_client.rs | 3 +- src/iroh/client.rs | 24 ++++++------- src/lean.rs | 58 +++++++++++++++++++++++-------- src/lean/ffi.rs | 2 +- src/lean/ffi/aiur/protocol.rs | 62 +++++++++++++++++++-------------- src/lean/ffi/aiur/toplevel.rs | 7 ++-- src/lean/ffi/byte_array.rs | 5 +-- src/lean/ffi/compile.rs | 46 +++++++++---------------- src/lean/ffi/ix/address.rs | 3 +- src/lean/ffi/ix/constant.rs | 18 ++++++---- src/lean/ffi/ix/data.rs | 25 ++++++++------ src/lean/ffi/ix/expr.rs | 18 ++++++---- src/lean/ffi/ix/name.rs | 2 +- src/lean/ffi/ixon/constant.rs | 14 ++++++-- src/lean/ffi/ixon/expr.rs | 30 +++++++++------- src/lean/ffi/ixon/meta.rs | 65 ++++++++++++++++++++++++----------- src/lean/ffi/ixon/sharing.rs | 3 +- src/lean/ffi/ixon/univ.rs | 5 ++- src/lean/ffi/keccak.rs | 25 ++++++++++---- src/lean/ffi/lean_env.rs | 33 +++++++++--------- src/lean/ffi/primitives.rs | 2 +- src/lean/nat.rs | 5 ++- src/sha256.rs | 4 ++- 26 files changed, 292 insertions(+), 206 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ab74042..98f11982 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,13 +17,13 @@ jobs: lean-test: runs-on: warp-ubuntu-latest-x64-16x steps: - 
- uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: leanprover/lean-action@v1 with: build-args: "--wfail -v" + # TODO: These should both run automatically test: true - - name: Build all targets - run: lake run build-all --wfail + lint: true - name: Test Ix CLI run: lake test -- cli - name: Aiur tests @@ -41,28 +41,22 @@ jobs: repository: argumentcomputer/ci-workflows - uses: ./.github/actions/ci-env - uses: actions/checkout@v6 - - uses: dtolnay/rust-toolchain@stable - - uses: taiki-e/install-action@nextest - - uses: Swatinem/rust-cache@v2 - - name: Tests - run: cargo nextest run --release --profile ci --workspace --run-ignored all - - rust-lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v6 + # Install Lean for rust-bindgen step + - uses: leanprover/lean-action@v1 with: - repository: argumentcomputer/ci-workflows - - uses: ./.github/actions/ci-env - - uses: actions/checkout@v6 + build: false + use-github-cache: false - uses: dtolnay/rust-toolchain@stable + - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 - name: Check Rustfmt code style run: cargo fmt --all --check - - name: Check *everything* compiles - run: cargo check --all-targets --all-features --workspace - name: Check clippy warnings run: cargo xclippy -D warnings + - name: Check *everything* compiles + run: cargo check --all-targets --all-features --workspace + - name: Tests + run: cargo nextest run --release --profile ci --workspace --run-ignored all - name: Get Rust version run: | echo "RUST_VERSION=$(awk -F '"' '/^channel/ {print $2}' rust-toolchain.toml)" | tee -a $GITHUB_ENV diff --git a/build.rs b/build.rs index 8f3f0a44..f6aca34c 100644 --- a/build.rs +++ b/build.rs @@ -9,16 +9,14 @@ fn find_lean_include_dir() -> PathBuf { } } // 2. 
Try `lean --print-prefix` - if let Ok(output) = Command::new("lean").arg("--print-prefix").output() { - if output.status.success() { - let prefix = - String::from_utf8_lossy(&output.stdout).trim().to_string(); + if let Ok(output) = Command::new("lean").arg("--print-prefix").output() + && output.status.success() { + let prefix = String::from_utf8_lossy(&output.stdout).trim().to_string(); let inc = PathBuf::from(prefix).join("include"); if inc.exists() { return inc; } } - } panic!( "Cannot find Lean include directory. \ Set LEAN_SYSROOT or ensure `lean` is on PATH." diff --git a/lakefile.lean b/lakefile.lean index 6cdde581..b37dfa26 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -142,6 +142,7 @@ script "get-exe-targets" := do IO.println <| tgt.name.toString |>.dropPrefix "«" |>.dropSuffix "»" |>.toString return 0 +@[lint_driver] script "build-all" (args) := do let pkg ← getRootPackage let libNames := pkg.configTargets LeanLib.configKind |>.map (·.name.toString) diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs index 7046f720..f99c06fe 100644 --- a/src/iroh/_client.rs +++ b/src/iroh/_client.rs @@ -2,8 +2,7 @@ use std::ffi::c_void; use crate::lean::lean_except_error_string; -const ERR_MSG: &str = - "Iroh functions not supported when the Rust `net` feature is disabled \ +const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS aarch64-darwin"; /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` diff --git a/src/iroh/client.rs b/src/iroh/client.rs index e9df4f53..bd88fb9e 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -10,7 +10,7 @@ use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; use crate::lean::{ - lean::{ lean_alloc_ctor, lean_alloc_sarray, lean_ctor_set, lean_mk_string }, + lean::{lean_alloc_ctor, lean_alloc_sarray, lean_ctor_set, lean_mk_string}, lean_array_to_vec, 
lean_except_error_string, lean_except_ok, lean_obj_to_string, lean_sarray_set_data, safe_cstring, }; @@ -79,12 +79,10 @@ extern "C" fn rs_iroh_put( match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { Ok(response) => match response { - Response::Put(put_response) => { - lean_except_ok(mk_put_response( - &put_response.message, - &put_response.hash, - )) - }, + Response::Put(put_response) => lean_except_ok(mk_put_response( + &put_response.message, + &put_response.hash, + )), _ => lean_except_error_string("error: incorrect server response"), }, Err(err) => lean_except_error_string(&err.to_string()), @@ -110,13 +108,11 @@ extern "C" fn rs_iroh_get( match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { Ok(response) => match response { - Response::Get(get_response) => { - lean_except_ok(mk_get_response( - &get_response.message, - &get_response.hash, - &get_response.bytes, - )) - }, + Response::Get(get_response) => lean_except_ok(mk_get_response( + &get_response.message, + &get_response.hash, + &get_response.bytes, + )), _ => lean_except_error_string("error: incorrect server response"), }, Err(err) => lean_except_error_string(&err.to_string()), diff --git a/src/lean.rs b/src/lean.rs index c33612a7..e938f92a 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -11,7 +11,12 @@ non_snake_case, dead_code, unsafe_op_in_unsafe_fn, - clippy::all + unused_qualifications, + clippy::all, + clippy::ptr_as_ptr, + clippy::cast_possible_wrap, + clippy::cast_possible_truncation, + clippy::derive_partial_eq_without_eq )] pub mod lean { include!(concat!(env!("OUT_DIR"), "/lean.rs")); @@ -77,7 +82,7 @@ macro_rules! 
lean_unbox { #[inline] pub fn lean_unbox_u32(ptr: *const c_void) -> u32 { - unsafe { lean::lean_unbox_uint32(ptr as *mut _) as u32 } + unsafe { lean::lean_unbox_uint32(ptr as *mut _) } } #[inline] @@ -92,43 +97,60 @@ pub fn lean_box_u64(v: u64) -> *mut c_void { pub fn lean_obj_to_string(ptr: *const c_void) -> String { unsafe { - let obj = ptr as *mut lean::lean_object; + let obj = ptr.cast_mut().cast::(); let len = lean::lean_string_size(obj) - 1; // m_size includes NUL let data = lean::lean_string_cstr(obj); - let bytes = std::slice::from_raw_parts(data as *const u8, len); + let bytes = std::slice::from_raw_parts(data.cast::(), len); String::from_utf8_unchecked(bytes.to_vec()) } } #[inline] pub fn lean_tag(ptr: *const c_void) -> u8 { + #[allow(clippy::cast_possible_truncation)] // tags always fit in u8 unsafe { lean::lean_obj_tag(ptr as *mut _) as u8 } } #[inline] -pub fn lean_ctor_objs(ptr: *const c_void) -> [*const c_void; N] { +pub fn lean_ctor_objs( + ptr: *const c_void, +) -> [*const c_void; N] { // Use raw pointer arithmetic instead of lean_ctor_get to avoid its // bounds-check assertion. Call sites legitimately read past the object // fields into the scalar area (e.g. Expr.Data hash, Bool/BinderInfo // stored as UInt8 scalars). This matches the old LeanCtorObject::objs(). 
- let base = unsafe { (ptr as *const *const c_void).add(1) }; + let base = unsafe { ptr.cast::<*const c_void>().add(1) }; std::array::from_fn(|i| unsafe { *base.add(i) }) } #[inline] -pub fn lean_ctor_scalar_u64(ptr: *const c_void, num_objs: usize, offset: usize) -> u64 { +pub fn lean_ctor_scalar_u64( + ptr: *const c_void, + num_objs: usize, + offset: usize, +) -> u64 { unsafe { - std::ptr::read_unaligned(ptr.cast::().add(8 + num_objs * 8 + offset).cast()) + std::ptr::read_unaligned( + ptr.cast::().add(8 + num_objs * 8 + offset).cast(), + ) } } #[inline] -pub fn lean_ctor_scalar_u8(ptr: *const c_void, num_objs: usize, offset: usize) -> u8 { +pub fn lean_ctor_scalar_u8( + ptr: *const c_void, + num_objs: usize, + offset: usize, +) -> u8 { unsafe { *ptr.cast::().add(8 + num_objs * 8 + offset) } } #[inline] -pub fn lean_ctor_scalar_bool(ptr: *const c_void, num_objs: usize, offset: usize) -> bool { +pub fn lean_ctor_scalar_bool( + ptr: *const c_void, + num_objs: usize, + offset: usize, +) -> bool { lean_ctor_scalar_u8(ptr, num_objs, offset) != 0 } @@ -139,7 +161,7 @@ pub fn lean_ctor_scalar_bool(ptr: *const c_void, num_objs: usize, offset: usize) /// Return a slice over the elements of a Lean `Array` object. pub fn lean_array_data(ptr: *const c_void) -> &'static [*const c_void] { unsafe { - let obj = ptr as *mut lean::lean_object; + let obj = ptr.cast_mut().cast::(); let size = lean::lean_array_size(obj); let cptr = lean::lean_array_cptr(obj); std::slice::from_raw_parts(cptr.cast(), size) @@ -147,7 +169,10 @@ pub fn lean_array_data(ptr: *const c_void) -> &'static [*const c_void] { } /// Convert a Lean `Array` to a `Vec` by mapping each element. 
-pub fn lean_array_to_vec(ptr: *const c_void, f: fn(*const c_void) -> T) -> Vec { +pub fn lean_array_to_vec( + ptr: *const c_void, + f: fn(*const c_void) -> T, +) -> Vec { lean_array_data(ptr).iter().map(|&p| f(p)).collect() } @@ -167,7 +192,7 @@ pub fn lean_array_to_vec_with( /// Return a byte slice over a Lean `ByteArray` (scalar array) object. pub fn lean_sarray_data(ptr: *const c_void) -> &'static [u8] { unsafe { - let obj = ptr as *mut lean::lean_object; + let obj = ptr.cast_mut().cast::(); let size = lean::lean_sarray_size(obj); let cptr = lean::lean_sarray_cptr(obj); std::slice::from_raw_parts(cptr, size) @@ -181,11 +206,11 @@ pub fn lean_sarray_data(ptr: *const c_void) -> &'static [u8] { /// with sufficient capacity for `data`. pub unsafe fn lean_sarray_set_data(ptr: *mut c_void, data: &[u8]) { unsafe { - let obj = ptr as *mut lean::lean_object; + let obj = ptr.cast::(); let cptr = lean::lean_sarray_cptr(obj); std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); // Update m_size: at offset 8 (after lean_object header) - *(ptr.cast::().add(8) as *mut usize) = data.len(); + *ptr.cast::().add(8).cast::() = data.len(); } } @@ -258,6 +283,9 @@ pub fn lean_except_error_string(msg: &str) -> *mut c_void { } /// No-op foreach callback for external classes that hold no Lean references. +/// +/// # Safety +/// Must only be used as a `lean_external_foreach_fn` callback. 
pub unsafe extern "C" fn noop_foreach( _: *mut c_void, _: *mut lean::lean_object, diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs index a0ff9e54..c92ba53c 100644 --- a/src/lean/ffi.rs +++ b/src/lean/ffi.rs @@ -21,7 +21,7 @@ unsafe impl Send for ExternalClassPtr {} unsafe impl Sync for ExternalClassPtr {} use crate::lean::{ - lean::{ lean_io_result_mk_error, lean_mk_io_user_error, lean_mk_string }, + lean::{lean_io_result_mk_error, lean_mk_io_user_error, lean_mk_string}, lean_array_to_vec, lean_sarray_data, lean_unbox_u32, }; diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs index fe0cd27f..09f233e0 100644 --- a/src/lean/ffi/aiur/protocol.rs +++ b/src/lean/ffi/aiur/protocol.rs @@ -14,8 +14,6 @@ use crate::{ synthesis::AiurSystem, }, lean::{ - lean_array_data, lean_array_to_vec, lean_sarray_data, lean_sarray_set_data, - lean_ctor_objs, ffi::{ ExternalClassPtr, aiur::{ @@ -24,13 +22,13 @@ use crate::{ drop_raw, to_raw, }, lean::{ - lean_alloc_array, lean_alloc_ctor, lean_alloc_external, lean_alloc_sarray, - lean_array_set_core, lean_ctor_set, lean_get_external_data, - lean_register_external_class, + lean_alloc_array, lean_alloc_ctor, lean_alloc_external, + lean_alloc_sarray, lean_array_set_core, lean_ctor_set, + lean_get_external_data, lean_register_external_class, }, - lean_box_fn, lean_box_u64, - lean_except_error_string, lean_except_ok, - noop_foreach, + lean_array_data, lean_array_to_vec, lean_box_fn, lean_box_u64, + lean_ctor_objs, lean_except_error_string, lean_except_ok, lean_sarray_data, + lean_sarray_set_data, noop_foreach, }, }; @@ -44,9 +42,15 @@ static AIUR_SYSTEM_CLASS: OnceLock = OnceLock::new(); fn get_aiur_proof_class() -> *mut c_void { AIUR_PROOF_CLASS .get_or_init(|| { - ExternalClassPtr(unsafe { - lean_register_external_class(Some(aiur_proof_finalizer), Some(noop_foreach)) - }.cast()) + ExternalClassPtr( + unsafe { + lean_register_external_class( + Some(aiur_proof_finalizer), + Some(noop_foreach), + ) + } + .cast(), + ) }) .0 
} @@ -54,19 +58,25 @@ fn get_aiur_proof_class() -> *mut c_void { fn get_aiur_system_class() -> *mut c_void { AIUR_SYSTEM_CLASS .get_or_init(|| { - ExternalClassPtr(unsafe { - lean_register_external_class(Some(aiur_system_finalizer), Some(noop_foreach)) - }.cast()) + ExternalClassPtr( + unsafe { + lean_register_external_class( + Some(aiur_system_finalizer), + Some(noop_foreach), + ) + } + .cast(), + ) }) .0 } extern "C" fn aiur_proof_finalizer(ptr: *mut c_void) { - drop_raw(ptr as *mut Proof); + drop_raw(ptr.cast::()); } extern "C" fn aiur_system_finalizer(ptr: *mut c_void) { - drop_raw(ptr as *mut AiurSystem); + drop_raw(ptr.cast::()); } // ============================================================================= @@ -75,10 +85,9 @@ extern "C" fn aiur_system_finalizer(ptr: *mut c_void) { /// `Aiur.Proof.toBytes : @& Proof → ByteArray` #[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_to_bytes( - proof_obj: *const c_void, -) -> *mut c_void { - let proof: &Proof = unsafe { &*lean_get_external_data(proof_obj as *mut _).cast() }; +extern "C" fn rs_aiur_proof_to_bytes(proof_obj: *const c_void) -> *mut c_void { + let proof: &Proof = + unsafe { &*lean_get_external_data(proof_obj as *mut _).cast() }; let bytes = proof.to_bytes().expect("Serialization error"); let len = bytes.len(); let arr_ptr = unsafe { lean_alloc_sarray(1, len, len) }; @@ -88,11 +97,9 @@ extern "C" fn rs_aiur_proof_to_bytes( /// `Aiur.Proof.ofBytes : @& ByteArray → Proof` #[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_of_bytes( - byte_array: *const c_void, -) -> *mut c_void { - let proof = - Proof::from_bytes(lean_sarray_data(byte_array)).expect("Deserialization error"); +extern "C" fn rs_aiur_proof_of_bytes(byte_array: *const c_void) -> *mut c_void { + let proof = Proof::from_bytes(lean_sarray_data(byte_array)) + .expect("Deserialization error"); let ptr = to_raw(proof) as *mut c_void; unsafe { lean_alloc_external(get_aiur_proof_class().cast(), ptr) }.cast() } @@ -164,7 +171,10 @@ extern "C" fn 
rs_aiur_system_prove( // proof: Proof (external object) let lean_proof = unsafe { - lean_alloc_external(get_aiur_proof_class().cast(), to_raw(proof) as *mut c_void) + lean_alloc_external( + get_aiur_proof_class().cast(), + to_raw(proof) as *mut c_void, + ) }; // io_data: Array G diff --git a/src/lean/ffi/aiur/toplevel.rs b/src/lean/ffi/aiur/toplevel.rs index 465f0ddb..f365c87a 100644 --- a/src/lean/ffi/aiur/toplevel.rs +++ b/src/lean/ffi/aiur/toplevel.rs @@ -9,9 +9,9 @@ use crate::{ bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, }, lean::{ - lean_array_to_vec, ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}, - lean_ctor_objs, lean_is_scalar, lean_obj_to_string, lean_tag, + lean_array_to_vec, lean_ctor_objs, lean_is_scalar, lean_obj_to_string, + lean_tag, }, }; @@ -205,6 +205,7 @@ fn lean_ptr_to_function(ptr: *const c_void) -> Function { pub(crate) fn lean_ptr_to_toplevel(ptr: *const c_void) -> Toplevel { let [functions_ptr, memory_sizes_ptr] = lean_ctor_objs(ptr); let functions = lean_array_to_vec(functions_ptr, lean_ptr_to_function); - let memory_sizes = lean_array_to_vec(memory_sizes_ptr, lean_unbox_nat_as_usize); + let memory_sizes = + lean_array_to_vec(memory_sizes_ptr, lean_unbox_nat_as_usize); Toplevel { functions, memory_sizes } } diff --git a/src/lean/ffi/byte_array.rs b/src/lean/ffi/byte_array.rs index 4fc41f53..6e986699 100644 --- a/src/lean/ffi/byte_array.rs +++ b/src/lean/ffi/byte_array.rs @@ -5,9 +5,6 @@ use crate::lean::lean_sarray_data; /// `@& ByteArray → @& ByteArray → Bool` /// Efficient implementation for `BEq ByteArray` #[unsafe(no_mangle)] -extern "C" fn rs_byte_array_beq( - a: *const c_void, - b: *const c_void, -) -> bool { +extern "C" fn rs_byte_array_beq(a: *const c_void, b: *const c_void) -> bool { lean_sarray_data(a) == lean_sarray_data(b) } diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs index 9a1fc342..977db279 100644 --- a/src/lean/ffi/compile.rs +++ b/src/lean/ffi/compile.rs @@ -29,8 +29,6 @@ 
use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; -use crate::lean::nat::Nat; -use crate::lean::{lean_obj_to_string, lean_sarray_data, lean_sarray_set_data}; use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_ctor_set_uint64, @@ -38,6 +36,8 @@ use crate::lean::lean::{ lean_mk_io_user_error, lean_mk_string, lean_obj_tag, lean_sarray_cptr, lean_uint64_to_nat, }; +use crate::lean::nat::Nat; +use crate::lean::{lean_obj_to_string, lean_sarray_data, lean_sarray_set_data}; use dashmap::DashMap; use dashmap::DashSet; @@ -1185,32 +1185,28 @@ pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { match tag { 0 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let expected = - lean_obj_to_string(str_ptr.cast()); + let expected = lean_obj_to_string(str_ptr.cast()); SerializeError::UnexpectedEof { expected } }, 1 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let tag_val = *base.add(8 + 8); - let context = - lean_obj_to_string(str_ptr.cast()); + let context = lean_obj_to_string(str_ptr.cast()); SerializeError::InvalidTag { tag: tag_val, context } }, 2 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let flag = *base.add(8 + 8); - let context = - lean_obj_to_string(str_ptr.cast()); + let context = lean_obj_to_string(str_ptr.cast()); SerializeError::InvalidFlag { flag, context } }, 3 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); let base = ptr.cast::(); let variant = *base.add(8 + 8).cast::(); - let context = - lean_obj_to_string(str_ptr.cast()); + let context = lean_obj_to_string(str_ptr.cast()); SerializeError::InvalidVariant { variant, context } }, 4 => { @@ -1334,8 +1330,7 @@ pub fn decode_decompile_error(ptr: *const c_void) -> 
DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = - lean_obj_to_string(str_ptr.cast()).clone(); + let constant = lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidRefIndex { idx, refs_len, constant } }, 1 => { @@ -1347,8 +1342,7 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = - lean_obj_to_string(str_ptr.cast()).clone(); + let constant = lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidUnivIndex { idx, univs_len, constant } }, 2 => { @@ -1360,8 +1354,7 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = - lean_obj_to_string(str_ptr.cast()).clone(); + let constant = lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidShareIndex { idx, max, constant } }, 3 => { @@ -1373,8 +1366,7 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = - lean_obj_to_string(str_ptr.cast()).clone(); + let constant = lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidRecIndex { idx, ctx_size, constant } }, 4 => { @@ -1386,8 +1378,7 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = - lean_obj_to_string(str_ptr.cast()).clone(); + let constant = lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::InvalidUnivVarIndex { idx, max, constant } }, 5 => { @@ -1406,14 +1397,12 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { let addr_ptr = lean_ctor_get(ptr as *mut _, 0); let str_ptr = lean_ctor_get(ptr as *mut _, 1); let addr = decode_ixon_address(addr_ptr.cast()); - let expected = - lean_obj_to_string(str_ptr.cast()).clone(); + let expected = 
lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::BadBlobFormat { addr, expected } }, 9 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let msg = - lean_obj_to_string(str_ptr.cast()).clone(); + let msg = lean_obj_to_string(str_ptr.cast()).clone(); DecompileError::BadConstantFormat { msg } }, 10 => { @@ -1479,8 +1468,7 @@ pub fn decode_compile_error(ptr: *const c_void) -> CompileError { match tag { 0 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = - lean_obj_to_string(str_ptr.cast()).clone(); + let name = lean_obj_to_string(str_ptr.cast()).clone(); CompileError::MissingConstant { name } }, 1 => { @@ -1489,14 +1477,12 @@ pub fn decode_compile_error(ptr: *const c_void) -> CompileError { }, 2 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let reason = - lean_obj_to_string(str_ptr.cast()).clone(); + let reason = lean_obj_to_string(str_ptr.cast()).clone(); CompileError::InvalidMutualBlock { reason } }, 3 => { let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let desc = - lean_obj_to_string(str_ptr.cast()).clone(); + let desc = lean_obj_to_string(str_ptr.cast()).clone(); CompileError::UnsupportedExpr { desc } }, 4 => { diff --git a/src/lean/ffi/ix/address.rs b/src/lean/ffi/ix/address.rs index 9f77d120..da4ac1f8 100644 --- a/src/lean/ffi/ix/address.rs +++ b/src/lean/ffi/ix/address.rs @@ -5,7 +5,8 @@ use std::ffi::c_void; use crate::lean::{ - lean::{lean_alloc_sarray, lean_sarray_cptr}, lean_sarray_data, + lean::{lean_alloc_sarray, lean_sarray_cptr}, + lean_sarray_data, }; /// Build a Ix.Address from a blake3::Hash. 
diff --git a/src/lean/ffi/ix/constant.rs b/src/lean/ffi/ix/constant.rs index 71e8a1b6..3f947219 100644 --- a/src/lean/ffi/ix/constant.rs +++ b/src/lean/ffi/ix/constant.rs @@ -17,12 +17,14 @@ use crate::ix::env::{ DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; -use crate::lean::nat::Nat; use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_obj_tag, }; -use crate::lean::{lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_is_scalar}; +use crate::lean::nat::Nat; +use crate::lean::{ + lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_is_scalar, +}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -289,8 +291,10 @@ pub fn decode_constant_val(ptr: *const c_void) -> ConstantVal { let name = decode_ix_name(name_ptr.cast()); - let level_params: Vec = - lean_array_data(level_params_ptr.cast()).iter().map(|&p| decode_ix_name(p)).collect(); + let level_params: Vec = lean_array_data(level_params_ptr.cast()) + .iter() + .map(|&p| decode_ix_name(p)) + .collect(); let typ = decode_ix_expr(type_ptr.cast()); @@ -500,8 +504,10 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { let k = lean_ctor_scalar_u8(inner_ptr.cast(), 7, 0) != 0; let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 7, 1) != 0; - let rules: Vec = - lean_array_data(rules_ptr.cast()).iter().map(|&p| decode_recursor_rule(p)).collect(); + let rules: Vec = lean_array_data(rules_ptr.cast()) + .iter() + .map(|&p| decode_recursor_rule(p)) + .collect(); ConstantInfo::RecInfo(RecursorVal { cnst: decode_constant_val(cnst_ptr.cast()), diff --git a/src/lean/ffi/ix/data.rs b/src/lean/ffi/ix/data.rs index d2c5c77a..4c7fc401 100644 --- a/src/lean/ffi/ix/data.rs +++ b/src/lean/ffi/ix/data.rs @@ -5,12 +5,14 @@ use std::ffi::c_void; use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, 
SyntaxPreresolved, }; -use crate::lean::nat::Nat; use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_mk_string, lean_obj_tag, }; -use crate::lean::{lean_array_data, lean_ctor_scalar_u8, lean_is_scalar, lean_obj_to_string}; +use crate::lean::nat::Nat; +use crate::lean::{ + lean_array_data, lean_ctor_scalar_u8, lean_is_scalar, lean_obj_to_string, +}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -296,8 +298,8 @@ pub fn decode_data_value(ptr: *const c_void) -> DataValue { 4 => { // ofInt: 1 object field let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - let int_tag = lean_obj_tag(inner_ptr as *mut _); - let nat_ptr = lean_ctor_get(inner_ptr as *mut _, 0); + let int_tag = lean_obj_tag(inner_ptr.cast()); + let nat_ptr = lean_ctor_get(inner_ptr.cast(), 0); let nat = Nat::from_ptr(nat_ptr.cast()); match int_tag { 0 => DataValue::OfInt(Int::OfNat(nat)), @@ -332,8 +334,10 @@ pub fn decode_ix_syntax(ptr: *const c_void) -> Syntax { let info = decode_ix_source_info(info_ptr.cast()); let kind = decode_ix_name(kind_ptr.cast()); - let args: Vec = - lean_array_data(args_ptr.cast()).iter().map(|&p| decode_ix_syntax(p)).collect(); + let args: Vec = lean_array_data(args_ptr.cast()) + .iter() + .map(|&p| decode_ix_syntax(p)) + .collect(); Syntax::Node(info, kind, args) }, @@ -355,10 +359,11 @@ pub fn decode_ix_syntax(ptr: *const c_void) -> Syntax { let info = decode_ix_source_info(info_ptr.cast()); let raw_val = decode_substring(raw_val_ptr.cast()); let val = decode_ix_name(val_ptr.cast()); - let preresolved: Vec = lean_array_data(preresolved_ptr.cast()) - .iter() - .map(|&p| decode_syntax_preresolved(p)) - .collect(); + let preresolved: Vec = + lean_array_data(preresolved_ptr.cast()) + .iter() + .map(|&p| decode_syntax_preresolved(p)) + .collect(); Syntax::Ident(info, raw_val, val, preresolved) }, diff --git a/src/lean/ffi/ix/expr.rs b/src/lean/ffi/ix/expr.rs 
index f986a6c1..81b3f236 100644 --- a/src/lean/ffi/ix/expr.rs +++ b/src/lean/ffi/ix/expr.rs @@ -19,12 +19,14 @@ use std::ffi::c_void; use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; -use crate::lean::nat::Nat; use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_inc, lean_mk_string, lean_obj_tag, }; -use crate::lean::{lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_obj_to_string}; +use crate::lean::nat::Nat; +use crate::lean::{ + lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_obj_to_string, +}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -264,8 +266,10 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let levels_ptr = lean_ctor_get(ptr as *mut _, 1); let name = decode_ix_name(name_ptr.cast()); - let levels: Vec = - lean_array_data(levels_ptr.cast()).iter().map(|&p| decode_ix_level(p)).collect(); + let levels: Vec = lean_array_data(levels_ptr.cast()) + .iter() + .map(|&p| decode_ix_level(p)) + .collect(); Expr::cnst(name, levels) }, @@ -341,8 +345,10 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let data_ptr = lean_ctor_get(ptr as *mut _, 0); let expr_ptr = lean_ctor_get(ptr as *mut _, 1); - let data: Vec<(Name, DataValue)> = - lean_array_data(data_ptr.cast()).iter().map(|&p| decode_name_data_value(p)).collect(); + let data: Vec<(Name, DataValue)> = lean_array_data(data_ptr.cast()) + .iter() + .map(|&p| decode_name_data_value(p)) + .collect(); let inner = decode_ix_expr(expr_ptr.cast()); Expr::mdata(data, inner) diff --git a/src/lean/ffi/ix/name.rs b/src/lean/ffi/ix/name.rs index eef4dc77..5ac159e5 100644 --- a/src/lean/ffi/ix/name.rs +++ b/src/lean/ffi/ix/name.rs @@ -8,12 +8,12 @@ use std::ffi::c_void; use crate::ix::env::{Name, NameData}; -use crate::lean::nat::Nat; use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, 
lean_ctor_set, lean_inc, lean_mk_string, lean_obj_tag, }; use crate::lean::lean_obj_to_string; +use crate::lean::nat::Nat; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; diff --git a/src/lean/ffi/ixon/constant.rs b/src/lean/ffi/ixon/constant.rs index 22e96ef5..9c03c4ad 100644 --- a/src/lean/ffi/ixon/constant.rs +++ b/src/lean/ffi/ixon/constant.rs @@ -489,7 +489,11 @@ pub fn decode_ixon_axiom(ptr: *const c_void) -> IxonAxiom { let lvls = *scalar_base.cast::(); // isUnsafe at offset 8 let is_unsafe = *scalar_base.add(8) != 0; - IxonAxiom { is_unsafe, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr.cast())) } + IxonAxiom { + is_unsafe, + lvls, + typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), + } } } @@ -640,10 +644,14 @@ pub fn decode_ixon_constant_info(ptr: *const c_void) -> IxonConstantInfo { 1 => IxonConstantInfo::Recr(decode_ixon_recursor(inner_ptr.cast())), 2 => IxonConstantInfo::Axio(decode_ixon_axiom(inner_ptr.cast())), 3 => IxonConstantInfo::Quot(decode_ixon_quotient(inner_ptr.cast())), - 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner_ptr.cast())), + 4 => { + IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner_ptr.cast())) + }, 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj(inner_ptr.cast())), 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj(inner_ptr.cast())), - 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner_ptr.cast())), + 7 => { + IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner_ptr.cast())) + }, 8 => { let muts = lean_array_to_vec(inner_ptr.cast(), decode_ixon_mut_const); IxonConstantInfo::Muts(muts) diff --git a/src/lean/ffi/ixon/expr.rs b/src/lean/ffi/ixon/expr.rs index 2b5a5d75..730a51e0 100644 --- a/src/lean/ffi/ixon/expr.rs +++ b/src/lean/ffi/ixon/expr.rs @@ -145,18 +145,21 @@ pub fn build_ixon_expr_array(exprs: &[Arc]) -> *mut c_void { fn decode_u64_array(ptr: *const c_void) -> Vec { use crate::lean::lean_is_scalar; - 
crate::lean::lean_array_data(ptr).iter().map(|&elem| { - if lean_is_scalar(elem) { - // Small scalar value - lean_unbox!(u64, elem) - } else { - // Heap-boxed UInt64: value is at offset 8 (after 8-byte header) - unsafe { - let base = elem.cast::(); - *base.add(8).cast::() + crate::lean::lean_array_data(ptr) + .iter() + .map(|&elem| { + if lean_is_scalar(elem) { + // Small scalar value + lean_unbox!(u64, elem) + } else { + // Heap-boxed UInt64: value is at offset 8 (after 8-byte header) + unsafe { + let base = elem.cast::(); + *base.add(8).cast::() + } } - } - }).collect() + }) + .collect() } /// Decode Ixon.Expr (12 constructors). @@ -270,7 +273,10 @@ pub fn decode_ixon_expr(ptr: *const c_void) -> IxonExpr { /// Decode Array Ixon.Expr. pub fn decode_ixon_expr_array(ptr: *const c_void) -> Vec> { - crate::lean::lean_array_data(ptr).iter().map(|&e| Arc::new(decode_ixon_expr(e))).collect() + crate::lean::lean_array_data(ptr) + .iter() + .map(|&e| Arc::new(decode_ixon_expr(e))) + .collect() } // ============================================================================= diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs index 7e3a123a..68a0ef2f 100644 --- a/src/lean/ffi/ixon/meta.rs +++ b/src/lean/ffi/ixon/meta.rs @@ -13,11 +13,11 @@ use crate::ix::ixon::metadata::{ }; use crate::lean::lean::{ lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_ctor_set_uint64, lean_ctor_set_uint8, lean_obj_tag, + lean_ctor_set, lean_ctor_set_uint8, lean_ctor_set_uint64, lean_obj_tag, }; use crate::lean::{ - lean_array_data, lean_array_to_vec, lean_box_fn, lean_ctor_scalar_u64, - lean_ctor_scalar_u8, lean_is_scalar, + lean_array_data, lean_array_to_vec, lean_box_fn, lean_ctor_scalar_u8, + lean_ctor_scalar_u64, lean_is_scalar, }; use super::constant::{ @@ -143,11 +143,17 @@ pub fn build_kvmap_array(kvmaps: &[KVMap]) -> *mut c_void { /// Decode KVMap (Array (Address × DataValue)). 
pub fn decode_ixon_kvmap(ptr: *const c_void) -> KVMap { - lean_array_data(ptr).iter().map(|&pair| unsafe { - let addr_ptr = lean_ctor_get(pair as *mut _, 0); - let dv_ptr = lean_ctor_get(pair as *mut _, 1); - (decode_ixon_address(addr_ptr.cast()), decode_ixon_data_value(dv_ptr.cast())) - }).collect() + lean_array_data(ptr) + .iter() + .map(|&pair| unsafe { + let addr_ptr = lean_ctor_get(pair as *mut _, 0); + let dv_ptr = lean_ctor_get(pair as *mut _, 1); + ( + decode_ixon_address(addr_ptr.cast()), + decode_ixon_data_value(dv_ptr.cast()), + ) + }) + .collect() } /// Decode Array KVMap. @@ -318,14 +324,20 @@ pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { // prj: 1 obj field (structName), 1× u64 scalar let name_ptr = lean_ctor_get(ptr as *mut _, 0); let child = lean_ctor_scalar_u64(ptr, 1, 0); - ExprMetaData::Prj { struct_name: decode_ixon_address(name_ptr.cast()), child } + ExprMetaData::Prj { + struct_name: decode_ixon_address(name_ptr.cast()), + child, + } }, 6 => { // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar let mdata_ptr = lean_ctor_get(ptr as *mut _, 0); let child = lean_ctor_scalar_u64(ptr, 1, 0); - ExprMetaData::Mdata { mdata: decode_kvmap_array(mdata_ptr.cast()), child } + ExprMetaData::Mdata { + mdata: decode_kvmap_array(mdata_ptr.cast()), + child, + } }, _ => panic!("Invalid Ixon.ExprMetaData tag: {}", tag), @@ -478,10 +490,12 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { // defn: 6 obj fields, 2× u64 scalar let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let hints = decode_reducibility_hints(lean_ctor_get(ptr as *mut _, 2).cast()); + let hints = + decode_reducibility_hints(lean_ctor_get(ptr as *mut _, 2).cast()); let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); - let arena = 
decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); + let arena = + decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); let type_root = lean_ctor_scalar_u64(ptr, 6, 0); let value_root = lean_ctor_scalar_u64(ptr, 6, 8); ConstantMeta::Def { @@ -500,7 +514,8 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { // axio: 3 obj fields, 1× u64 scalar let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); + let arena = + decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); let type_root = lean_ctor_scalar_u64(ptr, 3, 0); ConstantMeta::Axio { name, lvls, arena, type_root } }, @@ -509,7 +524,8 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { // quot: 3 obj fields, 1× u64 scalar let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); + let arena = + decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); let type_root = lean_ctor_scalar_u64(ptr, 3, 0); ConstantMeta::Quot { name, lvls, arena, type_root } }, @@ -518,10 +534,12 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { // indc: 6 obj fields, 1× u64 scalar let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let ctors = decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); + let ctors = + decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); + let arena = + 
decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); let type_root = lean_ctor_scalar_u64(ptr, 6, 0); ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } }, @@ -530,8 +548,10 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { // ctor: 4 obj fields, 1× u64 scalar let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let induct = decode_ixon_address(lean_ctor_get(ptr as *mut _, 2).cast()); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 3).cast()); + let induct = + decode_ixon_address(lean_ctor_get(ptr as *mut _, 2).cast()); + let arena = + decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 3).cast()); let type_root = lean_ctor_scalar_u64(ptr, 4, 0); ConstantMeta::Ctor { name, lvls, induct, arena, type_root } }, @@ -540,11 +560,14 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { // recr: 7 obj fields, 1× u64 scalar let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let rules = decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); + let rules = + decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); - let arena = decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); - let rule_roots = decode_u64_array(lean_ctor_get(ptr as *mut _, 6).cast()); + let arena = + decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); + let rule_roots = + decode_u64_array(lean_ctor_get(ptr as *mut _, 6).cast()); let type_root = lean_ctor_scalar_u64(ptr, 7, 0); ConstantMeta::Rec { name, diff --git a/src/lean/ffi/ixon/sharing.rs b/src/lean/ffi/ixon/sharing.rs index 3b55c6f0..b88004b4 100644 --- a/src/lean/ffi/ixon/sharing.rs +++ 
b/src/lean/ffi/ixon/sharing.rs @@ -17,7 +17,8 @@ use super::serialize::lean_ptr_to_ixon_expr; /// This helps diagnose why Lean and Rust make different sharing decisions. #[unsafe(no_mangle)] pub extern "C" fn rs_debug_sharing_analysis(exprs_ptr: *const c_void) { - let exprs: Vec> = lean_array_to_vec(exprs_ptr, lean_ptr_to_ixon_expr); + let exprs: Vec> = + lean_array_to_vec(exprs_ptr, lean_ptr_to_ixon_expr); println!("[Rust] Analyzing {} input expressions", exprs.len()); diff --git a/src/lean/ffi/ixon/univ.rs b/src/lean/ffi/ixon/univ.rs index d5497091..0326d462 100644 --- a/src/lean/ffi/ixon/univ.rs +++ b/src/lean/ffi/ixon/univ.rs @@ -113,7 +113,10 @@ pub fn decode_ixon_univ(ptr: *const c_void) -> IxonUniv { /// Decode Array Ixon.Univ. pub fn decode_ixon_univ_array(ptr: *const c_void) -> Vec> { - crate::lean::lean_array_data(ptr).iter().map(|&u| Arc::new(decode_ixon_univ(u))).collect() + crate::lean::lean_array_data(ptr) + .iter() + .map(|&u| Arc::new(decode_ixon_univ(u))) + .collect() } // ============================================================================= diff --git a/src/lean/ffi/keccak.rs b/src/lean/ffi/keccak.rs index df4bd7d1..1e2b19c9 100644 --- a/src/lean/ffi/keccak.rs +++ b/src/lean/ffi/keccak.rs @@ -4,7 +4,10 @@ use std::sync::OnceLock; use tiny_keccak::{Hasher, Keccak}; use crate::lean::{ - lean::{ lean_alloc_external, lean_alloc_sarray, lean_get_external_data, lean_register_external_class }, + lean::{ + lean_alloc_external, lean_alloc_sarray, lean_get_external_data, + lean_register_external_class, + }, lean_sarray_data, lean_sarray_set_data, noop_foreach, }; @@ -15,15 +18,21 @@ static KECCAK_CLASS: OnceLock = OnceLock::new(); fn get_keccak_class() -> *mut c_void { KECCAK_CLASS .get_or_init(|| { - ExternalClassPtr(unsafe { - lean_register_external_class(Some(keccak_finalizer), Some(noop_foreach)) - }.cast()) + ExternalClassPtr( + unsafe { + lean_register_external_class( + Some(keccak_finalizer), + Some(noop_foreach), + ) + } + .cast(), + ) }) .0 
} extern "C" fn keccak_finalizer(ptr: *mut c_void) { - drop_raw(ptr as *mut Keccak); + drop_raw(ptr.cast::()); } /// `Keccak.Hasher.init : Unit → Hasher` @@ -40,7 +49,8 @@ extern "C" fn rs_keccak256_hasher_update( hasher_obj: *mut c_void, input: *const c_void, ) -> *mut c_void { - let hasher: &Keccak = unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; + let hasher: &Keccak = + unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; let mut new_hasher = hasher.clone(); new_hasher.update(lean_sarray_data(input)); let ptr = to_raw(new_hasher) as *mut c_void; @@ -52,7 +62,8 @@ extern "C" fn rs_keccak256_hasher_update( extern "C" fn rs_keccak256_hasher_finalize( hasher_obj: *mut c_void, ) -> *mut c_void { - let hasher: &Keccak = unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; + let hasher: &Keccak = + unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; let mut data = [0u8; 32]; hasher.clone().finalize(&mut data); let arr_ptr = unsafe { lean_alloc_sarray(1, 32, 32) }; diff --git a/src/lean/ffi/lean_env.rs b/src/lean/ffi/lean_env.rs index addc8470..513e7902 100644 --- a/src/lean/ffi/lean_env.rs +++ b/src/lean/ffi/lean_env.rs @@ -121,9 +121,7 @@ pub fn lean_ptr_to_name(ptr: *const c_void, global: &GlobalCache) -> Name { // Recursive call - will also use global cache let pre = lean_ptr_to_name(pre_ptr, global); match lean_tag(ptr) { - 1 => { - Name::str(pre, lean_obj_to_string(pos_ptr)) - }, + 1 => Name::str(pre, lean_obj_to_string(pos_ptr)), 2 => Name::num(pre, Nat::from_ptr(pos_ptr)), _ => unreachable!(), } @@ -146,19 +144,23 @@ fn lean_ptr_to_level(ptr: *const c_void, cache: &mut Cache<'_>) -> Level { Level::succ(u) }, 2 => { - let [u, v] = lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = + lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); Level::max(u, v) }, 3 => { - let [u, v] = lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = + 
lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); Level::imax(u, v) }, 4 => { - let [name] = lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); + let [name] = + lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); Level::param(name) }, 5 => { - let [name] = lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); + let [name] = + lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); Level::mvar(name) }, _ => unreachable!(), @@ -182,7 +184,8 @@ fn lean_ptr_to_source_info(ptr: *const c_void) -> SourceInfo { } match lean_tag(ptr) { 0 => { - let [leading_ptr, pos_ptr, trailing_ptr, end_pos_ptr] = lean_ctor_objs(ptr); + let [leading_ptr, pos_ptr, trailing_ptr, end_pos_ptr] = + lean_ctor_objs(ptr); let leading = lean_ptr_to_substring(leading_ptr); let pos = Nat::from_ptr(pos_ptr); let trailing = lean_ptr_to_substring(trailing_ptr); @@ -239,7 +242,8 @@ fn lean_ptr_to_syntax(ptr: *const c_void, cache: &mut Cache<'_>) -> Syntax { Syntax::Atom(info, lean_obj_to_string(val_ptr)) }, 3 => { - let [info_ptr, raw_val_ptr, val_ptr, preresolved_ptr] = lean_ctor_objs(ptr); + let [info_ptr, raw_val_ptr, val_ptr, preresolved_ptr] = + lean_ctor_objs(ptr); let info = lean_ptr_to_source_info(info_ptr); let raw_val = lean_ptr_to_substring(raw_val_ptr); let val = lean_ptr_to_name(val_ptr, cache.global); @@ -261,9 +265,7 @@ fn lean_ptr_to_name_data_value( let name = lean_ptr_to_name(name_ptr, cache.global); let [inner_ptr] = lean_ctor_objs(data_value_ptr); let data_value = match lean_tag(data_value_ptr) { - 0 => { - DataValue::OfString(lean_obj_to_string(inner_ptr)) - }, + 0 => DataValue::OfString(lean_obj_to_string(inner_ptr)), 1 => DataValue::OfBool(inner_ptr as usize == 1), 2 => DataValue::OfName(lean_ptr_to_name(inner_ptr, cache.global)), 3 => DataValue::OfNat(Nat::from_ptr(inner_ptr)), @@ -381,9 +383,7 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { let nat = 
Nat::from_ptr(inner_ptr); Expr::lit(Literal::NatVal(nat)) }, - 1 => { - Expr::lit(Literal::StrVal(lean_obj_to_string(inner_ptr))) - }, + 1 => Expr::lit(Literal::StrVal(lean_obj_to_string(inner_ptr))), _ => unreachable!(), } }, @@ -481,7 +481,8 @@ pub fn lean_ptr_to_constant_info( }) }, 2 => { - let [constant_val_ptr, value_ptr, all_ptr] = lean_ctor_objs(inner_val_ptr); + let [constant_val_ptr, value_ptr, all_ptr] = + lean_ctor_objs(inner_val_ptr); let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); let value = lean_ptr_to_expr(value_ptr, cache); let all: Vec<_> = collect_list_ptrs(all_ptr) diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs index aa84784e..097b2f4f 100644 --- a/src/lean/ffi/primitives.rs +++ b/src/lean/ffi/primitives.rs @@ -43,7 +43,7 @@ pub fn build_nat(n: &Nat) -> *mut c_void { arr[..chunk.len()].copy_from_slice(chunk); limbs.push(u64::from_le_bytes(arr)); } - crate::lean::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) + unsafe { crate::lean::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) } } // ============================================================================= diff --git a/src/lean/nat.rs b/src/lean/nat.rs index 2ee56fad..142ef15c 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -129,12 +129,15 @@ unsafe extern "C" { /// Create a Lean `Nat` from a little-endian array of u64 limbs. /// Replaces the C function `c_lean_nat_from_limbs` from `ixon_ffi.c`. -pub fn lean_nat_from_limbs(num_limbs: usize, limbs: *const u64) -> *mut c_void { +/// # Safety +/// `limbs` must be valid for reading `num_limbs` elements. 
+pub unsafe fn lean_nat_from_limbs(num_limbs: usize, limbs: *const u64) -> *mut c_void { if num_limbs == 0 { return lean_box_fn(0); } let first = unsafe { *limbs }; if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT { + #[allow(clippy::cast_possible_truncation)] // only targets 64-bit return lean_box_fn(first as usize); } if num_limbs == 1 { diff --git a/src/sha256.rs b/src/sha256.rs index 5f5c6255..6763b98f 100644 --- a/src/sha256.rs +++ b/src/sha256.rs @@ -1,7 +1,9 @@ use sha2::{Digest, Sha256}; use std::ffi::c_void; -use crate::lean::{lean::lean_alloc_sarray, lean_sarray_data, lean_sarray_set_data}; +use crate::lean::{ + lean::lean_alloc_sarray, lean_sarray_data, lean_sarray_set_data, +}; #[unsafe(no_mangle)] extern "C" fn rs_sha256(bytes: *const c_void) -> *mut c_void { From 5104c504016ad803a872ec3c10cb6318b86f730d Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Fri, 27 Feb 2026 18:10:06 -0500 Subject: [PATCH 04/27] cargo fmt --- .github/workflows/ci.yml | 5 +---- build.rs | 13 +++++++------ src/lean.rs | 4 +++- src/lean/nat.rs | 5 ++++- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 98f11982..4d24bc5b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,9 +21,6 @@ jobs: - uses: leanprover/lean-action@v1 with: build-args: "--wfail -v" - # TODO: These should both run automatically - test: true - lint: true - name: Test Ix CLI run: lake test -- cli - name: Aiur tests @@ -44,7 +41,7 @@ jobs: # Install Lean for rust-bindgen step - uses: leanprover/lean-action@v1 with: - build: false + auto-config: false use-github-cache: false - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@nextest diff --git a/build.rs b/build.rs index f6aca34c..e8b98e57 100644 --- a/build.rs +++ b/build.rs @@ -10,13 +10,14 @@ fn find_lean_include_dir() -> PathBuf { } // 2. 
Try `lean --print-prefix` if let Ok(output) = Command::new("lean").arg("--print-prefix").output() - && output.status.success() { - let prefix = String::from_utf8_lossy(&output.stdout).trim().to_string(); - let inc = PathBuf::from(prefix).join("include"); - if inc.exists() { - return inc; - } + && output.status.success() + { + let prefix = String::from_utf8_lossy(&output.stdout).trim().to_string(); + let inc = PathBuf::from(prefix).join("include"); + if inc.exists() { + return inc; } + } panic!( "Cannot find Lean include directory. \ Set LEAN_SYSROOT or ensure `lean` is on PATH." diff --git a/src/lean.rs b/src/lean.rs index e938f92a..235accfb 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -108,7 +108,9 @@ pub fn lean_obj_to_string(ptr: *const c_void) -> String { #[inline] pub fn lean_tag(ptr: *const c_void) -> u8 { #[allow(clippy::cast_possible_truncation)] // tags always fit in u8 - unsafe { lean::lean_obj_tag(ptr as *mut _) as u8 } + unsafe { + lean::lean_obj_tag(ptr as *mut _) as u8 + } } #[inline] diff --git a/src/lean/nat.rs b/src/lean/nat.rs index 142ef15c..72cbd985 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -131,7 +131,10 @@ unsafe extern "C" { /// Replaces the C function `c_lean_nat_from_limbs` from `ixon_ffi.c`. /// # Safety /// `limbs` must be valid for reading `num_limbs` elements. 
-pub unsafe fn lean_nat_from_limbs(num_limbs: usize, limbs: *const u64) -> *mut c_void { +pub unsafe fn lean_nat_from_limbs( + num_limbs: usize, + limbs: *const u64, +) -> *mut c_void { if num_limbs == 0 { return lean_box_fn(0); } From 55818186100fe5485c308207fe47ad8b4807ef6d Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Mon, 2 Mar 2026 09:53:42 -0500 Subject: [PATCH 05/27] Test Rust error in CI --- src/lean.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lean.rs b/src/lean.rs index 235accfb..2a905d57 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -24,6 +24,7 @@ pub mod lean { pub mod ffi; pub mod nat; +pub mod; use std::ffi::{CString, c_void}; From e5ce7cbcda1729434235e9e17a3e1e7160127749 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Mon, 2 Mar 2026 10:42:00 -0500 Subject: [PATCH 06/27] ci: Switch to setup-rust-toolchain action --- .github/workflows/ci.yml | 16 +++++----------- lakefile.lean | 17 ----------------- src/lean.rs | 1 - 3 files changed, 5 insertions(+), 29 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d24bc5b..87b22c0f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,6 +18,7 @@ jobs: runs-on: warp-ubuntu-latest-x64-16x steps: - uses: actions/checkout@v6 + - uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: leanprover/lean-action@v1 with: build-args: "--wfail -v" @@ -25,8 +26,6 @@ jobs: run: lake test -- cli - name: Aiur tests run: lake test -- --ignored aiur aiur-hashes ixvm - - name: Check lean.h.hash - run: lake run check-lean-h-hash - name: Check Lean versions match for Ix and compiler bench run: diff lean-toolchain Benchmarks/Compile/lean-toolchain @@ -34,22 +33,17 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - with: - repository: argumentcomputer/ci-workflows - - uses: ./.github/actions/ci-env - - uses: actions/checkout@v6 + - uses: 
actions-rust-lang/setup-rust-toolchain@v1 + - uses: taiki-e/install-action@nextest # Install Lean for rust-bindgen step - uses: leanprover/lean-action@v1 with: auto-config: false use-github-cache: false - - uses: dtolnay/rust-toolchain@stable - - uses: taiki-e/install-action@nextest - - uses: Swatinem/rust-cache@v2 - name: Check Rustfmt code style - run: cargo fmt --all --check + uses: actions-rust-lang/rustfmt@v1 - name: Check clippy warnings - run: cargo xclippy -D warnings + run: cargo xclippy - name: Check *everything* compiles run: cargo check --all-targets --all-features --workspace - name: Tests diff --git a/lakefile.lean b/lakefile.lean index b37dfa26..a1ffbcf9 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -118,23 +118,6 @@ script install := do setAccessRights tgtPath fileRight return 0 -script "check-lean-h-hash" := do - let cachedLeanHHash := 14792798158057885278 - - let leanIncludeDir ← getLeanIncludeDir - let includedLeanHPath := leanIncludeDir / "lean" / "lean.h" - let includedLeanHBytes ← IO.FS.readBinFile includedLeanHPath - let includedLeanHHash := includedLeanHBytes.hash - - if cachedLeanHHash ≠ includedLeanHHash then - IO.eprintln "Mismatching lean/lean.h hash" - IO.eprintln " 1. Double-check changes made to lean/lean.h" - IO.eprintln s!" 2. 
Cache {includedLeanHHash} instead" - return 1 - else - IO.println "lean/lean.h hash matches ✓" - return 0 - script "get-exe-targets" := do let pkg ← getRootPackage let exeTargets := pkg.configTargets LeanExe.configKind diff --git a/src/lean.rs b/src/lean.rs index 2a905d57..235accfb 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -24,7 +24,6 @@ pub mod lean { pub mod ffi; pub mod nat; -pub mod; use std::ffi::{CString, c_void}; From aa2fcc7f0d41c99854c11a8d59e48577a3355374 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Mon, 2 Mar 2026 14:05:03 -0500 Subject: [PATCH 07/27] Add API wrappers for c_void pointers --- .github/workflows/compile.yml | 5 +- flake.nix | 1 + src/lean.rs | 1 + src/lean/ffi.rs | 18 - src/lean/ffi/aiur/protocol.rs | 235 ++++----- src/lean/ffi/builder.rs | 10 +- src/lean/ffi/byte_array.rs | 8 +- src/lean/ffi/compile.rs | 14 +- src/lean/ffi/graph.rs | 132 +++-- src/lean/ffi/ix/address.rs | 35 +- src/lean/ffi/ix/constant.rs | 481 +++++++++---------- src/lean/ffi/ix/data.rs | 373 +++++++-------- src/lean/ffi/ix/env.rs | 8 +- src/lean/ffi/ix/expr.rs | 311 ++++++------ src/lean/ffi/ix/level.rs | 121 +++-- src/lean/ffi/ix/name.rs | 80 ++-- src/lean/ffi/ixon/env.rs | 4 +- src/lean/ffi/ixon/meta.rs | 2 +- src/lean/ffi/keccak.rs | 66 +-- src/lean/ffi/primitives.rs | 258 +++++----- src/lean/ffi/unsigned.rs | 28 +- src/lean/nat.rs | 12 +- src/lean/obj.rs | 873 ++++++++++++++++++++++++++++++++++ src/sha256.rs | 15 +- 24 files changed, 1828 insertions(+), 1263 deletions(-) create mode 100644 src/lean/obj.rs diff --git a/.github/workflows/compile.yml b/.github/workflows/compile.yml index f7e0fb17..d1c94f48 100644 --- a/.github/workflows/compile.yml +++ b/.github/workflows/compile.yml @@ -20,8 +20,9 @@ jobs: - uses: actions/checkout@v6 - uses: leanprover/lean-action@v1 with: + auto-config: false + build: true build-args: "ix --wfail -v" - test: false - run: | mkdir -p ~/.local/bin echo | lake run install @@ 
-56,7 +57,7 @@ jobs: - uses: leanprover/lean-action@v1 with: lake-package-directory: ${{ env.COMPILE_DIR }} - build: false + auto-config: false use-github-cache: false # FLT and FC take a few minutes to rebuild, so we cache the build artifacts - if: matrix.cache_pkg diff --git a/flake.nix b/flake.nix index d7fd3c86..834aa8e0 100644 --- a/flake.nix +++ b/flake.nix @@ -154,6 +154,7 @@ lean.lean-all # Includes Lean compiler, lake, stdlib, etc. gmp cargo-deny + valgrind ]; }; diff --git a/src/lean.rs b/src/lean.rs index 235accfb..d22f2632 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -24,6 +24,7 @@ pub mod lean { pub mod ffi; pub mod nat; +pub mod obj; use std::ffi::{CString, c_void}; diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs index c92ba53c..9fa234fb 100644 --- a/src/lean/ffi.rs +++ b/src/lean/ffi.rs @@ -14,12 +14,6 @@ pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. use std::ffi::{CString, c_void}; -/// Wrapper to allow OnceLock storage of an external class pointer. -pub(crate) struct ExternalClassPtr(pub(crate) *mut c_void); -// Safety: the class pointer is initialized once and read-only thereafter. 
-unsafe impl Send for ExternalClassPtr {} -unsafe impl Sync for ExternalClassPtr {} - use crate::lean::{ lean::{lean_io_result_mk_error, lean_mk_io_user_error, lean_mk_string}, lean_array_to_vec, lean_sarray_data, lean_unbox_u32, @@ -54,18 +48,6 @@ where } } -#[inline] -pub(crate) fn to_raw(t: T) -> *const T { - Box::into_raw(Box::new(t)) -} - -#[inline] -pub(super) fn drop_raw(ptr: *mut T) { - assert!(!ptr.is_null(), "Null pointer free attempt"); - let t = unsafe { Box::from_raw(ptr) }; - drop(t); -} - #[unsafe(no_mangle)] extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( u32s: *const c_void, diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs index 09f233e0..09623d40 100644 --- a/src/lean/ffi/aiur/protocol.rs +++ b/src/lean/ffi/aiur/protocol.rs @@ -4,7 +4,6 @@ use multi_stark::{ types::{CommitmentParameters, FriParameters}, }; use rustc_hash::{FxBuildHasher, FxHashMap}; -use std::ffi::c_void; use std::sync::OnceLock; use crate::{ @@ -14,21 +13,14 @@ use crate::{ synthesis::AiurSystem, }, lean::{ - ffi::{ - ExternalClassPtr, - aiur::{ - lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, - }, - drop_raw, to_raw, + ffi::aiur::{ + lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, }, - lean::{ - lean_alloc_array, lean_alloc_ctor, lean_alloc_external, - lean_alloc_sarray, lean_array_set_core, lean_ctor_set, - lean_get_external_data, lean_register_external_class, + lean_array_data, lean_array_to_vec, lean_ctor_objs, + obj::{ + ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, + LeanExternal, LeanObj, }, - lean_array_data, lean_array_to_vec, lean_box_fn, lean_box_u64, - lean_ctor_objs, lean_except_error_string, lean_except_ok, lean_sarray_data, - lean_sarray_set_data, noop_foreach, }, }; @@ -36,47 +28,15 @@ use crate::{ // External class registration (OnceLock pattern) // ============================================================================= -static AIUR_PROOF_CLASS: OnceLock = 
OnceLock::new(); -static AIUR_SYSTEM_CLASS: OnceLock = OnceLock::new(); +static AIUR_PROOF_CLASS: OnceLock = OnceLock::new(); +static AIUR_SYSTEM_CLASS: OnceLock = OnceLock::new(); -fn get_aiur_proof_class() -> *mut c_void { - AIUR_PROOF_CLASS - .get_or_init(|| { - ExternalClassPtr( - unsafe { - lean_register_external_class( - Some(aiur_proof_finalizer), - Some(noop_foreach), - ) - } - .cast(), - ) - }) - .0 +fn proof_class() -> &'static ExternalClass { + AIUR_PROOF_CLASS.get_or_init(ExternalClass::register_with_drop::) } -fn get_aiur_system_class() -> *mut c_void { - AIUR_SYSTEM_CLASS - .get_or_init(|| { - ExternalClassPtr( - unsafe { - lean_register_external_class( - Some(aiur_system_finalizer), - Some(noop_foreach), - ) - } - .cast(), - ) - }) - .0 -} - -extern "C" fn aiur_proof_finalizer(ptr: *mut c_void) { - drop_raw(ptr.cast::()); -} - -extern "C" fn aiur_system_finalizer(ptr: *mut c_void) { - drop_raw(ptr.cast::()); +fn system_class() -> &'static ExternalClass { + AIUR_SYSTEM_CLASS.get_or_init(ExternalClass::register_with_drop::) } // ============================================================================= @@ -85,58 +45,49 @@ extern "C" fn aiur_system_finalizer(ptr: *mut c_void) { /// `Aiur.Proof.toBytes : @& Proof → ByteArray` #[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_to_bytes(proof_obj: *const c_void) -> *mut c_void { - let proof: &Proof = - unsafe { &*lean_get_external_data(proof_obj as *mut _).cast() }; - let bytes = proof.to_bytes().expect("Serialization error"); - let len = bytes.len(); - let arr_ptr = unsafe { lean_alloc_sarray(1, len, len) }; - unsafe { lean_sarray_set_data(arr_ptr.cast(), &bytes) }; - arr_ptr.cast() +extern "C" fn rs_aiur_proof_to_bytes( + proof_obj: LeanExternal, +) -> LeanByteArray { + let bytes = proof_obj.get().to_bytes().expect("Serialization error"); + LeanByteArray::from_bytes(&bytes) } /// `Aiur.Proof.ofBytes : @& ByteArray → Proof` #[unsafe(no_mangle)] -extern "C" fn rs_aiur_proof_of_bytes(byte_array: *const 
c_void) -> *mut c_void { - let proof = Proof::from_bytes(lean_sarray_data(byte_array)) - .expect("Deserialization error"); - let ptr = to_raw(proof) as *mut c_void; - unsafe { lean_alloc_external(get_aiur_proof_class().cast(), ptr) }.cast() +extern "C" fn rs_aiur_proof_of_bytes( + byte_array: LeanByteArray, +) -> LeanExternal { + let proof = + Proof::from_bytes(byte_array.as_bytes()).expect("Deserialization error"); + LeanExternal::alloc(proof_class(), proof) } /// `AiurSystem.build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem` #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_build( - toplevel: *const c_void, - commitment_parameters: *const c_void, -) -> *mut c_void { + toplevel: LeanObj, + commitment_parameters: LeanObj, +) -> LeanExternal { let system = AiurSystem::build( - lean_ptr_to_toplevel(toplevel), + lean_ptr_to_toplevel(toplevel.as_ptr()), lean_ptr_to_commitment_parameters(commitment_parameters), ); - let ptr = to_raw(system) as *mut c_void; - unsafe { lean_alloc_external(get_aiur_system_class().cast(), ptr) }.cast() + LeanExternal::alloc(system_class(), system) } /// `AiurSystem.verify : @& AiurSystem → @& FriParameters → @& Array G → @& Proof → Except String Unit` #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_verify( - aiur_system_obj: *const c_void, - fri_parameters: *const c_void, - claim: *const c_void, - proof_obj: *const c_void, -) -> *mut c_void { - let aiur_system: &AiurSystem = - unsafe { &*lean_get_external_data(aiur_system_obj as *mut _).cast() }; - - let proof: &Proof = - unsafe { &*lean_get_external_data(proof_obj as *mut _).cast() }; - + aiur_system_obj: LeanExternal, + fri_parameters: LeanObj, + claim: LeanObj, + proof_obj: LeanExternal, +) -> LeanExcept { let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let claim = lean_array_to_vec(claim, lean_unbox_g); - match aiur_system.verify(fri_parameters, &claim, proof) { - Ok(()) => lean_except_ok(lean_box_fn(0)), - Err(err) => 
lean_except_error_string(&format!("{err:?}")), + let claim = lean_array_to_vec(claim.as_ptr(), lean_unbox_g); + match aiur_system_obj.get().verify(fri_parameters, &claim, proof_obj.get()) { + Ok(()) => LeanExcept::ok(LeanObj::box_usize(0)), + Err(err) => LeanExcept::error_string(&format!("{err:?}")), } } @@ -144,82 +95,66 @@ extern "C" fn rs_aiur_system_verify( /// `Array G × Proof × Array G × Array (Array G × IOKeyInfo)` #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_prove( - aiur_system_obj: *const c_void, - fri_parameters: *const c_void, - fun_idx: *const c_void, - args: *const c_void, - io_data_arr: *const c_void, - io_map_arr: *const c_void, -) -> *mut c_void { - let aiur_system: &AiurSystem = - unsafe { &*lean_get_external_data(aiur_system_obj as *mut _).cast() }; - + aiur_system_obj: LeanExternal, + fri_parameters: LeanObj, + fun_idx: LeanObj, + args: LeanObj, + io_data_arr: LeanObj, + io_map_arr: LeanObj, +) -> LeanObj { let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let fun_idx = lean_unbox_nat_as_usize(fun_idx); - let args = lean_array_to_vec(args, lean_unbox_g); - let io_data = lean_array_to_vec(io_data_arr, lean_unbox_g); + let fun_idx = lean_unbox_nat_as_usize(fun_idx.as_ptr()); + let args = lean_array_to_vec(args.as_ptr(), lean_unbox_g); + let io_data = lean_array_to_vec(io_data_arr.as_ptr(), lean_unbox_g); let io_map = lean_array_to_io_buffer_map(io_map_arr); let mut io_buffer = IOBuffer { data: io_data, map: io_map }; let (claim, proof) = - aiur_system.prove(fri_parameters, fun_idx, &args, &mut io_buffer); - - // Build Lean objects directly from the results. 
+ aiur_system_obj + .get() + .prove(fri_parameters, fun_idx, &args, &mut io_buffer); // claim: Array G let lean_claim = build_g_array(&claim); // proof: Proof (external object) - let lean_proof = unsafe { - lean_alloc_external( - get_aiur_proof_class().cast(), - to_raw(proof) as *mut c_void, - ) - }; + let lean_proof = *LeanExternal::alloc(proof_class(), proof); // io_data: Array G let lean_io_data = build_g_array(&io_buffer.data); // io_map: Array (Array G × IOKeyInfo) - let lean_io_map = unsafe { - let arr = lean_alloc_array(io_buffer.map.len(), io_buffer.map.len()); + let lean_io_map = { + let arr = LeanArray::alloc(io_buffer.map.len()); for (i, (key, info)) in io_buffer.map.iter().enumerate() { let key_arr = build_g_array(key); // IOKeyInfo ctor (tag 0, 2 object fields) - let key_info = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(key_info, 0, lean_box_fn(info.idx).cast()); - lean_ctor_set(key_info, 1, lean_box_fn(info.len).cast()); + let key_info = LeanCtor::alloc(0, 2, 0); + key_info.set(0, LeanObj::box_usize(info.idx)); + key_info.set(1, LeanObj::box_usize(info.len)); // (Array G × IOKeyInfo) tuple - let map_elt = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(map_elt, 0, key_arr.cast()); - lean_ctor_set(map_elt, 1, key_info); - lean_array_set_core(arr, i, map_elt); + let map_elt = LeanCtor::alloc(0, 2, 0); + map_elt.set(0, key_arr); + map_elt.set(1, *key_info); + arr.set(i, *map_elt); } - arr + *arr }; // Build nested tuple: // Array G × Array (Array G × IOKeyInfo) - let io_tuple = unsafe { - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, lean_io_data.cast()); - lean_ctor_set(obj, 1, lean_io_map); - obj - }; + let io_tuple = LeanCtor::alloc(0, 2, 0); + io_tuple.set(0, lean_io_data); + io_tuple.set(1, lean_io_map); // Proof × Array G × Array (Array G × IOKeyInfo) - let proof_io_tuple = unsafe { - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, lean_proof); - lean_ctor_set(obj, 1, io_tuple); - obj - }; + let proof_io_tuple = 
LeanCtor::alloc(0, 2, 0); + proof_io_tuple.set(0, lean_proof); + proof_io_tuple.set(1, *io_tuple); // Array G × Proof × Array G × Array (Array G × IOKeyInfo) - unsafe { - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, lean_claim.cast()); - lean_ctor_set(obj, 1, proof_io_tuple); - obj.cast() - } + let result = LeanCtor::alloc(0, 2, 0); + result.set(0, lean_claim); + result.set(1, *proof_io_tuple); + *result } // ============================================================================= @@ -227,31 +162,27 @@ extern "C" fn rs_aiur_system_prove( // ============================================================================= /// Build a Lean `Array G` from a slice of field elements. -fn build_g_array(values: &[G]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(values.len(), values.len()); - for (i, g) in values.iter().enumerate() { - lean_array_set_core(arr, i, lean_box_u64(g.as_canonical_u64()).cast()); - } - arr.cast() +fn build_g_array(values: &[G]) -> LeanObj { + let arr = LeanArray::alloc(values.len()); + for (i, g) in values.iter().enumerate() { + arr.set(i, LeanObj::box_u64(g.as_canonical_u64())); } + *arr } -fn lean_ptr_to_commitment_parameters( - commitment_parameters_ptr: *const c_void, -) -> CommitmentParameters { +fn lean_ptr_to_commitment_parameters(obj: LeanObj) -> CommitmentParameters { CommitmentParameters { - log_blowup: lean_unbox_nat_as_usize(commitment_parameters_ptr), + log_blowup: lean_unbox_nat_as_usize(obj.as_ptr()), } } -fn lean_ctor_to_fri_parameters(ptr: *const c_void) -> FriParameters { +fn lean_ctor_to_fri_parameters(obj: LeanObj) -> FriParameters { let [ log_final_poly_len_ptr, num_queries_ptr, commit_proof_of_work_bits, query_proof_of_work_bits, - ] = lean_ctor_objs(ptr); + ] = lean_ctor_objs(obj.as_ptr()); FriParameters { log_final_poly_len: lean_unbox_nat_as_usize(log_final_poly_len_ptr), num_queries: lean_unbox_nat_as_usize(num_queries_ptr), @@ -262,10 +193,8 @@ fn lean_ctor_to_fri_parameters(ptr: *const 
c_void) -> FriParameters { } } -fn lean_array_to_io_buffer_map( - array: *const c_void, -) -> FxHashMap, IOKeyInfo> { - let array_data = lean_array_data(array); +fn lean_array_to_io_buffer_map(obj: LeanObj) -> FxHashMap, IOKeyInfo> { + let array_data = lean_array_data(obj.as_ptr()); let mut map = FxHashMap::with_capacity_and_hasher(array_data.len(), FxBuildHasher); for ptr in array_data { diff --git a/src/lean/ffi/builder.rs b/src/lean/ffi/builder.rs index fe0d80af..d3bf73a0 100644 --- a/src/lean/ffi/builder.rs +++ b/src/lean/ffi/builder.rs @@ -1,18 +1,18 @@ //! LeanBuildCache struct for constructing Lean Ix types with caching. -use std::ffi::c_void; - use blake3::Hash; use rustc_hash::FxHashMap; +use crate::lean::obj::{IxExpr, IxLevel, IxName}; + /// Cache for constructing Lean Ix types with deduplication. /// /// This struct maintains caches for names, levels, and expressions to avoid /// rebuilding the same Lean objects multiple times during environment construction. pub struct LeanBuildCache { - pub(crate) names: FxHashMap, - pub(crate) levels: FxHashMap, - pub(crate) exprs: FxHashMap, + pub(crate) names: FxHashMap, + pub(crate) levels: FxHashMap, + pub(crate) exprs: FxHashMap, } impl LeanBuildCache { diff --git a/src/lean/ffi/byte_array.rs b/src/lean/ffi/byte_array.rs index 6e986699..c9f3a3b4 100644 --- a/src/lean/ffi/byte_array.rs +++ b/src/lean/ffi/byte_array.rs @@ -1,10 +1,8 @@ -use std::ffi::c_void; - -use crate::lean::lean_sarray_data; +use crate::lean::obj::LeanByteArray; /// `@& ByteArray → @& ByteArray → Bool` /// Efficient implementation for `BEq ByteArray` #[unsafe(no_mangle)] -extern "C" fn rs_byte_array_beq(a: *const c_void, b: *const c_void) -> bool { - lean_sarray_data(a) == lean_sarray_data(b) +extern "C" fn rs_byte_array_beq(a: LeanByteArray, b: LeanByteArray) -> bool { + a.as_bytes() == b.as_bytes() } diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs index 977db279..475f3cf1 100644 --- a/src/lean/ffi/compile.rs +++ 
b/src/lean/ffi/compile.rs @@ -84,7 +84,7 @@ pub fn build_raw_named( let addr_obj = build_address_from_ixon(addr); let meta_obj = build_constant_meta(meta); let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 0, name_obj.as_mut_ptr().cast()); lean_ctor_set(obj, 1, addr_obj.cast()); lean_ctor_set(obj, 2, meta_obj.cast()); obj.cast() @@ -298,7 +298,7 @@ pub extern "C" fn rs_compile_env_full( std::ptr::copy_nonoverlapping(bytes.as_ptr(), ba_data, bytes.len()); let block = lean_alloc_ctor(0, 2, 8); - lean_ctor_set(block, 0, name_obj.cast()); + lean_ctor_set(block, 0, name_obj.as_mut_ptr().cast()); lean_ctor_set(block, 1, ba); let base = block.cast::(); *base.add(8 + 16).cast::() = *sharing_len as u64; @@ -322,7 +322,7 @@ pub extern "C" fn rs_compile_env_full( std::ptr::copy_nonoverlapping(addr_bytes.as_ptr(), addr_data, 32); let entry_obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(entry_obj, 0, name_obj.cast()); + lean_ctor_set(entry_obj, 0, name_obj.as_mut_ptr().cast()); lean_ctor_set(entry_obj, 1, addr_ba); lean_array_set_core(name_to_addr_arr, i, entry_obj); @@ -336,7 +336,7 @@ pub extern "C" fn rs_compile_env_full( // Build RustCompilationResult let result = lean_alloc_ctor(0, 3, 0); lean_ctor_set(result, 0, raw_env.cast()); - lean_ctor_set(result, 1, condensed_obj.cast()); + lean_ctor_set(result, 1, condensed_obj.as_mut_ptr().cast()); lean_ctor_set(result, 2, compiled_obj); lean_io_result_mk_ok(result).cast() @@ -484,7 +484,7 @@ pub extern "C" fn rs_compile_phases( let result = lean_alloc_ctor(0, 3, 0); lean_ctor_set(result, 0, raw_env.cast()); - lean_ctor_set(result, 1, condensed_obj.cast()); + lean_ctor_set(result, 1, condensed_obj.as_mut_ptr().cast()); lean_ctor_set(result, 2, raw_ixon_env); lean_io_result_mk_ok(result).cast() @@ -1556,8 +1556,8 @@ pub extern "C" fn rs_decompile_env(raw_env_ptr: *const c_void) -> *mut c_void { let name_obj = build_name(&mut cache, name); let info_obj = build_constant_info(&mut 
cache, info); let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.cast()); - lean_ctor_set(pair, 1, info_obj.cast()); + lean_ctor_set(pair, 0, name_obj.as_mut_ptr().cast()); + lean_ctor_set(pair, 1, info_obj.as_mut_ptr().cast()); lean_array_set_core(arr, i, pair); } // Except.ok (tag 1) diff --git a/src/lean/ffi/graph.rs b/src/lean/ffi/graph.rs index 3764cb77..8cab23c7 100644 --- a/src/lean/ffi/graph.rs +++ b/src/lean/ffi/graph.rs @@ -6,10 +6,8 @@ use std::sync::Arc; use super::ffi_io_guard; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_set, - lean_io_result_mk_ok, -}; +use crate::lean::lean::lean_io_result_mk_ok; +use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; use super::builder::LeanBuildCache; use super::ix::name::build_name; @@ -19,85 +17,77 @@ use super::lean_env::lean_ptr_to_env; pub fn build_ref_graph_array( cache: &mut LeanBuildCache, refs: &crate::ix::graph::RefMap, -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(refs.len(), refs.len()); - for (i, (name, ref_set)) in refs.iter().enumerate() { - let name_obj = build_name(cache, name); - - let refs_arr = lean_alloc_array(ref_set.len(), ref_set.len()); - for (j, ref_name) in ref_set.iter().enumerate() { - let ref_name_obj = build_name(cache, ref_name); - lean_array_set_core(refs_arr, j, ref_name_obj.cast()); - } - - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.cast()); - lean_ctor_set(pair, 1, refs_arr); +) -> LeanObj { + let arr = LeanArray::alloc(refs.len()); + for (i, (name, ref_set)) in refs.iter().enumerate() { + let name_obj = build_name(cache, name); - lean_array_set_core(arr, i, pair); + let refs_arr = LeanArray::alloc(ref_set.len()); + for (j, ref_name) in ref_set.iter().enumerate() { + let ref_name_obj = build_name(cache, ref_name); + refs_arr.set(j, ref_name_obj); } - arr.cast() + + let pair = LeanCtor::alloc(0, 
2, 0); + pair.set(0, name_obj); + pair.set(1, *refs_arr); + arr.set(i, *pair); } + *arr } /// Build a RustCondensedBlocks structure. pub fn build_condensed_blocks( cache: &mut LeanBuildCache, condensed: &crate::ix::condense::CondensedBlocks, -) -> *mut c_void { - unsafe { - // Build lowLinks: Array (Ix.Name × Ix.Name) - let low_links_arr = - lean_alloc_array(condensed.low_links.len(), condensed.low_links.len()); - for (i, (name, low_link)) in condensed.low_links.iter().enumerate() { - let name_obj = build_name(cache, name); - let low_link_obj = build_name(cache, low_link); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.cast()); - lean_ctor_set(pair, 1, low_link_obj.cast()); - lean_array_set_core(low_links_arr, i, pair); - } +) -> LeanObj { + // Build lowLinks: Array (Ix.Name × Ix.Name) + let low_links_arr = LeanArray::alloc(condensed.low_links.len()); + for (i, (name, low_link)) in condensed.low_links.iter().enumerate() { + let name_obj = build_name(cache, name); + let low_link_obj = build_name(cache, low_link); + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, low_link_obj); + low_links_arr.set(i, *pair); + } - // Build blocks: Array (Ix.Name × Array Ix.Name) - let blocks_arr = - lean_alloc_array(condensed.blocks.len(), condensed.blocks.len()); - for (i, (name, block_set)) in condensed.blocks.iter().enumerate() { - let name_obj = build_name(cache, name); - let block_names_arr = lean_alloc_array(block_set.len(), block_set.len()); - for (j, block_name) in block_set.iter().enumerate() { - let block_name_obj = build_name(cache, block_name); - lean_array_set_core(block_names_arr, j, block_name_obj.cast()); - } - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.cast()); - lean_ctor_set(pair, 1, block_names_arr); - lean_array_set_core(blocks_arr, i, pair); + // Build blocks: Array (Ix.Name × Array Ix.Name) + let blocks_arr = LeanArray::alloc(condensed.blocks.len()); + for (i, (name, block_set)) 
in condensed.blocks.iter().enumerate() { + let name_obj = build_name(cache, name); + let block_names_arr = LeanArray::alloc(block_set.len()); + for (j, block_name) in block_set.iter().enumerate() { + let block_name_obj = build_name(cache, block_name); + block_names_arr.set(j, block_name_obj); } + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, *block_names_arr); + blocks_arr.set(i, *pair); + } - // Build blockRefs: Array (Ix.Name × Array Ix.Name) - let block_refs_arr = - lean_alloc_array(condensed.block_refs.len(), condensed.block_refs.len()); - for (i, (name, ref_set)) in condensed.block_refs.iter().enumerate() { - let name_obj = build_name(cache, name); - let refs_arr = lean_alloc_array(ref_set.len(), ref_set.len()); - for (j, ref_name) in ref_set.iter().enumerate() { - let ref_name_obj = build_name(cache, ref_name); - lean_array_set_core(refs_arr, j, ref_name_obj.cast()); - } - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.cast()); - lean_ctor_set(pair, 1, refs_arr); - lean_array_set_core(block_refs_arr, i, pair); + // Build blockRefs: Array (Ix.Name × Array Ix.Name) + let block_refs_arr = LeanArray::alloc(condensed.block_refs.len()); + for (i, (name, ref_set)) in condensed.block_refs.iter().enumerate() { + let name_obj = build_name(cache, name); + let refs_arr = LeanArray::alloc(ref_set.len()); + for (j, ref_name) in ref_set.iter().enumerate() { + let ref_name_obj = build_name(cache, ref_name); + refs_arr.set(j, ref_name_obj); } - - // Build RustCondensedBlocks structure (3 fields) - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, low_links_arr); - lean_ctor_set(result, 1, blocks_arr); - lean_ctor_set(result, 2, block_refs_arr); - result.cast() + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, *refs_arr); + block_refs_arr.set(i, *pair); } + + // Build RustCondensedBlocks structure (3 fields) + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, *low_links_arr); 
+ result.set(1, *blocks_arr); + result.set(2, *block_refs_arr); + *result } // ============================================================================= @@ -115,7 +105,7 @@ pub extern "C" fn rs_build_ref_graph( let ref_graph = build_ref_graph(&rust_env); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_ref_graph_array(&mut cache, &ref_graph.out_refs); - unsafe { lean_io_result_mk_ok(result.cast()) }.cast() + unsafe { lean_io_result_mk_ok(result.as_ptr() as *mut _) }.cast() })) } @@ -131,6 +121,6 @@ pub extern "C" fn rs_compute_sccs( let condensed = compute_sccs(&ref_graph.out_refs); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_condensed_blocks(&mut cache, &condensed); - unsafe { lean_io_result_mk_ok(result.cast()) }.cast() + unsafe { lean_io_result_mk_ok(result.as_ptr() as *mut _) }.cast() })) } diff --git a/src/lean/ffi/ix/address.rs b/src/lean/ffi/ix/address.rs index da4ac1f8..61f8f443 100644 --- a/src/lean/ffi/ix/address.rs +++ b/src/lean/ffi/ix/address.rs @@ -2,40 +2,19 @@ //! //! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash -use std::ffi::c_void; - -use crate::lean::{ - lean::{lean_alloc_sarray, lean_sarray_cptr}, - lean_sarray_data, -}; +use crate::lean::obj::{IxAddress, LeanByteArray}; /// Build a Ix.Address from a blake3::Hash. /// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray -pub fn build_address(hash: &blake3::Hash) -> *mut c_void { - unsafe { - let bytes = hash.as_bytes(); - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let data_ptr = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - ba.cast() // Due to unboxing, ByteArray IS the Address - } +pub fn build_address(hash: &blake3::Hash) -> IxAddress { + LeanByteArray::from_bytes(hash.as_bytes()) } /// Round-trip an Ix.Address: decode ByteArray, re-encode. 
/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray directly #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_address( - addr_ptr: *const c_void, -) -> *mut c_void { - unsafe { - // Address is a single-field struct { hash : ByteArray } - // Due to unboxing, addr_ptr IS the ByteArray directly - let bytes = lean_sarray_data(addr_ptr); - - // Rebuild ByteArray - this IS the Address due to unboxing - let new_ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let data_ptr = lean_sarray_cptr(new_ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - new_ba.cast() - } +pub extern "C" fn rs_roundtrip_ix_address(addr: IxAddress) -> IxAddress { + // Address is a single-field struct { hash : ByteArray } + // Due to unboxing, addr IS the ByteArray directly + LeanByteArray::from_bytes(addr.as_bytes()) } diff --git a/src/lean/ffi/ix/constant.rs b/src/lean/ffi/ix/constant.rs index 3f947219..727b1d90 100644 --- a/src/lean/ffi/ix/constant.rs +++ b/src/lean/ffi/ix/constant.rs @@ -17,13 +17,11 @@ use crate::ix::env::{ DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_ctor_set_uint8, lean_obj_tag, -}; +use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; use crate::lean::nat::Nat; +use crate::lean::obj::{IxConstantInfo, LeanArray, LeanCtor, LeanObj}; use crate::lean::{ - lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_is_scalar, + lean_array_data, lean_ctor_scalar_u8, lean_is_scalar, }; use super::super::builder::LeanBuildCache; @@ -37,41 +35,39 @@ use super::name::{ pub fn build_constant_val( cache: &mut LeanBuildCache, cv: &ConstantVal, -) -> *mut c_void { - unsafe { - // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } - let name_obj = build_name(cache, &cv.name); - let level_params_obj = 
build_name_array(cache, &cv.level_params); - let type_obj = build_expr(cache, &cv.typ); - - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj.cast()); - lean_ctor_set(obj, 1, level_params_obj.cast()); - lean_ctor_set(obj, 2, type_obj.cast()); - obj.cast() - } +) -> LeanObj { + // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } + let name_obj = build_name(cache, &cv.name); + let level_params_obj = build_name_array(cache, &cv.level_params); + let type_obj = build_expr(cache, &cv.typ); + + let obj = LeanCtor::alloc(0, 3, 0); + obj.set(0, name_obj); + obj.set(1, level_params_obj); + obj.set(2, type_obj); + *obj } /// Build ReducibilityHints. /// NOTE: In Lean 4, 0-field constructors are boxed scalars when the inductive has -/// other constructors with fields. So opaque and abbrev use lean_box_fn. -pub fn build_reducibility_hints(hints: &ReducibilityHints) -> *mut c_void { - unsafe { - match hints { - // | opaque -- tag 0, boxed as scalar - ReducibilityHints::Opaque => lean_box_fn(0), - // | abbrev -- tag 1, boxed as scalar - ReducibilityHints::Abbrev => lean_box_fn(1), - // | regular (h : UInt32) -- tag 2, object constructor - ReducibilityHints::Regular(h) => { - // UInt32 is a scalar, stored inline - let obj = lean_alloc_ctor(2, 0, 4); - // Set the uint32 at offset 0 in the scalar area - let ptr = obj.cast::(); - *(ptr.add(8).cast::()) = *h; - obj.cast() - }, - } +/// other constructors with fields. So opaque and abbrev use box_usize. 
+pub fn build_reducibility_hints(hints: &ReducibilityHints) -> LeanObj { + match hints { + // | opaque -- tag 0, boxed as scalar + ReducibilityHints::Opaque => LeanObj::box_usize(0), + // | abbrev -- tag 1, boxed as scalar + ReducibilityHints::Abbrev => LeanObj::box_usize(1), + // | regular (h : UInt32) -- tag 2, object constructor + ReducibilityHints::Regular(h) => { + // UInt32 is a scalar, stored inline + let obj = LeanCtor::alloc(2, 0, 4); + // Set the uint32 at offset 0 in the scalar area + unsafe { + let ptr = obj.as_ptr().cast::(); + *(ptr.add(8).cast::().cast_mut()) = *h; + } + *obj + }, } } @@ -79,202 +75,198 @@ pub fn build_reducibility_hints(hints: &ReducibilityHints) -> *mut c_void { pub fn build_constant_info( cache: &mut LeanBuildCache, info: &ConstantInfo, -) -> *mut c_void { - unsafe { - match info { - // | axiomInfo (v : AxiomVal) -- tag 0 - ConstantInfo::AxiomInfo(v) => { - // AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } - let cnst_obj = build_constant_val(cache, &v.cnst); - let axiom_val = lean_alloc_ctor(0, 1, 1); - lean_ctor_set(axiom_val, 0, cnst_obj.cast()); - lean_ctor_set_uint8(axiom_val, 8, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, axiom_val); - obj.cast() - }, - // | defnInfo (v : DefinitionVal) -- tag 1 - ConstantInfo::DefnInfo(v) => { - // DefinitionVal = { cnst, value, hints, safety, all } - // NOTE: safety (DefinitionSafety) is a small enum stored as SCALAR - // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety) - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let hints_obj = build_reducibility_hints(&v.hints); - let all_obj = build_name_array(cache, &v.all); - let safety_byte = match v.safety { - DefinitionSafety::Unsafe => 0u8, - DefinitionSafety::Safe => 1u8, - DefinitionSafety::Partial => 2u8, - }; - - let defn_val = lean_alloc_ctor(0, 4, 1); // 4 obj fields, 1 scalar byte - lean_ctor_set(defn_val, 0, 
cnst_obj.cast()); - lean_ctor_set(defn_val, 1, value_obj.cast()); - lean_ctor_set(defn_val, 2, hints_obj.cast()); - lean_ctor_set(defn_val, 3, all_obj.cast()); - lean_ctor_set_uint8(defn_val, 4 * 8, safety_byte); - - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, defn_val); - obj.cast() - }, - // | thmInfo (v : TheoremVal) -- tag 2 - ConstantInfo::ThmInfo(v) => { - // TheoremVal = { cnst, value, all } - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let all_obj = build_name_array(cache, &v.all); - - let thm_val = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(thm_val, 0, cnst_obj.cast()); - lean_ctor_set(thm_val, 1, value_obj.cast()); - lean_ctor_set(thm_val, 2, all_obj.cast()); - - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, thm_val); - obj.cast() - }, - // | opaqueInfo (v : OpaqueVal) -- tag 3 - ConstantInfo::OpaqueInfo(v) => { - // OpaqueVal = { cnst, value, isUnsafe, all } - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let all_obj = build_name_array(cache, &v.all); - - let opaque_val = lean_alloc_ctor(0, 3, 1); - lean_ctor_set(opaque_val, 0, cnst_obj.cast()); - lean_ctor_set(opaque_val, 1, value_obj.cast()); - lean_ctor_set(opaque_val, 2, all_obj.cast()); - lean_ctor_set_uint8(opaque_val, 3 * 8, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, opaque_val); - obj.cast() - }, - // | quotInfo (v : QuotVal) -- tag 4 - ConstantInfo::QuotInfo(v) => { - // QuotVal = { cnst, kind } - // NOTE: QuotKind is a small enum stored as SCALAR - // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) - let cnst_obj = build_constant_val(cache, &v.cnst); - let kind_byte = match v.kind { - QuotKind::Type => 0u8, - QuotKind::Ctor => 1u8, - QuotKind::Lift => 2u8, - QuotKind::Ind => 3u8, - }; - - let quot_val = lean_alloc_ctor(0, 1, 1); // 1 obj field, 1 scalar byte - lean_ctor_set(quot_val, 0, cnst_obj.cast()); - 
lean_ctor_set_uint8(quot_val, 8, kind_byte); - - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, quot_val); - obj.cast() - }, - // | inductInfo (v : InductiveVal) -- tag 5 - ConstantInfo::InductInfo(v) => { - // InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } - let cnst_obj = build_constant_val(cache, &v.cnst); - let num_params_obj = build_nat(&v.num_params); - let num_indices_obj = build_nat(&v.num_indices); - let all_obj = build_name_array(cache, &v.all); - let ctors_obj = build_name_array(cache, &v.ctors); - let num_nested_obj = build_nat(&v.num_nested); - - // 6 object fields, 3 scalar bytes for bools - let induct_val = lean_alloc_ctor(0, 6, 3); - lean_ctor_set(induct_val, 0, cnst_obj.cast()); - lean_ctor_set(induct_val, 1, num_params_obj.cast()); - lean_ctor_set(induct_val, 2, num_indices_obj.cast()); - lean_ctor_set(induct_val, 3, all_obj.cast()); - lean_ctor_set(induct_val, 4, ctors_obj.cast()); - lean_ctor_set(induct_val, 5, num_nested_obj.cast()); - lean_ctor_set_uint8(induct_val, 6 * 8, v.is_rec as u8); - lean_ctor_set_uint8(induct_val, 6 * 8 + 1, v.is_unsafe as u8); - lean_ctor_set_uint8(induct_val, 6 * 8 + 2, v.is_reflexive as u8); - - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, induct_val); - obj.cast() - }, - // | ctorInfo (v : ConstructorVal) -- tag 6 - ConstantInfo::CtorInfo(v) => { - // ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } - let cnst_obj = build_constant_val(cache, &v.cnst); - let induct_obj = build_name(cache, &v.induct); - let cidx_obj = build_nat(&v.cidx); - let num_params_obj = build_nat(&v.num_params); - let num_fields_obj = build_nat(&v.num_fields); - - // 5 object fields, 1 scalar byte for bool - let ctor_val = lean_alloc_ctor(0, 5, 1); - lean_ctor_set(ctor_val, 0, cnst_obj.cast()); - lean_ctor_set(ctor_val, 1, induct_obj.cast()); - lean_ctor_set(ctor_val, 2, cidx_obj.cast()); - lean_ctor_set(ctor_val, 3, num_params_obj.cast()); 
- lean_ctor_set(ctor_val, 4, num_fields_obj.cast()); - lean_ctor_set_uint8(ctor_val, 5 * 8, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(6, 1, 0); - lean_ctor_set(obj, 0, ctor_val); - obj.cast() - }, - // | recInfo (v : RecursorVal) -- tag 7 - ConstantInfo::RecInfo(v) => { - // RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } - let cnst_obj = build_constant_val(cache, &v.cnst); - let all_obj = build_name_array(cache, &v.all); - let num_params_obj = build_nat(&v.num_params); - let num_indices_obj = build_nat(&v.num_indices); - let num_motives_obj = build_nat(&v.num_motives); - let num_minors_obj = build_nat(&v.num_minors); - let rules_obj = build_recursor_rules(cache, &v.rules); - - // 7 object fields, 2 scalar bytes for bools - let rec_val = lean_alloc_ctor(0, 7, 2); - lean_ctor_set(rec_val, 0, cnst_obj.cast()); - lean_ctor_set(rec_val, 1, all_obj.cast()); - lean_ctor_set(rec_val, 2, num_params_obj.cast()); - lean_ctor_set(rec_val, 3, num_indices_obj.cast()); - lean_ctor_set(rec_val, 4, num_motives_obj.cast()); - lean_ctor_set(rec_val, 5, num_minors_obj.cast()); - lean_ctor_set(rec_val, 6, rules_obj.cast()); - lean_ctor_set_uint8(rec_val, 7 * 8, v.k as u8); - lean_ctor_set_uint8(rec_val, 7 * 8 + 1, v.is_unsafe as u8); - - let obj = lean_alloc_ctor(7, 1, 0); - lean_ctor_set(obj, 0, rec_val); - obj.cast() - }, - } - } +) -> IxConstantInfo { + let result = match info { + // | axiomInfo (v : AxiomVal) -- tag 0 + ConstantInfo::AxiomInfo(v) => { + // AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } + let cnst_obj = build_constant_val(cache, &v.cnst); + let axiom_val = LeanCtor::alloc(0, 1, 1); + axiom_val.set(0, cnst_obj); + axiom_val.set_u8(8, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, axiom_val); + *obj + }, + // | defnInfo (v : DefinitionVal) -- tag 1 + ConstantInfo::DefnInfo(v) => { + // DefinitionVal = { cnst, value, hints, safety, all } + // Memory layout: 4 obj fields (cnst, value, 
hints, all), 1 scalar byte (safety) + let cnst_obj = build_constant_val(cache, &v.cnst); + let value_obj = build_expr(cache, &v.value); + let hints_obj = build_reducibility_hints(&v.hints); + let all_obj = build_name_array(cache, &v.all); + let safety_byte = match v.safety { + DefinitionSafety::Unsafe => 0u8, + DefinitionSafety::Safe => 1u8, + DefinitionSafety::Partial => 2u8, + }; + + let defn_val = LeanCtor::alloc(0, 4, 1); + defn_val.set(0, cnst_obj); + defn_val.set(1, value_obj); + defn_val.set(2, hints_obj); + defn_val.set(3, all_obj); + defn_val.set_u8(4 * 8, safety_byte); + + let obj = LeanCtor::alloc(1, 1, 0); + obj.set(0, defn_val); + *obj + }, + // | thmInfo (v : TheoremVal) -- tag 2 + ConstantInfo::ThmInfo(v) => { + // TheoremVal = { cnst, value, all } + let cnst_obj = build_constant_val(cache, &v.cnst); + let value_obj = build_expr(cache, &v.value); + let all_obj = build_name_array(cache, &v.all); + + let thm_val = LeanCtor::alloc(0, 3, 0); + thm_val.set(0, cnst_obj); + thm_val.set(1, value_obj); + thm_val.set(2, all_obj); + + let obj = LeanCtor::alloc(2, 1, 0); + obj.set(0, thm_val); + *obj + }, + // | opaqueInfo (v : OpaqueVal) -- tag 3 + ConstantInfo::OpaqueInfo(v) => { + // OpaqueVal = { cnst, value, isUnsafe, all } + let cnst_obj = build_constant_val(cache, &v.cnst); + let value_obj = build_expr(cache, &v.value); + let all_obj = build_name_array(cache, &v.all); + + let opaque_val = LeanCtor::alloc(0, 3, 1); + opaque_val.set(0, cnst_obj); + opaque_val.set(1, value_obj); + opaque_val.set(2, all_obj); + opaque_val.set_u8(3 * 8, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(3, 1, 0); + obj.set(0, opaque_val); + *obj + }, + // | quotInfo (v : QuotVal) -- tag 4 + ConstantInfo::QuotInfo(v) => { + // QuotVal = { cnst, kind } + // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) + let cnst_obj = build_constant_val(cache, &v.cnst); + let kind_byte = match v.kind { + QuotKind::Type => 0u8, + QuotKind::Ctor => 1u8, + QuotKind::Lift => 2u8, + 
QuotKind::Ind => 3u8, + }; + + let quot_val = LeanCtor::alloc(0, 1, 1); + quot_val.set(0, cnst_obj); + quot_val.set_u8(8, kind_byte); + + let obj = LeanCtor::alloc(4, 1, 0); + obj.set(0, quot_val); + *obj + }, + // | inductInfo (v : InductiveVal) -- tag 5 + ConstantInfo::InductInfo(v) => { + // InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } + let cnst_obj = build_constant_val(cache, &v.cnst); + let num_params_obj = build_nat(&v.num_params); + let num_indices_obj = build_nat(&v.num_indices); + let all_obj = build_name_array(cache, &v.all); + let ctors_obj = build_name_array(cache, &v.ctors); + let num_nested_obj = build_nat(&v.num_nested); + + // 6 object fields, 3 scalar bytes for bools + let induct_val = LeanCtor::alloc(0, 6, 3); + induct_val.set(0, cnst_obj); + induct_val.set(1, num_params_obj); + induct_val.set(2, num_indices_obj); + induct_val.set(3, all_obj); + induct_val.set(4, ctors_obj); + induct_val.set(5, num_nested_obj); + induct_val.set_u8(6 * 8, v.is_rec as u8); + induct_val.set_u8(6 * 8 + 1, v.is_unsafe as u8); + induct_val.set_u8(6 * 8 + 2, v.is_reflexive as u8); + + let obj = LeanCtor::alloc(5, 1, 0); + obj.set(0, induct_val); + *obj + }, + // | ctorInfo (v : ConstructorVal) -- tag 6 + ConstantInfo::CtorInfo(v) => { + // ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } + let cnst_obj = build_constant_val(cache, &v.cnst); + let induct_obj = build_name(cache, &v.induct); + let cidx_obj = build_nat(&v.cidx); + let num_params_obj = build_nat(&v.num_params); + let num_fields_obj = build_nat(&v.num_fields); + + // 5 object fields, 1 scalar byte for bool + let ctor_val = LeanCtor::alloc(0, 5, 1); + ctor_val.set(0, cnst_obj); + ctor_val.set(1, induct_obj); + ctor_val.set(2, cidx_obj); + ctor_val.set(3, num_params_obj); + ctor_val.set(4, num_fields_obj); + ctor_val.set_u8(5 * 8, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(6, 1, 0); + obj.set(0, ctor_val); + *obj + }, + // | 
recInfo (v : RecursorVal) -- tag 7 + ConstantInfo::RecInfo(v) => { + // RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } + let cnst_obj = build_constant_val(cache, &v.cnst); + let all_obj = build_name_array(cache, &v.all); + let num_params_obj = build_nat(&v.num_params); + let num_indices_obj = build_nat(&v.num_indices); + let num_motives_obj = build_nat(&v.num_motives); + let num_minors_obj = build_nat(&v.num_minors); + let rules_obj = build_recursor_rules(cache, &v.rules); + + // 7 object fields, 2 scalar bytes for bools + let rec_val = LeanCtor::alloc(0, 7, 2); + rec_val.set(0, cnst_obj); + rec_val.set(1, all_obj); + rec_val.set(2, num_params_obj); + rec_val.set(3, num_indices_obj); + rec_val.set(4, num_motives_obj); + rec_val.set(5, num_minors_obj); + rec_val.set(6, rules_obj); + rec_val.set_u8(7 * 8, v.k as u8); + rec_val.set_u8(7 * 8 + 1, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(7, 1, 0); + obj.set(0, rec_val); + *obj + }, + }; + + IxConstantInfo::new(result) } /// Build an Array of RecursorRule. 
fn build_recursor_rules( cache: &mut LeanBuildCache, rules: &[RecursorRule], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(rules.len(), rules.len()); - for (i, rule) in rules.iter().enumerate() { - // RecursorRule = { ctor : Name, nFields : Nat, rhs : Expr } - let ctor_obj = build_name(cache, &rule.ctor); - let n_fields_obj = build_nat(&rule.n_fields); - let rhs_obj = build_expr(cache, &rule.rhs); - - let rule_obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(rule_obj, 0, ctor_obj.cast()); - lean_ctor_set(rule_obj, 1, n_fields_obj.cast()); - lean_ctor_set(rule_obj, 2, rhs_obj.cast()); - - lean_array_set_core(arr, i, rule_obj); - } - arr.cast() +) -> LeanArray { + let arr = LeanArray::alloc(rules.len()); + for (i, rule) in rules.iter().enumerate() { + // RecursorRule = { ctor : Name, nFields : Nat, rhs : Expr } + let ctor_obj = build_name(cache, &rule.ctor); + let n_fields_obj = build_nat(&rule.n_fields); + let rhs_obj = build_expr(cache, &rule.rhs); + + let rule_obj = LeanCtor::alloc(0, 3, 0); + rule_obj.set(0, ctor_obj); + rule_obj.set(1, n_fields_obj); + rule_obj.set(2, rhs_obj); + + arr.set(i, rule_obj); } + arr } // ============================================================================= @@ -303,17 +295,9 @@ pub fn decode_constant_val(ptr: *const c_void) -> ConstantVal { } /// Decode Lean.ReducibilityHints from Lean pointer. -/// | opaque -- tag 0 -/// | abbrev -- tag 1 -/// | regular (h : UInt32) -- tag 2 -/// -/// NOTE: In Lean 4, boxed scalars are `(tag << 1) | 1`: -/// - opaque (tag 0) → scalar value 1 -/// - abbrev (tag 1) → scalar value 3 pub fn decode_reducibility_hints(ptr: *const c_void) -> ReducibilityHints { unsafe { if lean_is_scalar(ptr) { - // Unbox the scalar: tag = (ptr >> 1) let tag = (ptr as usize) >> 1; match tag { 0 => return ReducibilityHints::Opaque, @@ -338,7 +322,6 @@ pub fn decode_reducibility_hints(ptr: *const c_void) -> ReducibilityHints { } /// Decode Ix.RecursorRule from Lean pointer. 
-/// RecursorRule = { ctor : Name, nfields : Nat, rhs : Expr } fn decode_recursor_rule(ptr: *const c_void) -> RecursorRule { unsafe { let ctor_ptr = lean_ctor_get(ptr as *mut _, 0); @@ -361,8 +344,6 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { match tag { 0 => { - // axiomInfo: AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } - // Structure: 1 obj field (cnst), 1 scalar byte (isUnsafe) let cnst_ptr = lean_ctor_get(inner_ptr, 0); let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 1, 0) != 0; @@ -372,16 +353,12 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }) }, 1 => { - // defnInfo: DefinitionVal = { cnst, value, hints, safety, all } - // NOTE: safety (DefinitionSafety) is a small enum and is stored as a SCALAR field - // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety) let cnst_ptr = lean_ctor_get(inner_ptr, 0); let value_ptr = lean_ctor_get(inner_ptr, 1); let hints_ptr = lean_ctor_get(inner_ptr, 2); - let all_ptr = lean_ctor_get(inner_ptr, 3); // all is at index 3, not 4! 
+ let all_ptr = lean_ctor_get(inner_ptr, 3); - // safety is a scalar at offset 4*8 = 32 bytes from start of object fields - let safety_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 4, 0); // 4 obj fields, offset 0 in scalar area + let safety_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 4, 0); let safety = match safety_byte { 0 => DefinitionSafety::Unsafe, 1 => DefinitionSafety::Safe, @@ -398,7 +375,6 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }) }, 2 => { - // thmInfo: TheoremVal = { cnst, value, all } let cnst_ptr = lean_ctor_get(inner_ptr, 0); let value_ptr = lean_ctor_get(inner_ptr, 1); let all_ptr = lean_ctor_get(inner_ptr, 2); @@ -410,8 +386,6 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }) }, 3 => { - // opaqueInfo: OpaqueVal = { cnst, value, isUnsafe, all } - // Structure: 3 obj fields (cnst, value, all), 1 scalar byte (isUnsafe) let cnst_ptr = lean_ctor_get(inner_ptr, 0); let value_ptr = lean_ctor_get(inner_ptr, 1); let all_ptr = lean_ctor_get(inner_ptr, 2); @@ -425,12 +399,9 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }) }, 4 => { - // quotInfo: QuotVal = { cnst, kind } - // NOTE: QuotKind is a small enum (4 0-field ctors), stored as SCALAR - // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let kind_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 1, 0); // 1 obj field, offset 0 in scalar area + let kind_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 1, 0); let kind = match kind_byte { 0 => QuotKind::Type, 1 => QuotKind::Ctor, @@ -445,8 +416,6 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }) }, 5 => { - // inductInfo: InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } - // 6 obj fields, 3 scalar bytes let cnst_ptr = lean_ctor_get(inner_ptr, 0); let num_params_ptr = lean_ctor_get(inner_ptr, 1); let num_indices_ptr = lean_ctor_get(inner_ptr, 2); @@ -471,8 +440,6 @@ 
pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }) }, 6 => { - // ctorInfo: ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } - // 5 obj fields, 1 scalar byte let cnst_ptr = lean_ctor_get(inner_ptr, 0); let induct_ptr = lean_ctor_get(inner_ptr, 1); let cidx_ptr = lean_ctor_get(inner_ptr, 2); @@ -491,8 +458,6 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { }) }, 7 => { - // recInfo: RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } - // 7 obj fields, 2 scalar bytes let cnst_ptr = lean_ctor_get(inner_ptr, 0); let all_ptr = lean_ctor_get(inner_ptr, 1); let num_params_ptr = lean_ctor_get(inner_ptr, 2); @@ -529,9 +494,9 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { /// Round-trip an Ix.ConstantInfo: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_constant_info( - info_ptr: *const c_void, -) -> *mut c_void { - let info = decode_constant_info(info_ptr); + info_ptr: IxConstantInfo, +) -> IxConstantInfo { + let info = decode_constant_info(info_ptr.as_ptr()); let mut cache = LeanBuildCache::new(); build_constant_info(&mut cache, &info) } diff --git a/src/lean/ffi/ix/data.rs b/src/lean/ffi/ix/data.rs index 4c7fc401..5b6d6221 100644 --- a/src/lean/ffi/ix/data.rs +++ b/src/lean/ffi/ix/data.rs @@ -5,11 +5,12 @@ use std::ffi::c_void; use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, }; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_ctor_set_uint8, lean_mk_string, lean_obj_tag, -}; +use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; use crate::lean::nat::Nat; +use crate::lean::obj::{ + IxDataValue, IxInt, IxSourceInfo, IxSubstring, IxSyntax, + IxSyntaxPreresolved, LeanArray, LeanCtor, LeanString, +}; use crate::lean::{ lean_array_data, lean_ctor_scalar_u8, lean_is_scalar, 
lean_obj_to_string, }; @@ -19,59 +20,52 @@ use super::super::primitives::build_nat; use super::name::{build_name, decode_ix_name}; /// Build a Ix.Int (ofNat or negSucc). -pub fn build_int(int: &Int) -> *mut c_void { - unsafe { - match int { - Int::OfNat(n) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_nat(n).cast()); - obj.cast() - }, - Int::NegSucc(n) => { - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, build_nat(n).cast()); - obj.cast() - }, - } +pub fn build_int(int: &Int) -> IxInt { + match int { + Int::OfNat(n) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, build_nat(n)); + IxInt::new(*obj) + }, + Int::NegSucc(n) => { + let obj = LeanCtor::alloc(1, 1, 0); + obj.set(0, build_nat(n)); + IxInt::new(*obj) + }, } } /// Build a Ix.Substring. -pub fn build_substring(ss: &Substring) -> *mut c_void { - unsafe { - let s_cstr = crate::lean::safe_cstring(ss.str.as_str()); - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - lean_ctor_set(obj, 1, build_nat(&ss.start_pos).cast()); - lean_ctor_set(obj, 2, build_nat(&ss.stop_pos).cast()); - obj.cast() - } +pub fn build_substring(ss: &Substring) -> IxSubstring { + let obj = LeanCtor::alloc(0, 3, 0); + obj.set(0, LeanString::from_str(ss.str.as_str())); + obj.set(1, build_nat(&ss.start_pos)); + obj.set(2, build_nat(&ss.stop_pos)); + IxSubstring::new(*obj) } /// Build a Ix.SourceInfo. 
-pub fn build_source_info(si: &SourceInfo) -> *mut c_void { - unsafe { - match si { - // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0 - SourceInfo::Original(leading, pos, trailing, end_pos) => { - let obj = lean_alloc_ctor(0, 4, 0); - lean_ctor_set(obj, 0, build_substring(leading).cast()); - lean_ctor_set(obj, 1, build_nat(pos).cast()); - lean_ctor_set(obj, 2, build_substring(trailing).cast()); - lean_ctor_set(obj, 3, build_nat(end_pos).cast()); - obj.cast() - }, - // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1 - SourceInfo::Synthetic(pos, end_pos, canonical) => { - let obj = lean_alloc_ctor(1, 2, 1); - lean_ctor_set(obj, 0, build_nat(pos).cast()); - lean_ctor_set(obj, 1, build_nat(end_pos).cast()); - lean_ctor_set_uint8(obj, 2 * 8, *canonical as u8); - obj.cast() - }, - // | none -- tag 2 - SourceInfo::None => lean_alloc_ctor(2, 0, 0).cast(), - } +pub fn build_source_info(si: &SourceInfo) -> IxSourceInfo { + match si { + // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0 + SourceInfo::Original(leading, pos, trailing, end_pos) => { + let obj = LeanCtor::alloc(0, 4, 0); + obj.set(0, build_substring(leading)); + obj.set(1, build_nat(pos)); + obj.set(2, build_substring(trailing)); + obj.set(3, build_nat(end_pos)); + IxSourceInfo::new(*obj) + }, + // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1 + SourceInfo::Synthetic(pos, end_pos, canonical) => { + let obj = LeanCtor::alloc(1, 2, 1); + obj.set(0, build_nat(pos)); + obj.set(1, build_nat(end_pos)); + obj.set_u8(2 * 8, *canonical as u8); + IxSourceInfo::new(*obj) + }, + // | none -- tag 2 + SourceInfo::None => IxSourceInfo::new(*LeanCtor::alloc(2, 0, 0)), } } @@ -79,81 +73,73 @@ pub fn build_source_info(si: &SourceInfo) -> *mut c_void { pub fn build_syntax_preresolved( cache: &mut LeanBuildCache, sp: &SyntaxPreresolved, -) -> *mut c_void { - unsafe { - match sp { - // | namespace (name : 
Name) -- tag 0 - SyntaxPreresolved::Namespace(name) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_name(cache, name).cast()); - obj.cast() - }, - // | decl (name : Name) (aliases : Array String) -- tag 1 - SyntaxPreresolved::Decl(name, aliases) => { - let name_obj = build_name(cache, name); - let aliases_obj = build_string_array(aliases); - let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, name_obj.cast()); - lean_ctor_set(obj, 1, aliases_obj.cast()); - obj.cast() - }, - } +) -> IxSyntaxPreresolved { + match sp { + // | namespace (name : Name) -- tag 0 + SyntaxPreresolved::Namespace(name) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, build_name(cache, name)); + IxSyntaxPreresolved::new(*obj) + }, + // | decl (name : Name) (aliases : Array String) -- tag 1 + SyntaxPreresolved::Decl(name, aliases) => { + let name_obj = build_name(cache, name); + let aliases_obj = build_string_array(aliases); + let obj = LeanCtor::alloc(1, 2, 0); + obj.set(0, name_obj); + obj.set(1, aliases_obj); + IxSyntaxPreresolved::new(*obj) + }, } } /// Build an Array of Strings. -pub fn build_string_array(strings: &[String]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(strings.len(), strings.len()); - for (i, s) in strings.iter().enumerate() { - let s_cstr = crate::lean::safe_cstring(s.as_str()); - lean_array_set_core(arr, i, lean_mk_string(s_cstr.as_ptr())); - } - arr.cast() +pub fn build_string_array(strings: &[String]) -> LeanArray { + let arr = LeanArray::alloc(strings.len()); + for (i, s) in strings.iter().enumerate() { + arr.set(i, LeanString::from_str(s.as_str())); } + arr } /// Build a Ix.Syntax. 
-pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> *mut c_void { - unsafe { - match syn { - // | missing -- tag 0 - Syntax::Missing => lean_alloc_ctor(0, 0, 0).cast(), - // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 - Syntax::Node(info, kind, args) => { - let info_obj = build_source_info(info); - let kind_obj = build_name(cache, kind); - let args_obj = build_syntax_array(cache, args); - let obj = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(obj, 0, info_obj.cast()); - lean_ctor_set(obj, 1, kind_obj.cast()); - lean_ctor_set(obj, 2, args_obj.cast()); - obj.cast() - }, - // | atom (info : SourceInfo) (val : String) -- tag 2 - Syntax::Atom(info, val) => { - let info_obj = build_source_info(info); - let val_cstr = crate::lean::safe_cstring(val.as_str()); - let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, info_obj.cast()); - lean_ctor_set(obj, 1, lean_mk_string(val_cstr.as_ptr())); - obj.cast() - }, - // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 - Syntax::Ident(info, raw_val, val, preresolved) => { - let info_obj = build_source_info(info); - let raw_val_obj = build_substring(raw_val); - let val_obj = build_name(cache, val); - let preresolved_obj = - build_syntax_preresolved_array(cache, preresolved); - let obj = lean_alloc_ctor(3, 4, 0); - lean_ctor_set(obj, 0, info_obj.cast()); - lean_ctor_set(obj, 1, raw_val_obj.cast()); - lean_ctor_set(obj, 2, val_obj.cast()); - lean_ctor_set(obj, 3, preresolved_obj.cast()); - obj.cast() - }, - } +pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> IxSyntax { + match syn { + // | missing -- tag 0 + Syntax::Missing => IxSyntax::new(*LeanCtor::alloc(0, 0, 0)), + // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 + Syntax::Node(info, kind, args) => { + let info_obj = build_source_info(info); + let kind_obj = build_name(cache, kind); + let args_obj = build_syntax_array(cache, args); 
+ let obj = LeanCtor::alloc(1, 3, 0); + obj.set(0, info_obj); + obj.set(1, kind_obj); + obj.set(2, args_obj); + IxSyntax::new(*obj) + }, + // | atom (info : SourceInfo) (val : String) -- tag 2 + Syntax::Atom(info, val) => { + let info_obj = build_source_info(info); + let obj = LeanCtor::alloc(2, 2, 0); + obj.set(0, info_obj); + obj.set(1, LeanString::from_str(val.as_str())); + IxSyntax::new(*obj) + }, + // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 + Syntax::Ident(info, raw_val, val, preresolved) => { + let info_obj = build_source_info(info); + let raw_val_obj = build_substring(raw_val); + let val_obj = build_name(cache, val); + let preresolved_obj = + build_syntax_preresolved_array(cache, preresolved); + let obj = LeanCtor::alloc(3, 4, 0); + obj.set(0, info_obj); + obj.set(1, raw_val_obj); + obj.set(2, val_obj); + obj.set(3, preresolved_obj); + IxSyntax::new(*obj) + }, } } @@ -161,72 +147,63 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> *mut c_void { pub fn build_syntax_array( cache: &mut LeanBuildCache, items: &[Syntax], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(items.len(), items.len()); - for (i, item) in items.iter().enumerate() { - let item_obj = build_syntax(cache, item); - lean_array_set_core(arr, i, item_obj.cast()); - } - arr.cast() +) -> LeanArray { + let arr = LeanArray::alloc(items.len()); + for (i, item) in items.iter().enumerate() { + arr.set(i, build_syntax(cache, item)); } + arr } /// Build an Array of SyntaxPreresolved. 
pub fn build_syntax_preresolved_array( cache: &mut LeanBuildCache, items: &[SyntaxPreresolved], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(items.len(), items.len()); - for (i, item) in items.iter().enumerate() { - let item_obj = build_syntax_preresolved(cache, item); - lean_array_set_core(arr, i, item_obj.cast()); - } - arr.cast() +) -> LeanArray { + let arr = LeanArray::alloc(items.len()); + for (i, item) in items.iter().enumerate() { + arr.set(i, build_syntax_preresolved(cache, item)); } + arr } /// Build Ix.DataValue. pub fn build_data_value( cache: &mut LeanBuildCache, dv: &DataValue, -) -> *mut c_void { - unsafe { - match dv { - DataValue::OfString(s) => { - let s_cstr = crate::lean::safe_cstring(s.as_str()); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - obj.cast() - }, - DataValue::OfBool(b) => { - // 0 object fields, 1 scalar byte - let obj = lean_alloc_ctor(1, 0, 1); - lean_ctor_set_uint8(obj, 0, *b as u8); - obj.cast() - }, - DataValue::OfName(n) => { - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, build_name(cache, n).cast()); - obj.cast() - }, - DataValue::OfNat(n) => { - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, build_nat(n).cast()); - obj.cast() - }, - DataValue::OfInt(i) => { - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, build_int(i).cast()); - obj.cast() - }, - DataValue::OfSyntax(syn) => { - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_syntax(cache, syn).cast()); - obj.cast() - }, - } +) -> IxDataValue { + match dv { + DataValue::OfString(s) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, LeanString::from_str(s.as_str())); + IxDataValue::new(*obj) + }, + DataValue::OfBool(b) => { + // 0 object fields, 1 scalar byte + let obj = LeanCtor::alloc(1, 0, 1); + obj.set_u8(0, *b as u8); + IxDataValue::new(*obj) + }, + DataValue::OfName(n) => { + let obj = LeanCtor::alloc(2, 1, 0); + obj.set(0, build_name(cache, 
n)); + IxDataValue::new(*obj) + }, + DataValue::OfNat(n) => { + let obj = LeanCtor::alloc(3, 1, 0); + obj.set(0, build_nat(n)); + IxDataValue::new(*obj) + }, + DataValue::OfInt(i) => { + let obj = LeanCtor::alloc(4, 1, 0); + obj.set(0, build_int(i)); + IxDataValue::new(*obj) + }, + DataValue::OfSyntax(syn) => { + let obj = LeanCtor::alloc(5, 1, 0); + obj.set(0, build_syntax(cache, syn)); + IxDataValue::new(*obj) + }, } } @@ -234,20 +211,18 @@ pub fn build_data_value( pub fn build_kvmap( cache: &mut LeanBuildCache, data: &[(Name, DataValue)], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(data.len(), data.len()); - for (i, (name, dv)) in data.iter().enumerate() { - let name_obj = build_name(cache, name); - let dv_obj = build_data_value(cache, dv); - // Prod (Name × DataValue) - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.cast()); - lean_ctor_set(pair, 1, dv_obj.cast()); - lean_array_set_core(arr, i, pair); - } - arr.cast() +) -> LeanArray { + let arr = LeanArray::alloc(data.len()); + for (i, (name, dv)) in data.iter().enumerate() { + let name_obj = build_name(cache, name); + let dv_obj = build_data_value(cache, dv); + // Prod (Name × DataValue) + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, dv_obj); + arr.set(i, pair); } + arr } // ============================================================================= @@ -462,45 +437,43 @@ pub fn decode_syntax_preresolved(ptr: *const c_void) -> SyntaxPreresolved { /// Round-trip an Ix.Int: decode from Lean, re-encode. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_int(int_ptr: *const c_void) -> *mut c_void { - let int_val = decode_ix_int(int_ptr); +pub extern "C" fn rs_roundtrip_ix_int(int_ptr: IxInt) -> IxInt { + let int_val = decode_ix_int(int_ptr.as_ptr()); build_int(&int_val) } /// Round-trip an Ix.Substring: decode from Lean, re-encode. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_substring( - sub_ptr: *const c_void, -) -> *mut c_void { - let sub = decode_substring(sub_ptr); + sub_ptr: IxSubstring, +) -> IxSubstring { + let sub = decode_substring(sub_ptr.as_ptr()); build_substring(&sub) } /// Round-trip an Ix.SourceInfo: decode from Lean, re-encode. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_source_info( - si_ptr: *const c_void, -) -> *mut c_void { - let si = decode_ix_source_info(si_ptr); + si_ptr: IxSourceInfo, +) -> IxSourceInfo { + let si = decode_ix_source_info(si_ptr.as_ptr()); build_source_info(&si) } /// Round-trip an Ix.SyntaxPreresolved: decode from Lean, re-encode. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( - sp_ptr: *const c_void, -) -> *mut c_void { - let sp = decode_syntax_preresolved(sp_ptr); + sp_ptr: IxSyntaxPreresolved, +) -> IxSyntaxPreresolved { + let sp = decode_syntax_preresolved(sp_ptr.as_ptr()); let mut cache = LeanBuildCache::new(); build_syntax_preresolved(&mut cache, &sp) } /// Round-trip an Ix.Syntax: decode from Lean, re-encode. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_syntax( - syn_ptr: *const c_void, -) -> *mut c_void { - let syn = decode_ix_syntax(syn_ptr); +pub extern "C" fn rs_roundtrip_ix_syntax(syn_ptr: IxSyntax) -> IxSyntax { + let syn = decode_ix_syntax(syn_ptr.as_ptr()); let mut cache = LeanBuildCache::new(); build_syntax(&mut cache, &syn) } @@ -508,9 +481,9 @@ pub extern "C" fn rs_roundtrip_ix_syntax( /// Round-trip an Ix.DataValue: decode from Lean, re-encode. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_data_value( - dv_ptr: *const c_void, -) -> *mut c_void { - let dv = decode_data_value(dv_ptr); + dv_ptr: IxDataValue, +) -> IxDataValue { + let dv = decode_data_value(dv_ptr.as_ptr()); let mut cache = LeanBuildCache::new(); build_data_value(&mut cache, &dv) } diff --git a/src/lean/ffi/ix/env.rs b/src/lean/ffi/ix/env.rs index dae44e18..6ebbdb0a 100644 --- a/src/lean/ffi/ix/env.rs +++ b/src/lean/ffi/ix/env.rs @@ -99,8 +99,8 @@ pub fn build_raw_environment( let val_obj = build_constant_info(cache, info); // Build pair (Name × ConstantInfo) let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj.cast()); - lean_ctor_set(pair, 1, val_obj.cast()); + lean_ctor_set(pair, 0, key_obj.as_mut_ptr().cast()); + lean_ctor_set(pair, 1, val_obj.as_mut_ptr().cast()); lean_array_set_core(consts_arr, i, pair); } @@ -256,8 +256,8 @@ pub fn build_raw_environment_from_vec( let key_obj = build_name(cache, name); let val_obj = build_constant_info(cache, info); let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj.cast()); - lean_ctor_set(pair, 1, val_obj.cast()); + lean_ctor_set(pair, 0, key_obj.as_mut_ptr().cast()); + lean_ctor_set(pair, 1, val_obj.as_mut_ptr().cast()); lean_array_set_core(consts_arr, i, pair); } consts_arr.cast() diff --git a/src/lean/ffi/ix/expr.rs b/src/lean/ffi/ix/expr.rs index 81b3f236..d52ba556 100644 --- a/src/lean/ffi/ix/expr.rs +++ b/src/lean/ffi/ix/expr.rs @@ -19,14 +19,10 @@ use std::ffi::c_void; use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_ctor_set_uint8, lean_inc, lean_mk_string, lean_obj_tag, -}; +use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; use crate::lean::nat::Nat; -use crate::lean::{ - lean_array_data, lean_box_fn, lean_ctor_scalar_u8, lean_obj_to_string, -}; +use crate::lean::obj::{IxExpr, LeanArray, 
LeanCtor, LeanObj, LeanString}; +use crate::lean::{lean_array_data, lean_ctor_scalar_u8, lean_obj_to_string}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -37,128 +33,126 @@ use super::name::{build_name, decode_ix_name}; /// Build a Lean Ix.Expr with embedded hash. /// Uses caching to avoid rebuilding the same expression. -pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { +pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { let hash = *expr.get_hash(); if let Some(&cached) = cache.exprs.get(&hash) { - unsafe { lean_inc(cached.cast()) }; + cached.inc_ref(); return cached; } - let result = unsafe { - match expr.as_data() { - ExprData::Bvar(idx, h) => { - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, build_nat(idx).cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - ExprData::Fvar(name, h) => { - let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, build_name(cache, name).cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - ExprData::Mvar(name, h) => { - let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, build_name(cache, name).cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - ExprData::Sort(level, h) => { - let obj = lean_alloc_ctor(3, 2, 0); - lean_ctor_set(obj, 0, build_level(cache, level).cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - ExprData::Const(name, levels, h) => { - let name_obj = build_name(cache, name); - let levels_obj = build_level_array(cache, levels); - let obj = lean_alloc_ctor(4, 3, 0); - lean_ctor_set(obj, 0, name_obj.cast()); - lean_ctor_set(obj, 1, levels_obj.cast()); - lean_ctor_set(obj, 2, build_address(h).cast()); - obj.cast() - }, - ExprData::App(fn_expr, arg_expr, h) => { - let fn_obj = build_expr(cache, fn_expr); - let arg_obj = build_expr(cache, arg_expr); - let obj = lean_alloc_ctor(5, 3, 0); - 
lean_ctor_set(obj, 0, fn_obj.cast()); - lean_ctor_set(obj, 1, arg_obj.cast()); - lean_ctor_set(obj, 2, build_address(h).cast()); - obj.cast() - }, - ExprData::Lam(name, ty, body, bi, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - // 4 object fields, 1 scalar byte for BinderInfo - let obj = lean_alloc_ctor(6, 4, 1); - lean_ctor_set(obj, 0, name_obj.cast()); - lean_ctor_set(obj, 1, ty_obj.cast()); - lean_ctor_set(obj, 2, body_obj.cast()); - lean_ctor_set(obj, 3, hash_obj.cast()); - lean_ctor_set_uint8(obj, 4 * 8, binder_info_to_u8(bi)); - obj.cast() - }, - ExprData::ForallE(name, ty, body, bi, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - let obj = lean_alloc_ctor(7, 4, 1); - lean_ctor_set(obj, 0, name_obj.cast()); - lean_ctor_set(obj, 1, ty_obj.cast()); - lean_ctor_set(obj, 2, body_obj.cast()); - lean_ctor_set(obj, 3, hash_obj.cast()); - lean_ctor_set_uint8(obj, 4 * 8, binder_info_to_u8(bi)); - obj.cast() - }, - ExprData::LetE(name, ty, val, body, non_dep, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let val_obj = build_expr(cache, val); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - // 5 object fields, 1 scalar byte for Bool - let obj = lean_alloc_ctor(8, 5, 1); - lean_ctor_set(obj, 0, name_obj.cast()); - lean_ctor_set(obj, 1, ty_obj.cast()); - lean_ctor_set(obj, 2, val_obj.cast()); - lean_ctor_set(obj, 3, body_obj.cast()); - lean_ctor_set(obj, 4, hash_obj.cast()); - lean_ctor_set_uint8(obj, 5 * 8, *non_dep as u8); - obj.cast() - }, - ExprData::Lit(lit, h) => { - let lit_obj = build_literal(lit); - let obj = lean_alloc_ctor(9, 2, 0); - lean_ctor_set(obj, 0, lit_obj.cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - 
ExprData::Mdata(md, inner, h) => { - let md_obj = build_mdata_array(cache, md); - let inner_obj = build_expr(cache, inner); - let obj = lean_alloc_ctor(10, 3, 0); - lean_ctor_set(obj, 0, md_obj.cast()); - lean_ctor_set(obj, 1, inner_obj.cast()); - lean_ctor_set(obj, 2, build_address(h).cast()); - obj.cast() - }, - ExprData::Proj(type_name, idx, struct_expr, h) => { - let name_obj = build_name(cache, type_name); - let idx_obj = build_nat(idx); - let struct_obj = build_expr(cache, struct_expr); - let obj = lean_alloc_ctor(11, 4, 0); - lean_ctor_set(obj, 0, name_obj.cast()); - lean_ctor_set(obj, 1, idx_obj.cast()); - lean_ctor_set(obj, 2, struct_obj.cast()); - lean_ctor_set(obj, 3, build_address(h).cast()); - obj.cast() - }, - } + let result = match expr.as_data() { + ExprData::Bvar(idx, h) => { + let obj = LeanCtor::alloc(0, 2, 0); + obj.set(0, build_nat(idx)); + obj.set(1, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::Fvar(name, h) => { + let obj = LeanCtor::alloc(1, 2, 0); + obj.set(0, build_name(cache, name)); + obj.set(1, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::Mvar(name, h) => { + let obj = LeanCtor::alloc(2, 2, 0); + obj.set(0, build_name(cache, name)); + obj.set(1, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::Sort(level, h) => { + let obj = LeanCtor::alloc(3, 2, 0); + obj.set(0, build_level(cache, level)); + obj.set(1, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::Const(name, levels, h) => { + let name_obj = build_name(cache, name); + let levels_obj = build_level_array(cache, levels); + let obj = LeanCtor::alloc(4, 3, 0); + obj.set(0, name_obj); + obj.set(1, levels_obj); + obj.set(2, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::App(fn_expr, arg_expr, h) => { + let fn_obj = build_expr(cache, fn_expr); + let arg_obj = build_expr(cache, arg_expr); + let obj = LeanCtor::alloc(5, 3, 0); + obj.set(0, fn_obj); + obj.set(1, arg_obj); + obj.set(2, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::Lam(name, 
ty, body, bi, h) => { + let name_obj = build_name(cache, name); + let ty_obj = build_expr(cache, ty); + let body_obj = build_expr(cache, body); + let hash_obj = build_address(h); + // 4 object fields, 1 scalar byte for BinderInfo + let obj = LeanCtor::alloc(6, 4, 1); + obj.set(0, name_obj); + obj.set(1, ty_obj); + obj.set(2, body_obj); + obj.set(3, hash_obj); + obj.set_u8(4 * 8, binder_info_to_u8(bi)); + IxExpr::new(*obj) + }, + ExprData::ForallE(name, ty, body, bi, h) => { + let name_obj = build_name(cache, name); + let ty_obj = build_expr(cache, ty); + let body_obj = build_expr(cache, body); + let hash_obj = build_address(h); + let obj = LeanCtor::alloc(7, 4, 1); + obj.set(0, name_obj); + obj.set(1, ty_obj); + obj.set(2, body_obj); + obj.set(3, hash_obj); + obj.set_u8(4 * 8, binder_info_to_u8(bi)); + IxExpr::new(*obj) + }, + ExprData::LetE(name, ty, val, body, non_dep, h) => { + let name_obj = build_name(cache, name); + let ty_obj = build_expr(cache, ty); + let val_obj = build_expr(cache, val); + let body_obj = build_expr(cache, body); + let hash_obj = build_address(h); + // 5 object fields, 1 scalar byte for Bool + let obj = LeanCtor::alloc(8, 5, 1); + obj.set(0, name_obj); + obj.set(1, ty_obj); + obj.set(2, val_obj); + obj.set(3, body_obj); + obj.set(4, hash_obj); + obj.set_u8(5 * 8, *non_dep as u8); + IxExpr::new(*obj) + }, + ExprData::Lit(lit, h) => { + let lit_obj = build_literal(lit); + let obj = LeanCtor::alloc(9, 2, 0); + obj.set(0, lit_obj); + obj.set(1, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::Mdata(md, inner, h) => { + let md_obj = build_mdata_array(cache, md); + let inner_obj = build_expr(cache, inner); + let obj = LeanCtor::alloc(10, 3, 0); + obj.set(0, md_obj); + obj.set(1, inner_obj); + obj.set(2, build_address(h)); + IxExpr::new(*obj) + }, + ExprData::Proj(type_name, idx, struct_expr, h) => { + let name_obj = build_name(cache, type_name); + let idx_obj = build_nat(idx); + let struct_obj = build_expr(cache, struct_expr); + let obj = 
LeanCtor::alloc(11, 4, 0); + obj.set(0, name_obj); + obj.set(1, idx_obj); + obj.set(2, struct_obj); + obj.set(3, build_address(h)); + IxExpr::new(*obj) + }, }; cache.exprs.insert(hash, result); @@ -169,15 +163,13 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> *mut c_void { fn build_mdata_array( cache: &mut LeanBuildCache, md: &[(Name, DataValue)], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(md.len(), md.len()); - for (i, (name, dv)) in md.iter().enumerate() { - let pair = build_name_datavalue_pair(cache, name, dv); - lean_array_set_core(arr, i, pair.cast()); - } - arr.cast() +) -> LeanArray { + let arr = LeanArray::alloc(md.len()); + for (i, (name, dv)) in md.iter().enumerate() { + let pair = build_name_datavalue_pair(cache, name, dv); + arr.set(i, pair); } + arr } /// Build a (Name, DataValue) pair (Prod). @@ -185,40 +177,35 @@ fn build_name_datavalue_pair( cache: &mut LeanBuildCache, name: &Name, dv: &DataValue, -) -> *mut c_void { - unsafe { - let name_obj = build_name(cache, name); - let dv_obj = build_data_value(cache, dv); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.cast()); - lean_ctor_set(pair, 1, dv_obj.cast()); - pair.cast() - } +) -> LeanObj { + let name_obj = build_name(cache, name); + let dv_obj = build_data_value(cache, dv); + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, dv_obj); + *pair } /// Build a Literal (natVal or strVal). 
-pub fn build_literal(lit: &Literal) -> *mut c_void { - unsafe { - match lit { - Literal::NatVal(n) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_nat(n).cast()); - obj.cast() - }, - Literal::StrVal(s) => { - let s_cstr = crate::lean::safe_cstring(s.as_str()); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, lean_mk_string(s_cstr.as_ptr())); - obj.cast() - }, - } +pub fn build_literal(lit: &Literal) -> LeanObj { + match lit { + Literal::NatVal(n) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, build_nat(n)); + *obj + }, + Literal::StrVal(s) => { + let obj = LeanCtor::alloc(1, 1, 0); + obj.set(0, LeanString::from_str(s.as_str())); + *obj + }, } } /// Build Ix.BinderInfo enum. /// BinderInfo is a 4-constructor enum with no fields, stored as boxed scalar. -pub fn build_binder_info(bi: &BinderInfo) -> *mut c_void { - lean_box_fn(binder_info_to_u8(bi) as usize) +pub fn build_binder_info(bi: &BinderInfo) -> LeanObj { + LeanObj::box_usize(binder_info_to_u8(bi) as usize) } /// Convert BinderInfo to u8 tag. @@ -286,8 +273,6 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let name_ptr = lean_ctor_get(ptr as *mut _, 0); let ty_ptr = lean_ctor_get(ptr as *mut _, 1); let body_ptr = lean_ctor_get(ptr as *mut _, 2); - // hash at field 3 - // bi is a scalar byte at offset 4*8 let name = decode_ix_name(name_ptr.cast()); let ty = decode_ix_expr(ty_ptr.cast()); @@ -321,8 +306,6 @@ pub fn decode_ix_expr(ptr: *const c_void) -> Expr { let ty_ptr = lean_ctor_get(ptr as *mut _, 1); let val_ptr = lean_ctor_get(ptr as *mut _, 2); let body_ptr = lean_ctor_get(ptr as *mut _, 3); - // hash at field 4 - // nonDep is scalar byte after 5 obj fields let name = decode_ix_name(name_ptr.cast()); let ty = decode_ix_expr(ty_ptr.cast()); @@ -418,8 +401,8 @@ pub fn decode_binder_info(bi_byte: u8) -> BinderInfo { /// Round-trip an Ix.Expr: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: *const c_void) -> *mut c_void { - let expr = decode_ix_expr(expr_ptr); +pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: IxExpr) -> IxExpr { + let expr = decode_ix_expr(expr_ptr.as_ptr()); let mut cache = LeanBuildCache::new(); build_expr(&mut cache, &expr) } diff --git a/src/lean/ffi/ix/level.rs b/src/lean/ffi/ix/level.rs index 026e597a..73a66429 100644 --- a/src/lean/ffi/ix/level.rs +++ b/src/lean/ffi/ix/level.rs @@ -11,10 +11,8 @@ use std::ffi::c_void; use crate::ix::env::{Level, LevelData}; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_inc, lean_obj_tag, -}; +use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; +use crate::lean::obj::{IxLevel, LeanArray, LeanCtor}; use super::super::builder::LeanBuildCache; use super::address::build_address; @@ -22,60 +20,58 @@ use super::name::{build_name, decode_ix_name}; /// Build a Lean Ix.Level with embedded hash. /// Uses caching to avoid rebuilding the same level. 
-pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> *mut c_void { +pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> IxLevel { let hash = *level.get_hash(); if let Some(&cached) = cache.levels.get(&hash) { - unsafe { lean_inc(cached.cast()) }; + cached.inc_ref(); return cached; } - let result = unsafe { - match level.as_data() { - LevelData::Zero(h) => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_address(h).cast()); - obj.cast() - }, - LevelData::Succ(x, h) => { - let x_obj = build_level(cache, x); - let obj = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(obj, 0, x_obj.cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - LevelData::Max(x, y, h) => { - let x_obj = build_level(cache, x); - let y_obj = build_level(cache, y); - let obj = lean_alloc_ctor(2, 3, 0); - lean_ctor_set(obj, 0, x_obj.cast()); - lean_ctor_set(obj, 1, y_obj.cast()); - lean_ctor_set(obj, 2, build_address(h).cast()); - obj.cast() - }, - LevelData::Imax(x, y, h) => { - let x_obj = build_level(cache, x); - let y_obj = build_level(cache, y); - let obj = lean_alloc_ctor(3, 3, 0); - lean_ctor_set(obj, 0, x_obj.cast()); - lean_ctor_set(obj, 1, y_obj.cast()); - lean_ctor_set(obj, 2, build_address(h).cast()); - obj.cast() - }, - LevelData::Param(n, h) => { - let n_obj = build_name(cache, n); - let obj = lean_alloc_ctor(4, 2, 0); - lean_ctor_set(obj, 0, n_obj.cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - LevelData::Mvar(n, h) => { - let n_obj = build_name(cache, n); - let obj = lean_alloc_ctor(5, 2, 0); - lean_ctor_set(obj, 0, n_obj.cast()); - lean_ctor_set(obj, 1, build_address(h).cast()); - obj.cast() - }, - } + let result = match level.as_data() { + LevelData::Zero(h) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_address(h)); + IxLevel::new(*ctor) + }, + LevelData::Succ(x, h) => { + let x_obj = build_level(cache, x); + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, x_obj); + 
ctor.set(1, build_address(h)); + IxLevel::new(*ctor) + }, + LevelData::Max(x, y, h) => { + let x_obj = build_level(cache, x); + let y_obj = build_level(cache, y); + let ctor = LeanCtor::alloc(2, 3, 0); + ctor.set(0, x_obj); + ctor.set(1, y_obj); + ctor.set(2, build_address(h)); + IxLevel::new(*ctor) + }, + LevelData::Imax(x, y, h) => { + let x_obj = build_level(cache, x); + let y_obj = build_level(cache, y); + let ctor = LeanCtor::alloc(3, 3, 0); + ctor.set(0, x_obj); + ctor.set(1, y_obj); + ctor.set(2, build_address(h)); + IxLevel::new(*ctor) + }, + LevelData::Param(n, h) => { + let n_obj = build_name(cache, n); + let ctor = LeanCtor::alloc(4, 2, 0); + ctor.set(0, n_obj); + ctor.set(1, build_address(h)); + IxLevel::new(*ctor) + }, + LevelData::Mvar(n, h) => { + let n_obj = build_name(cache, n); + let ctor = LeanCtor::alloc(5, 2, 0); + ctor.set(0, n_obj); + ctor.set(1, build_address(h)); + IxLevel::new(*ctor) + }, }; cache.levels.insert(hash, result); @@ -86,15 +82,12 @@ pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> *mut c_void { pub fn build_level_array( cache: &mut LeanBuildCache, levels: &[Level], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(levels.len(), levels.len()); - for (i, level) in levels.iter().enumerate() { - let level_obj = build_level(cache, level); - lean_array_set_core(arr, i, level_obj.cast()); - } - arr.cast() +) -> LeanArray { + let arr = LeanArray::alloc(levels.len()); + for (i, level) in levels.iter().enumerate() { + arr.set(i, build_level(cache, level)); } + arr } /// Decode a Lean Ix.Level to Rust Level. @@ -144,10 +137,8 @@ pub fn decode_level_array(ptr: *const c_void) -> Vec { /// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_level( - level_ptr: *const c_void, -) -> *mut c_void { - let level = decode_ix_level(level_ptr); +pub extern "C" fn rs_roundtrip_ix_level(level_ptr: IxLevel) -> IxLevel { + let level = decode_ix_level(level_ptr.as_ptr()); let mut cache = LeanBuildCache::new(); build_level(&mut cache, &level) } diff --git a/src/lean/ffi/ix/name.rs b/src/lean/ffi/ix/name.rs index 5ac159e5..bbc38ca1 100644 --- a/src/lean/ffi/ix/name.rs +++ b/src/lean/ffi/ix/name.rs @@ -8,12 +8,10 @@ use std::ffi::c_void; use crate::ix::env::{Name, NameData}; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_inc, lean_mk_string, lean_obj_tag, -}; +use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; use crate::lean::lean_obj_to_string; use crate::lean::nat::Nat; +use crate::lean::obj::{IxName, LeanArray, LeanCtor, LeanString}; use super::super::builder::LeanBuildCache; use super::super::primitives::build_nat; @@ -21,42 +19,37 @@ use super::address::build_address; /// Build a Lean Ix.Name with embedded hash. /// Uses caching to avoid rebuilding the same name. 
-pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> *mut c_void { +pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> IxName { let hash = name.get_hash(); if let Some(&cached) = cache.names.get(hash) { - unsafe { lean_inc(cached.cast()) }; + cached.inc_ref(); return cached; } - let result = unsafe { - match name.as_data() { - NameData::Anonymous(h) => { - // anonymous: (hash : Address) - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_address(h).cast()); - obj.cast() - }, - NameData::Str(parent, s, h) => { - // str: (parent : Name) (s : String) (hash : Address) - let parent_obj = build_name(cache, parent); - let s_cstr = crate::lean::safe_cstring(s.as_str()); - let obj = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(obj, 0, parent_obj.cast()); - lean_ctor_set(obj, 1, lean_mk_string(s_cstr.as_ptr())); - lean_ctor_set(obj, 2, build_address(h).cast()); - obj.cast() - }, - NameData::Num(parent, n, h) => { - // num: (parent : Name) (i : Nat) (hash : Address) - let parent_obj = build_name(cache, parent); - let n_obj = build_nat(n); - let obj = lean_alloc_ctor(2, 3, 0); - lean_ctor_set(obj, 0, parent_obj.cast()); - lean_ctor_set(obj, 1, n_obj.cast()); - lean_ctor_set(obj, 2, build_address(h).cast()); - obj.cast() - }, - } + let result = match name.as_data() { + NameData::Anonymous(h) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_address(h)); + IxName::new(*ctor) + }, + NameData::Str(parent, s, h) => { + let parent_obj = build_name(cache, parent); + let s_obj = LeanString::from_str(s.as_str()); + let ctor = LeanCtor::alloc(1, 3, 0); + ctor.set(0, parent_obj); + ctor.set(1, s_obj); + ctor.set(2, build_address(h)); + IxName::new(*ctor) + }, + NameData::Num(parent, n, h) => { + let parent_obj = build_name(cache, parent); + let n_obj = build_nat(n); + let ctor = LeanCtor::alloc(2, 3, 0); + ctor.set(0, parent_obj); + ctor.set(1, n_obj); + ctor.set(2, build_address(h)); + IxName::new(*ctor) + }, }; cache.names.insert(*hash, 
result); @@ -67,15 +60,12 @@ pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> *mut c_void { pub fn build_name_array( cache: &mut LeanBuildCache, names: &[Name], -) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(names.len(), names.len()); - for (i, name) in names.iter().enumerate() { - let name_obj = build_name(cache, name); - lean_array_set_core(arr, i, name_obj.cast()); - } - arr.cast() +) -> LeanArray { + let arr = LeanArray::alloc(names.len()); + for (i, name) in names.iter().enumerate() { + arr.set(i, build_name(cache, name)); } + arr } /// Decode a Lean Ix.Name to Rust Name. @@ -121,8 +111,8 @@ pub fn decode_name_array(ptr: *const c_void) -> Vec { /// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_name(name_ptr: *const c_void) -> *mut c_void { - let name = decode_ix_name(name_ptr); +pub extern "C" fn rs_roundtrip_ix_name(name_ptr: IxName) -> IxName { + let name = decode_ix_name(name_ptr.as_ptr()); let mut cache = LeanBuildCache::new(); build_name(&mut cache, &name) } diff --git a/src/lean/ffi/ixon/env.rs b/src/lean/ffi/ixon/env.rs index 5a0305ed..8b1196b2 100644 --- a/src/lean/ffi/ixon/env.rs +++ b/src/lean/ffi/ixon/env.rs @@ -129,7 +129,7 @@ pub fn build_raw_named( let addr_obj = build_address_from_ixon(&rn.addr); let meta_obj = build_constant_meta(&rn.const_meta); let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj.cast()); + lean_ctor_set(obj, 0, name_obj.as_mut_ptr().cast()); lean_ctor_set(obj, 1, addr_obj.cast()); lean_ctor_set(obj, 2, meta_obj.cast()); obj.cast() @@ -242,7 +242,7 @@ pub fn build_raw_name_entry( let name_obj = build_name(cache, name); let obj = lean_alloc_ctor(0, 2, 0); lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, name_obj.cast()); + lean_ctor_set(obj, 1, name_obj.as_mut_ptr().cast()); obj.cast() } } diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs index 68a0ef2f..4a1bf5dd 100644 --- 
a/src/lean/ffi/ixon/meta.rs +++ b/src/lean/ffi/ixon/meta.rs @@ -401,7 +401,7 @@ pub fn build_constant_meta(meta: &ConstantMeta) -> *mut c_void { let obj = lean_alloc_ctor(1, 6, 16); lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); lean_ctor_set(obj, 1, build_address_array(lvls).cast()); - lean_ctor_set(obj, 2, build_reducibility_hints(hints).cast()); + lean_ctor_set(obj, 2, build_reducibility_hints(hints).as_mut_ptr().cast()); lean_ctor_set(obj, 3, build_address_array(all).cast()); lean_ctor_set(obj, 4, build_address_array(ctx).cast()); lean_ctor_set(obj, 5, build_expr_meta_arena(arena).cast()); diff --git a/src/lean/ffi/keccak.rs b/src/lean/ffi/keccak.rs index 1e2b19c9..622369db 100644 --- a/src/lean/ffi/keccak.rs +++ b/src/lean/ffi/keccak.rs @@ -1,72 +1,38 @@ -use std::ffi::c_void; use std::sync::OnceLock; use tiny_keccak::{Hasher, Keccak}; -use crate::lean::{ - lean::{ - lean_alloc_external, lean_alloc_sarray, lean_get_external_data, - lean_register_external_class, - }, - lean_sarray_data, lean_sarray_set_data, noop_foreach, -}; +use crate::lean::obj::{ExternalClass, LeanByteArray, LeanExternal, LeanObj}; -use super::{ExternalClassPtr, drop_raw, to_raw}; +static KECCAK_CLASS: OnceLock = OnceLock::new(); -static KECCAK_CLASS: OnceLock = OnceLock::new(); - -fn get_keccak_class() -> *mut c_void { - KECCAK_CLASS - .get_or_init(|| { - ExternalClassPtr( - unsafe { - lean_register_external_class( - Some(keccak_finalizer), - Some(noop_foreach), - ) - } - .cast(), - ) - }) - .0 -} - -extern "C" fn keccak_finalizer(ptr: *mut c_void) { - drop_raw(ptr.cast::()); +fn keccak_class() -> &'static ExternalClass { + KECCAK_CLASS.get_or_init(ExternalClass::register_with_drop::) } /// `Keccak.Hasher.init : Unit → Hasher` #[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_init(_unit: *const c_void) -> *mut c_void { - let hasher = Keccak::v256(); - let ptr = to_raw(hasher) as *mut c_void; - unsafe { lean_alloc_external(get_keccak_class().cast(), ptr) }.cast() +extern 
"C" fn rs_keccak256_hasher_init(_unit: LeanObj) -> LeanExternal { + LeanExternal::alloc(keccak_class(), Keccak::v256()) } /// `Keccak.Hasher.update : (hasher: Hasher) → (input: @& ByteArray) → Hasher` #[unsafe(no_mangle)] extern "C" fn rs_keccak256_hasher_update( - hasher_obj: *mut c_void, - input: *const c_void, -) -> *mut c_void { - let hasher: &Keccak = - unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; - let mut new_hasher = hasher.clone(); - new_hasher.update(lean_sarray_data(input)); - let ptr = to_raw(new_hasher) as *mut c_void; - unsafe { lean_alloc_external(get_keccak_class().cast(), ptr) }.cast() + hasher: LeanExternal, + input: LeanByteArray, +) -> LeanExternal { + let mut new_hasher = hasher.get().clone(); + new_hasher.update(input.as_bytes()); + LeanExternal::alloc(keccak_class(), new_hasher) } /// `Keccak.Hasher.finalize : (hasher: Hasher) → ByteArray` #[unsafe(no_mangle)] extern "C" fn rs_keccak256_hasher_finalize( - hasher_obj: *mut c_void, -) -> *mut c_void { - let hasher: &Keccak = - unsafe { &*lean_get_external_data(hasher_obj.cast()).cast() }; + hasher: LeanExternal, +) -> LeanByteArray { let mut data = [0u8; 32]; - hasher.clone().finalize(&mut data); - let arr_ptr = unsafe { lean_alloc_sarray(1, 32, 32) }; - unsafe { lean_sarray_set_data(arr_ptr.cast(), &data) }; - arr_ptr.cast() + hasher.get().clone().finalize(&mut data); + LeanByteArray::from_bytes(&data) } diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs index 097b2f4f..50f6ea0c 100644 --- a/src/lean/ffi/primitives.rs +++ b/src/lean/ffi/primitives.rs @@ -9,31 +9,30 @@ use std::ffi::c_void; use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_get_core, - lean_array_set_core, lean_ctor_get, lean_ctor_set, lean_mk_string, - lean_obj_tag, lean_sarray_cptr, lean_uint64_to_nat, + lean_ctor_get, lean_obj_tag, + lean_uint64_to_nat, }; use crate::lean::nat::Nat; -use crate::lean::{ - lean_array_data, lean_box_fn, lean_is_scalar, 
lean_obj_to_string, - lean_sarray_data, +use crate::lean::obj::{ + LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObj, LeanString, }; +use crate::lean::{lean_array_data, lean_is_scalar}; // ============================================================================= // Nat Building // ============================================================================= /// Build a Lean Nat from a Rust Nat. -pub fn build_nat(n: &Nat) -> *mut c_void { +pub fn build_nat(n: &Nat) -> LeanObj { // Try to get as u64 first if let Some(val) = n.to_u64() { // For small values that fit in a boxed scalar (max value is usize::MAX >> 1) if val <= (usize::MAX >> 1) as u64 { #[allow(clippy::cast_possible_truncation)] - return lean_box_fn(val as usize); + return LeanObj::box_usize(val as usize); } // For larger u64 values, use lean_uint64_to_nat - return unsafe { lean_uint64_to_nat(val).cast() }; + return unsafe { LeanObj::from_raw(lean_uint64_to_nat(val).cast()) }; } // For values larger than u64, convert to limbs and use GMP let bytes = n.to_le_bytes(); @@ -43,7 +42,11 @@ pub fn build_nat(n: &Nat) -> *mut c_void { arr[..chunk.len()].copy_from_slice(chunk); limbs.push(u64::from_le_bytes(arr)); } - unsafe { crate::lean::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) } + unsafe { + LeanObj::from_raw( + crate::lean::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()), + ) + } } // ============================================================================= @@ -52,68 +55,49 @@ pub fn build_nat(n: &Nat) -> *mut c_void { /// Round-trip a Nat: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat(nat_ptr: *const c_void) -> *mut c_void { - // Decode - let nat = Nat::from_ptr(nat_ptr); - // Re-encode +pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanObj) -> LeanObj { + let nat = Nat::from_ptr(nat_ptr.as_ptr()); build_nat(&nat) } /// Round-trip a String: decode from Lean, re-encode to Lean. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_string(s_ptr: *const c_void) -> *mut c_void { - // Decode - let s = lean_obj_to_string(s_ptr); - // Re-encode - unsafe { - let cstr = crate::lean::safe_cstring(s.as_str()); - lean_mk_string(cstr.as_ptr()).cast() - } +pub extern "C" fn rs_roundtrip_string(s_ptr: LeanObj) -> LeanObj { + let s = unsafe { LeanString::from_raw(s_ptr.as_ptr()) }; + *LeanString::from_str(&s.to_string()) } /// Round-trip a List Nat: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_list_nat( - list_ptr: *const c_void, -) -> *mut c_void { +pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanObj) -> LeanObj { // Decode list to Vec - let nats: Vec = crate::lean::collect_list(list_ptr, Nat::from_ptr); + let nats: Vec = crate::lean::collect_list(list_ptr.as_ptr(), Nat::from_ptr); // Re-encode as Lean List build_list_nat(&nats) } /// Round-trip an Array Nat: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_array_nat( - arr_ptr: *const c_void, -) -> *mut c_void { +pub extern "C" fn rs_roundtrip_array_nat(arr_ptr: LeanObj) -> LeanObj { // Decode array let nats: Vec = - lean_array_data(arr_ptr).iter().map(|&p| Nat::from_ptr(p)).collect(); + lean_array_data(arr_ptr.as_ptr()).iter().map(|&p| Nat::from_ptr(p)).collect(); // Re-encode as Lean Array build_array_nat(&nats) } /// Round-trip a ByteArray: decode from Lean, re-encode to Lean. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_bytearray(ba_ptr: *const c_void) -> *mut c_void { - // Decode ByteArray (scalar array of u8) - let bytes = lean_sarray_data(ba_ptr); - // Re-encode - unsafe { - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let data_ptr = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len()); - ba.cast() - } +pub extern "C" fn rs_roundtrip_bytearray(ba: LeanByteArray) -> LeanByteArray { + LeanByteArray::from_bytes(ba.as_bytes()) } /// Round-trip a Bool: decode from Lean, re-encode. /// Bool in Lean is passed as unboxed scalar: false = 0, true = 1 #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_bool(bool_ptr: *const c_void) -> *mut c_void { +pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanObj) -> LeanObj { // Bool is passed as unboxed scalar - just return it as-is - bool_ptr as *mut c_void + bool_ptr } // ============================================================================= @@ -121,32 +105,18 @@ pub extern "C" fn rs_roundtrip_bool(bool_ptr: *const c_void) -> *mut c_void { // ============================================================================= /// Build a Lean List Nat from a Vec. -fn build_list_nat(nats: &[Nat]) -> *mut c_void { - unsafe { - // Build list in reverse (cons builds from the end) - let mut list = lean_box_fn(0).cast(); // nil - for nat in nats.iter().rev() { - let nat_obj = build_nat(nat); - // cons : α → List α → List α (tag 1, 2 object fields) - let cons = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(cons, 0, nat_obj.cast()); - lean_ctor_set(cons, 1, list); - list = cons; - } - list.cast() - } +fn build_list_nat(nats: &[Nat]) -> LeanObj { + let items: Vec = nats.iter().map(build_nat).collect(); + *LeanList::from_iter(items) } /// Build a Lean Array Nat from a Vec. 
-fn build_array_nat(nats: &[Nat]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(nats.len(), nats.len()); - for (i, nat) in nats.iter().enumerate() { - let nat_obj = build_nat(nat); - lean_array_set_core(arr, i, nat_obj.cast()); - } - arr.cast() +fn build_array_nat(nats: &[Nat]) -> LeanObj { + let arr = LeanArray::alloc(nats.len()); + for (i, nat) in nats.iter().enumerate() { + arr.set(i, build_nat(nat)); } + *arr } // ============================================================================= @@ -156,33 +126,31 @@ fn build_array_nat(nats: &[Nat]) -> *mut c_void { /// Round-trip a Point (structure with x, y : Nat). /// Point is a structure, which in Lean is represented as a constructor with tag 0. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_point(point_ptr: *const c_void) -> *mut c_void { +pub extern "C" fn rs_roundtrip_point(point_ptr: LeanObj) -> LeanObj { unsafe { // Point is a structure (single constructor, tag 0) with 2 Nat fields - let x_ptr = lean_ctor_get(point_ptr as *mut _, 0); - let y_ptr = lean_ctor_get(point_ptr as *mut _, 1); + let x_ptr = lean_ctor_get(point_ptr.as_ptr() as *mut _, 0); + let y_ptr = lean_ctor_get(point_ptr.as_ptr() as *mut _, 1); // Decode the Nats let x = Nat::from_ptr(x_ptr.cast()); let y = Nat::from_ptr(y_ptr.cast()); // Re-encode as Point - let point = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(point, 0, build_nat(&x).cast()); - lean_ctor_set(point, 1, build_nat(&y).cast()); - point.cast() + let point = LeanCtor::alloc(0, 2, 0); + point.set(0, build_nat(&x)); + point.set(1, build_nat(&y)); + *point } } /// Round-trip a NatTree (inductive with leaf : Nat → NatTree | node : NatTree → NatTree → NatTree). 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat_tree( - tree_ptr: *const c_void, -) -> *mut c_void { - roundtrip_nat_tree_recursive(tree_ptr) +pub extern "C" fn rs_roundtrip_nat_tree(tree_ptr: LeanObj) -> LeanObj { + roundtrip_nat_tree_recursive(tree_ptr.as_ptr()) } -fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> *mut c_void { +fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> LeanObj { unsafe { let tag = lean_obj_tag(tree_ptr as *mut _); match tag { @@ -190,9 +158,9 @@ fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> *mut c_void { // leaf : Nat → NatTree let nat_ptr = lean_ctor_get(tree_ptr as *mut _, 0); let nat = Nat::from_ptr(nat_ptr.cast()); - let leaf = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(leaf, 0, build_nat(&nat).cast()); - leaf.cast() + let leaf = LeanCtor::alloc(0, 1, 0); + leaf.set(0, build_nat(&nat)); + *leaf }, 1 => { // node : NatTree → NatTree → NatTree @@ -200,10 +168,10 @@ fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> *mut c_void { let right_ptr = lean_ctor_get(tree_ptr as *mut _, 1); let left = roundtrip_nat_tree_recursive(left_ptr.cast()); let right = roundtrip_nat_tree_recursive(right_ptr.cast()); - let node = lean_alloc_ctor(1, 2, 0); - lean_ctor_set(node, 0, left.cast()); - lean_ctor_set(node, 1, right.cast()); - node.cast() + let node = LeanCtor::alloc(1, 2, 0); + node.set(0, left); + node.set(1, right); + *node }, _ => panic!("Invalid NatTree tag: {}", tag), } @@ -214,47 +182,40 @@ fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> *mut c_void { /// AssocList: nil (tag 0, 0 fields) | cons key value tail (tag 1, 3 fields) /// Note: nil with 0 fields may be represented as lean_box(0) #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_assoclist_nat_nat( - list_ptr: *const c_void, -) -> *mut c_void { - // Check if it's a scalar (nil represented as lean_box(0)) - if lean_is_scalar(list_ptr) { - // Return lean_box(0) for nil - return lean_box_fn(0); +pub extern "C" fn 
rs_roundtrip_assoclist_nat_nat(list_ptr: LeanObj) -> LeanObj { + if list_ptr.is_scalar() { + return LeanObj::box_usize(0); } - let pairs = decode_assoc_list_nat_nat(list_ptr); + let pairs = decode_assoc_list_nat_nat(list_ptr.as_ptr()); build_assoc_list_nat_nat(&pairs) } /// Build an AssocList Nat Nat from pairs -fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> *mut c_void { - unsafe { - // Build in reverse to preserve order - // AssocList.nil with 0 fields is represented as lean_box(0) - let mut list = lean_box_fn(0).cast(); - for (k, v) in pairs.iter().rev() { - let cons = lean_alloc_ctor(1, 3, 0); // AssocList.cons - lean_ctor_set(cons, 0, build_nat(k).cast()); - lean_ctor_set(cons, 1, build_nat(v).cast()); - lean_ctor_set(cons, 2, list); - list = cons; - } - list.cast() +fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> LeanObj { + // Build in reverse to preserve order + let mut list = LeanObj::box_usize(0); // nil + for (k, v) in pairs.iter().rev() { + let cons = LeanCtor::alloc(1, 3, 0); // AssocList.cons + cons.set(0, build_nat(k)); + cons.set(1, build_nat(v)); + cons.set(2, list); + list = *cons; } + list } /// Round-trip a DHashMap.Raw Nat Nat. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( - raw_ptr: *const c_void, -) -> *mut c_void { - unsafe { - if lean_is_scalar(raw_ptr) { - return raw_ptr as *mut c_void; - } + raw_ptr: LeanObj, +) -> LeanObj { + if raw_ptr.is_scalar() { + return raw_ptr; + } - let size_ptr = lean_ctor_get(raw_ptr as *mut _, 0); - let buckets_ptr = lean_ctor_get(raw_ptr as *mut _, 1); + unsafe { + let size_ptr = lean_ctor_get(raw_ptr.as_ptr() as *mut _, 0); + let buckets_ptr = lean_ctor_get(raw_ptr.as_ptr() as *mut _, 1); let size = Nat::from_ptr(size_ptr.cast()); @@ -269,9 +230,10 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( } // Rebuild buckets - let new_buckets = lean_alloc_array(num_buckets, num_buckets); + let new_buckets = LeanArray::alloc(num_buckets); + let nil = LeanObj::box_usize(0); for i in 0..num_buckets { - lean_array_set_core(new_buckets, i, lean_box_fn(0).cast()); // AssocList.nil + new_buckets.set(i, nil); } for (k, v) in &all_pairs { @@ -285,20 +247,19 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( #[allow(clippy::cast_possible_truncation)] let bucket_idx = (k_u64 as usize) & (num_buckets - 1); - let old_bucket = lean_array_get_core(new_buckets, bucket_idx); - let new_bucket = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(new_bucket, 0, build_nat(k).cast()); - lean_ctor_set(new_bucket, 1, build_nat(v).cast()); - lean_ctor_set(new_bucket, 2, old_bucket); - lean_array_set_core(new_buckets, bucket_idx, new_bucket); + let old_bucket = new_buckets.get(bucket_idx); + let new_bucket = LeanCtor::alloc(1, 3, 0); + new_bucket.set(0, build_nat(k)); + new_bucket.set(1, build_nat(v)); + new_bucket.set(2, old_bucket); + new_buckets.set(bucket_idx, *new_bucket); } // Build Raw - let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, build_nat(&size).cast()); - lean_ctor_set(raw, 1, new_buckets); - - raw.cast() + let raw = LeanCtor::alloc(0, 2, 0); + raw.set(0, build_nat(&size)); + raw.set(1, *new_buckets); + *raw } } @@ -318,13 
+279,11 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( /// - nil: lean_box(0) /// - cons key value tail: ctor 1, 3 fields #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_hashmap_nat_nat( - map_ptr: *const c_void, -) -> *mut c_void { +pub extern "C" fn rs_roundtrip_hashmap_nat_nat(map_ptr: LeanObj) -> LeanObj { unsafe { // Due to unboxing, map_ptr points directly to Raw - let size_ptr = lean_ctor_get(map_ptr as *mut _, 0); - let buckets_ptr = lean_ctor_get(map_ptr as *mut _, 1); + let size_ptr = lean_ctor_get(map_ptr.as_ptr() as *mut _, 0); + let buckets_ptr = lean_ctor_get(map_ptr.as_ptr() as *mut _, 1); let size = Nat::from_ptr(size_ptr.cast()); @@ -333,18 +292,18 @@ pub extern "C" fn rs_roundtrip_hashmap_nat_nat( let mut pairs: Vec<(Nat, Nat)> = Vec::new(); for &bucket_ptr in buckets_data { - // Each bucket is an AssocList let bucket_pairs = decode_assoc_list_nat_nat(bucket_ptr); pairs.extend(bucket_pairs); } // Rebuild the HashMap with the same bucket count let num_buckets = buckets_data.len(); - let new_buckets = lean_alloc_array(num_buckets, num_buckets); + let new_buckets = LeanArray::alloc(num_buckets); // Initialize all buckets to AssocList.nil (lean_box(0)) + let nil = LeanObj::box_usize(0); for i in 0..num_buckets { - lean_array_set_core(new_buckets, i, lean_box_fn(0).cast()); // AssocList.nil + new_buckets.set(i, nil); } // Insert each pair into the appropriate bucket using Lean's hash function @@ -363,24 +322,22 @@ pub extern "C" fn rs_roundtrip_hashmap_nat_nat( let bucket_idx = (k_u64 as usize) & (num_buckets - 1); // Get current bucket AssocList - let old_bucket = lean_array_get_core(new_buckets, bucket_idx); + let old_bucket = new_buckets.get(bucket_idx); // Build AssocList.cons key value tail (tag 1, 3 fields) - let new_bucket = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(new_bucket, 0, build_nat(k).cast()); - lean_ctor_set(new_bucket, 1, build_nat(v).cast()); - lean_ctor_set(new_bucket, 2, old_bucket); - - 
lean_array_set_core(new_buckets, bucket_idx, new_bucket); + let new_bucket = LeanCtor::alloc(1, 3, 0); + new_bucket.set(0, build_nat(k)); + new_bucket.set(1, build_nat(v)); + new_bucket.set(2, old_bucket); + new_buckets.set(bucket_idx, *new_bucket); } // Build Raw (ctor 0, 2 fields: size, buckets) // Due to unboxing, this IS the HashMap - let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, build_nat(&size).cast()); - lean_ctor_set(raw, 1, new_buckets); - - raw.cast() + let raw = LeanCtor::alloc(0, 2, 0); + raw.set(0, build_nat(&size)); + raw.set(1, *new_buckets); + *raw } } @@ -392,18 +349,15 @@ pub fn decode_assoc_list_nat_nat(list_ptr: *const c_void) -> Vec<(Nat, Nat)> { loop { unsafe { - // Check if scalar (shouldn't happen) or object if lean_is_scalar(current) { break; } let tag = lean_obj_tag(current as *mut _); if tag == 0 { - // AssocList.nil break; } - // AssocList.cons: 3 fields (key, value, tail) let key_ptr = lean_ctor_get(current as *mut _, 0); let value_ptr = lean_ctor_get(current as *mut _, 1); let tail_ptr = lean_ctor_get(current as *mut _, 2); @@ -427,13 +381,13 @@ pub fn decode_assoc_list_nat_nat(list_ptr: *const c_void) -> Vec<(Nat, Nat)> { /// Used by Address.Hashable to match Rust's bucket hash computation. /// This is essentially just a pointer cast - very fast. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_bytearray_to_u64_le(ba_ptr: *const c_void) -> u64 { - let data = lean_sarray_data(ba_ptr); +pub extern "C" fn rs_bytearray_to_u64_le(ba: LeanByteArray) -> u64 { + let data = ba.as_bytes(); if data.len() < 8 { return 0; } unsafe { - let data_ptr = lean_sarray_cptr(ba_ptr.cast_mut().cast()); - std::ptr::read_unaligned(data_ptr as *const u64) + let cptr = crate::lean::lean::lean_sarray_cptr(ba.as_ptr() as *mut _); + std::ptr::read_unaligned(cptr as *const u64) } } diff --git a/src/lean/ffi/unsigned.rs b/src/lean/ffi/unsigned.rs index a947480a..c518f7fc 100644 --- a/src/lean/ffi/unsigned.rs +++ b/src/lean/ffi/unsigned.rs @@ -1,31 +1,21 @@ -use std::ffi::c_void; - -use crate::lean::{lean::lean_alloc_sarray, lean_sarray_set_data}; +use crate::lean::obj::LeanByteArray; #[unsafe(no_mangle)] -extern "C" fn c_u16_to_le_bytes(v: u16) -> *mut c_void { - mk_byte_array(&v.to_le_bytes()) +extern "C" fn c_u16_to_le_bytes(v: u16) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) } #[unsafe(no_mangle)] -extern "C" fn c_u32_to_le_bytes(v: u32) -> *mut c_void { - mk_byte_array(&v.to_le_bytes()) +extern "C" fn c_u32_to_le_bytes(v: u32) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) } #[unsafe(no_mangle)] -extern "C" fn c_u64_to_le_bytes(v: u64) -> *mut c_void { - mk_byte_array(&v.to_le_bytes()) +extern "C" fn c_u64_to_le_bytes(v: u64) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) } #[unsafe(no_mangle)] -extern "C" fn c_usize_to_le_bytes(v: usize) -> *mut c_void { - mk_byte_array(&v.to_le_bytes()) -} - -#[inline] -fn mk_byte_array(bytes: &[u8]) -> *mut c_void { - let len = bytes.len(); - let arr_ptr = unsafe { lean_alloc_sarray(1, len, len) }; - unsafe { lean_sarray_set_data(arr_ptr.cast(), bytes) }; - arr_ptr.cast() +extern "C" fn c_usize_to_le_bytes(v: usize) -> LeanByteArray { + LeanByteArray::from_bytes(&v.to_le_bytes()) } diff --git a/src/lean/nat.rs b/src/lean/nat.rs index 
72cbd985..55a00c76 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -123,7 +123,11 @@ unsafe extern "C" { op: *const u64, ); - /// Lean's internal mpz allocation — takes ownership of the mpz_t value. + #[link_name = "__gmpz_clear"] + fn mpz_clear(x: *mut Mpz); + + /// Lean's internal mpz allocation — deep-copies the mpz value. + /// Caller must still call mpz_clear on the original. fn lean_alloc_mpz(v: *mut Mpz) -> *mut c_void; } @@ -153,7 +157,9 @@ pub unsafe fn lean_nat_from_limbs( // order = -1 (least significant limb first) // size = 8 bytes per limb, endian = 0 (native), nails = 0 mpz_import(value.as_mut_ptr(), num_limbs, -1, 8, 0, 0, limbs); - // lean_alloc_mpz takes ownership of the mpz value - lean_alloc_mpz(value.as_mut_ptr()) + // lean_alloc_mpz deep-copies; we must free the original + let result = lean_alloc_mpz(value.as_mut_ptr()); + mpz_clear(value.as_mut_ptr()); + result } } diff --git a/src/lean/obj.rs b/src/lean/obj.rs new file mode 100644 index 00000000..ded74547 --- /dev/null +++ b/src/lean/obj.rs @@ -0,0 +1,873 @@ +//! Type-safe wrappers for Lean FFI object pointers. +//! +//! Each wrapper is a `#[repr(transparent)]` `Copy` newtype over `*const c_void` +//! that asserts the correct Lean tag on construction and provides safe accessor +//! methods. Reference counting is left to Lean (no `Drop` impl). + +use std::ffi::c_void; +use std::marker::PhantomData; +use std::ops::Deref; + +use super::lean; +use super::safe_cstring; + +// ============================================================================= +// LeanObj — Untyped base wrapper +// ============================================================================= + +/// Untyped wrapper around a raw Lean object pointer. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanObj(*const c_void); + +impl LeanObj { + /// Wrap a raw pointer without any tag check. + /// + /// # Safety + /// The pointer must be a valid Lean object (or tagged scalar). 
+ #[inline] + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + Self(ptr) + } + + #[inline] + pub fn as_ptr(self) -> *const c_void { + self.0 + } + + #[inline] + pub fn as_mut_ptr(self) -> *mut c_void { + self.0 as *mut c_void + } + + /// True if this is a tagged scalar (bit 0 set). + #[inline] + pub fn is_scalar(self) -> bool { + self.0 as usize & 1 == 1 + } + + /// Return the object tag. Panics if the object is a scalar. + #[inline] + pub fn tag(self) -> u8 { + assert!(!self.is_scalar(), "tag() called on scalar"); + #[allow(clippy::cast_possible_truncation)] + unsafe { + lean::lean_obj_tag(self.0 as *mut _) as u8 + } + } + + #[inline] + pub fn inc_ref(self) { + if !self.is_scalar() { + unsafe { lean::lean_inc_ref(self.0 as *mut _) } + } + } + + #[inline] + pub fn dec_ref(self) { + if !self.is_scalar() { + unsafe { lean::lean_dec_ref(self.0 as *mut _) } + } + } + + /// Box a `usize` into a tagged scalar pointer. + #[inline] + pub fn box_usize(n: usize) -> Self { + Self(((n << 1) | 1) as *const c_void) + } + + /// Unbox a tagged scalar pointer into a `usize`. + #[inline] + pub fn unbox_usize(self) -> usize { + self.0 as usize >> 1 + } + + #[inline] + pub fn box_u64(n: u64) -> Self { + Self(unsafe { lean::lean_box_uint64(n) }.cast()) + } + + #[inline] + pub fn unbox_u64(self) -> u64 { + unsafe { lean::lean_unbox_uint64(self.0 as *mut _) } + } + + #[inline] + pub fn box_u32(n: u32) -> Self { + Self(unsafe { lean::lean_box_uint32(n) }.cast()) + } + + #[inline] + pub fn unbox_u32(self) -> u32 { + unsafe { lean::lean_unbox_uint32(self.0 as *mut _) } + } +} + +// ============================================================================= +// LeanArray — Array α (tag 246) +// ============================================================================= + +/// Typed wrapper for a Lean `Array α` object (tag 246). 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanArray(LeanObj); + +impl Deref for LeanArray { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanArray { + /// Wrap a raw pointer, asserting it is an `Array` (tag 246). + /// + /// # Safety + /// The pointer must be a valid Lean `Array` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == 246); + Self(obj) + } + + /// Allocate a new array with `size` elements (capacity = size). + pub fn alloc(size: usize) -> Self { + let obj = unsafe { lean::lean_alloc_array(size, size) }; + Self(LeanObj(obj.cast())) + } + + pub fn len(&self) -> usize { + unsafe { lean::lean_array_size(self.0.as_ptr() as *mut _) } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn get(&self, i: usize) -> LeanObj { + LeanObj(unsafe { lean::lean_array_get_core(self.0.as_ptr() as *mut _, i) }.cast()) + } + + pub fn set(&self, i: usize, val: impl Into) { + let val: LeanObj = val.into(); + unsafe { + lean::lean_array_set_core(self.0.as_ptr() as *mut _, i, val.as_ptr() as *mut _); + } + } + + /// Return a slice over the array elements. + pub fn data(&self) -> &[LeanObj] { + unsafe { + let cptr = lean::lean_array_cptr(self.0.as_ptr() as *mut _); + // Safety: LeanObj is repr(transparent) over *const c_void, and + // lean_array_cptr returns *mut *mut lean_object which has the same layout. 
+ std::slice::from_raw_parts(cptr.cast(), self.len()) + } + } + + pub fn iter(&self) -> impl Iterator + '_ { + self.data().iter().copied() + } + + pub fn map(&self, f: impl Fn(LeanObj) -> T) -> Vec { + self.iter().map(f).collect() + } +} + +// ============================================================================= +// LeanByteArray — ByteArray (tag 248, scalar array) +// ============================================================================= + +/// Typed wrapper for a Lean `ByteArray` object (tag 248). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanByteArray(LeanObj); + +impl Deref for LeanByteArray { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanByteArray { + /// Wrap a raw pointer, asserting it is a `ByteArray` (tag 248). + /// + /// # Safety + /// The pointer must be a valid Lean `ByteArray` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == 248); + Self(obj) + } + + /// Allocate a new byte array with `size` bytes (capacity = size). + pub fn alloc(size: usize) -> Self { + let obj = unsafe { lean::lean_alloc_sarray(1, size, size) }; + Self(LeanObj(obj.cast())) + } + + /// Allocate a new byte array and copy `data` into it. + pub fn from_bytes(data: &[u8]) -> Self { + let arr = Self::alloc(data.len()); + unsafe { + let cptr = lean::lean_sarray_cptr(arr.0.as_ptr() as *mut _); + std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); + } + arr + } + + pub fn len(&self) -> usize { + unsafe { lean::lean_sarray_size(self.0.as_ptr() as *mut _) } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Return the byte contents as a slice. + pub fn as_bytes(&self) -> &[u8] { + unsafe { + let cptr = lean::lean_sarray_cptr(self.0.as_ptr() as *mut _); + std::slice::from_raw_parts(cptr, self.len()) + } + } + + /// Copy `data` into the byte array and update its size. 
+ /// + /// # Safety + /// The caller must ensure the array has sufficient capacity for `data`. + pub unsafe fn set_data(&self, data: &[u8]) { + unsafe { + let obj = self.0.as_mut_ptr(); + let cptr = lean::lean_sarray_cptr(obj as *mut _); + std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); + // Update m_size: at offset 8 (after lean_object header) + *obj.cast::().add(8).cast::() = data.len(); + } + } +} + +// ============================================================================= +// LeanString — String (tag 249) +// ============================================================================= + +/// Typed wrapper for a Lean `String` object (tag 249). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanString(LeanObj); + +impl Deref for LeanString { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanString { + /// Wrap a raw pointer, asserting it is a `String` (tag 249). + /// + /// # Safety + /// The pointer must be a valid Lean `String` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == 249); + Self(obj) + } + + /// Create a Lean string from a Rust `&str`. + pub fn from_str(s: &str) -> Self { + let c = safe_cstring(s); + let obj = unsafe { lean::lean_mk_string(c.as_ptr()) }; + Self(LeanObj(obj.cast())) + } + + /// Decode the Lean string into a Rust `String`. + pub fn to_string(&self) -> String { + unsafe { + let obj = self.0.as_ptr() as *mut _; + let len = lean::lean_string_size(obj) - 1; // m_size includes NUL + let data = lean::lean_string_cstr(obj); + let bytes = std::slice::from_raw_parts(data.cast::(), len); + String::from_utf8_unchecked(bytes.to_vec()) + } + } + + /// Number of data bytes (excluding the trailing NUL). 
+ pub fn byte_len(&self) -> usize { + unsafe { lean::lean_string_size(self.0.as_ptr() as *mut _) - 1 } + } +} + +// ============================================================================= +// LeanCtor — Constructor objects (tag 0–243) +// ============================================================================= + +/// Typed wrapper for a Lean constructor object (tag 0–243). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanCtor(LeanObj); + +impl Deref for LeanCtor { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanCtor { + /// Wrap a raw pointer, asserting it is a constructor (tag <= 243). + /// + /// # Safety + /// The pointer must be a valid Lean constructor object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() <= 243); + Self(obj) + } + + /// Allocate a new constructor object. + pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { + #[allow(clippy::cast_possible_truncation)] + let obj = unsafe { + lean::lean_alloc_ctor( + tag as u32, + num_objs as u32, + scalar_size as u32, + ) + }; + Self(LeanObj(obj.cast())) + } + + pub fn tag(&self) -> u8 { + self.0.tag() + } + + /// Get the `i`-th object field via `lean_ctor_get`. + pub fn get(&self, i: usize) -> LeanObj { + #[allow(clippy::cast_possible_truncation)] + LeanObj( + unsafe { lean::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) }.cast(), + ) + } + + /// Set the `i`-th object field via `lean_ctor_set`. + pub fn set(&self, i: usize, val: impl Into) { + let val: LeanObj = val.into(); + #[allow(clippy::cast_possible_truncation)] + unsafe { + lean::lean_ctor_set( + self.0.as_ptr() as *mut _, + i as u32, + val.as_ptr() as *mut _, + ); + } + } + + /// Set a `u8` scalar field at the given byte offset (past all object fields). 
+ pub fn set_u8(&self, offset: usize, val: u8) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + lean::lean_ctor_set_uint8(self.0.as_ptr() as *mut _, offset as u32, val); + } + } + + /// Read `N` object-field pointers using raw pointer math. + /// + /// This bypasses `lean_ctor_get`'s bounds check, which is necessary when + /// reading past the declared object fields into the scalar area (e.g. for + /// `Expr.Data`). + pub fn objs(&self) -> [LeanObj; N] { + let base = unsafe { self.0.as_ptr().cast::<*const c_void>().add(1) }; + std::array::from_fn(|i| LeanObj(unsafe { *base.add(i) })) + } + + /// Read a `u64` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_u64(&self, num_objs: usize, offset: usize) -> u64 { + unsafe { + std::ptr::read_unaligned( + self.0 + .as_ptr() + .cast::() + .add(8 + num_objs * 8 + offset) + .cast(), + ) + } + } + + /// Read a `u8` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_u8(&self, num_objs: usize, offset: usize) -> u8 { + unsafe { *self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset) } + } + + /// Read a `bool` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_bool(&self, num_objs: usize, offset: usize) -> bool { + self.scalar_u8(num_objs, offset) != 0 + } +} + +// ============================================================================= +// LeanExternal — External objects (tag 254) +// ============================================================================= + +/// Typed wrapper for a Lean external object (tag 254) holding a `T`. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanExternal(LeanObj, PhantomData); + +impl Deref for LeanExternal { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanExternal { + /// Wrap a raw pointer, asserting it is an external object (tag 254). 
+ /// + /// # Safety + /// The pointer must be a valid Lean external object whose data pointer + /// points to a valid `T`. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == 254); + Self(obj, PhantomData) + } + + /// Allocate a new external object holding `data`. + pub fn alloc(class: &ExternalClass, data: T) -> Self { + let data_ptr = Box::into_raw(Box::new(data)); + let obj = unsafe { lean::lean_alloc_external(class.0 as *mut _, data_ptr.cast()) }; + Self(LeanObj(obj.cast()), PhantomData) + } + + /// Get a reference to the wrapped data. + pub fn get(&self) -> &T { + unsafe { &*lean::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() } + } +} + +// ============================================================================= +// ExternalClass — Registered external class +// ============================================================================= + +/// A registered Lean external class (wraps `lean_external_class*`). +pub struct ExternalClass(*mut c_void); + +// Safety: the class pointer is initialized once and read-only thereafter. +unsafe impl Send for ExternalClass {} +unsafe impl Sync for ExternalClass {} + +impl ExternalClass { + /// Register a new external class with explicit finalizer and foreach callbacks. + /// + /// # Safety + /// The `finalizer` callback must correctly free the external data, and + /// `foreach` must correctly visit any Lean object references held by the data. + pub unsafe fn register( + finalizer: lean::lean_external_finalize_proc, + foreach: lean::lean_external_foreach_proc, + ) -> Self { + Self(unsafe { lean::lean_register_external_class(finalizer, foreach) }.cast()) + } + + /// Register a new external class that uses `Drop` to finalize `T` + /// and has no Lean object references to visit. 
+ pub fn register_with_drop() -> Self { + unsafe extern "C" fn drop_finalizer(ptr: *mut c_void) { + if !ptr.is_null() { + drop(unsafe { Box::from_raw(ptr.cast::()) }); + } + } + unsafe { + Self::register( + Some(drop_finalizer::), + Some(super::noop_foreach), + ) + } + } +} + +// ============================================================================= +// LeanList — List α +// ============================================================================= + +/// Typed wrapper for a Lean `List α` (nil = scalar `lean_box(0)`, cons = ctor tag 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanList(LeanObj); + +impl Deref for LeanList { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanList { + /// Wrap a raw pointer, asserting it is a valid `List` (scalar nil or ctor tag 1). + /// + /// # Safety + /// The pointer must be a valid Lean `List` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(obj.is_scalar() || obj.tag() == 1); + Self(obj) + } + + /// The empty list. + pub fn nil() -> Self { + Self(LeanObj::box_usize(0)) + } + + /// Prepend `head` to `tail`. + pub fn cons(head: impl Into, tail: LeanList) -> Self { + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, head); + ctor.set(1, tail); + Self(ctor.0) + } + + pub fn is_nil(&self) -> bool { + self.0.is_scalar() + } + + pub fn iter(&self) -> LeanListIter { + LeanListIter(self.0) + } + + pub fn collect(&self, f: impl Fn(LeanObj) -> T) -> Vec { + self.iter().map(f).collect() + } + + /// Build a list from an iterator of values convertible to `LeanObj`. + pub fn from_iter(items: impl IntoIterator>) -> Self { + let items: Vec = items.into_iter().map(Into::into).collect(); + let mut list = Self::nil(); + for item in items.into_iter().rev() { + list = Self::cons(item, list); + } + list + } +} + +/// Iterator over the elements of a `LeanList`. 
+pub struct LeanListIter(LeanObj); + +impl Iterator for LeanListIter { + type Item = LeanObj; + fn next(&mut self) -> Option { + if self.0.is_scalar() { + return None; + } + let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + let [head, tail] = ctor.objs::<2>(); + self.0 = tail; + Some(head) + } +} + +// ============================================================================= +// LeanOption — Option α +// ============================================================================= + +/// Typed wrapper for a Lean `Option α` (none = scalar, some = ctor tag 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanOption(LeanObj); + +impl Deref for LeanOption { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanOption { + /// Wrap a raw pointer, asserting it is a valid `Option`. + /// + /// # Safety + /// The pointer must be a valid Lean `Option` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(obj.is_scalar() || obj.tag() == 1); + Self(obj) + } + + pub fn none() -> Self { + Self(LeanObj::box_usize(0)) + } + + pub fn some(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, val); + Self(ctor.0) + } + + pub fn is_none(&self) -> bool { + self.0.is_scalar() + } + + pub fn is_some(&self) -> bool { + !self.is_none() + } + + pub fn to_option(&self) -> Option { + if self.is_none() { + None + } else { + let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + Some(ctor.get(0)) + } + } +} + +// ============================================================================= +// LeanExcept — Except ε α +// ============================================================================= + +/// Typed wrapper for a Lean `Except ε α` (error = ctor tag 0, ok = ctor tag 1). 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanExcept(LeanObj); + +impl Deref for LeanExcept { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl LeanExcept { + /// Wrap a raw pointer, asserting it is a valid `Except`. + /// + /// # Safety + /// The pointer must be a valid Lean `Except` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObj(ptr); + debug_assert!(!obj.is_scalar() && (obj.tag() == 0 || obj.tag() == 1)); + Self(obj) + } + + /// Build `Except.ok val`. + pub fn ok(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, val); + Self(ctor.0) + } + + /// Build `Except.error msg`. + pub fn error(msg: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, msg); + Self(ctor.0) + } + + /// Build `Except.error (String.mk msg)` from a Rust string. + pub fn error_string(msg: &str) -> Self { + Self::error(LeanString::from_str(msg)) + } + + pub fn is_ok(&self) -> bool { + self.0.tag() == 1 + } + + pub fn is_error(&self) -> bool { + self.0.tag() == 0 + } + + pub fn into_result(self) -> Result { + let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + if self.is_ok() { + Ok(ctor.get(0)) + } else { + Err(ctor.get(0)) + } + } +} + +// ============================================================================= +// From for LeanObj — allow wrapper types to be passed to set() etc. 
+// ============================================================================= + +impl From for LeanObj { + #[inline] + fn from(x: LeanArray) -> Self { x.0 } +} + +impl From for LeanObj { + #[inline] + fn from(x: LeanByteArray) -> Self { x.0 } +} + +impl From for LeanObj { + #[inline] + fn from(x: LeanString) -> Self { x.0 } +} + +impl From for LeanObj { + #[inline] + fn from(x: LeanCtor) -> Self { x.0 } +} + +impl From> for LeanObj { + #[inline] + fn from(x: LeanExternal) -> Self { x.0 } +} + +impl From for LeanObj { + #[inline] + fn from(x: LeanList) -> Self { x.0 } +} + +impl From for LeanObj { + #[inline] + fn from(x: LeanOption) -> Self { x.0 } +} + +impl From for LeanObj { + #[inline] + fn from(x: LeanExcept) -> Self { x.0 } +} + +// ============================================================================= +// Domain types — typed newtypes for specific Lean types +// ============================================================================= + +/// Generate a `#[repr(transparent)]` newtype over `LeanObj` for a specific +/// Lean type, with `Deref`, `From`, and a `new` constructor. +macro_rules! lean_domain_type { + ($($(#[$meta:meta])* $name:ident;)*) => {$( + $(#[$meta])* + #[derive(Clone, Copy)] + #[repr(transparent)] + pub struct $name(LeanObj); + + impl Deref for $name { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { &self.0 } + } + + impl From<$name> for LeanObj { + #[inline] + fn from(x: $name) -> Self { x.0 } + } + + impl $name { + #[inline] + pub fn new(obj: LeanObj) -> Self { Self(obj) } + } + )*}; +} + +lean_domain_type! { + // Ix core types + /// Lean `Ix.Name` object. + IxName; + /// Lean `Ix.Level` object. + IxLevel; + /// Lean `Ix.Expr` object. + IxExpr; + /// Lean `Ix.ConstantInfo` object. + IxConstantInfo; + /// Lean `Ix.RawEnvironment` object. + IxRawEnvironment; + /// Lean `Ix.Environment` object. + IxEnvironment; + /// Lean `Ix.RustCondensedBlocks` object. 
+ IxCondensedBlocks; + /// Lean `Ix.CompileM.RustCompilePhases` object. + IxCompilePhases; + + // Ix data types + /// Lean `Ix.Int` object. + IxInt; + /// Lean `Ix.Substring` object. + IxSubstring; + /// Lean `Ix.SourceInfo` object. + IxSourceInfo; + /// Lean `Ix.SyntaxPreresolved` object. + IxSyntaxPreresolved; + /// Lean `Ix.Syntax` object. + IxSyntax; + /// Lean `Ix.DataValue` object. + IxDataValue; + + // Ixon types + /// Lean `Ixon.DefKind` object. + IxonDefKind; + /// Lean `Ixon.DefinitionSafety` object. + IxonDefinitionSafety; + /// Lean `Ixon.QuotKind` object. + IxonQuotKind; + /// Lean `Ixon.Univ` object. + IxonUniv; + /// Lean `Ixon.Expr` object. + IxonExpr; + /// Lean `Ixon.Definition` object. + IxonDefinition; + /// Lean `Ixon.RecursorRule` object. + IxonRecursorRule; + /// Lean `Ixon.Recursor` object. + IxonRecursor; + /// Lean `Ixon.Axiom` object. + IxonAxiom; + /// Lean `Ixon.Quotient` object. + IxonQuotient; + /// Lean `Ixon.Constructor` object. + IxonConstructor; + /// Lean `Ixon.Inductive` object. + IxonInductive; + /// Lean `Ixon.InductiveProj` object. + IxonInductiveProj; + /// Lean `Ixon.ConstructorProj` object. + IxonConstructorProj; + /// Lean `Ixon.RecursorProj` object. + IxonRecursorProj; + /// Lean `Ixon.DefinitionProj` object. + IxonDefinitionProj; + /// Lean `Ixon.MutConst` object. + IxonMutConst; + /// Lean `Ixon.ConstantInfo` object. + IxonConstantInfo; + /// Lean `Ixon.Constant` object. + IxonConstant; + /// Lean `Ixon.DataValue` object. + IxonDataValue; + /// Lean `Ixon.ExprMetaData` object. + IxonExprMetaData; + /// Lean `Ixon.ExprMetaArena` object. + IxonExprMetaArena; + /// Lean `Ixon.ConstantMeta` object. + IxonConstantMeta; + /// Lean `Ixon.Named` object. + IxonNamed; + /// Lean `Ixon.Comm` object. + IxonComm; + /// Lean `Ixon.RawEnv` object. + IxonRawEnv; + + // Error types + /// Lean `Ixon.SerializeError` object. + IxSerializeError; + /// Lean `Ix.DecompileM.DecompileError` object. 
+ IxDecompileError; + /// Lean `Ix.CompileM.CompileError` object. + IxCompileError; + /// Lean `BlockCompareResult` object. + IxBlockCompareResult; + /// Lean `BlockCompareDetail` object. + IxBlockCompareDetail; +} + +/// `Ix.Address = { hash : ByteArray }` — single-field struct, unboxed to `ByteArray`. +pub type IxAddress = LeanByteArray; diff --git a/src/sha256.rs b/src/sha256.rs index 6763b98f..6bf1b5f3 100644 --- a/src/sha256.rs +++ b/src/sha256.rs @@ -1,18 +1,11 @@ use sha2::{Digest, Sha256}; -use std::ffi::c_void; -use crate::lean::{ - lean::lean_alloc_sarray, lean_sarray_data, lean_sarray_set_data, -}; +use crate::lean::obj::LeanByteArray; #[unsafe(no_mangle)] -extern "C" fn rs_sha256(bytes: *const c_void) -> *mut c_void { +extern "C" fn rs_sha256(bytes: LeanByteArray) -> LeanByteArray { let mut hasher = Sha256::new(); - hasher.update(lean_sarray_data(bytes)); + hasher.update(bytes.as_bytes()); let digest = hasher.finalize(); - let digest_slice = digest.as_slice(); - assert_eq!(digest_slice.len(), 32); - let arr_ptr = unsafe { lean_alloc_sarray(1, 32, 32) }; - unsafe { lean_sarray_set_data(arr_ptr.cast(), digest_slice) }; - arr_ptr.cast() + LeanByteArray::from_bytes(digest.as_slice()) } From ad3a4984d0305b5f14724da64ac38fd2dd0b8183 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Mon, 2 Mar 2026 16:24:30 -0500 Subject: [PATCH 08/27] checkpoint --- Cargo.toml | 2 +- src/iroh.rs | 44 +- src/iroh/_client.rs | 28 +- src/iroh/_server.rs | 8 +- src/iroh/client.rs | 11 +- src/iroh/server.rs | 8 +- src/lean/ffi.rs | 51 +- src/lean/ffi/compile.rs | 1400 ++++++++++++++------------------ src/lean/ffi/graph.rs | 20 +- src/lean/ffi/ixon/compare.rs | 59 +- src/lean/ffi/ixon/constant.rs | 1055 ++++++++++-------------- src/lean/ffi/ixon/env.rs | 433 ++++------ src/lean/ffi/ixon/expr.rs | 412 ++++------ src/lean/ffi/ixon/meta.rs | 734 ++++++++--------- src/lean/ffi/ixon/serialize.rs | 156 ++-- src/lean/ffi/ixon/sharing.rs | 
38 +- src/lean/ffi/ixon/univ.rs | 162 ++-- src/lean/ffi/lean_env.rs | 5 +- src/lean/obj.rs | 144 +++- 19 files changed, 2106 insertions(+), 2664 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6e5a29e4..2d969fe5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,7 @@ rand = "0.8.5" quickcheck_macros = "1.0.0" [features] -default = [] +default = ["net"] parallel = ["multi-stark/parallel"] net = ["bytes", "tokio", "iroh", "iroh-base", "n0-snafu", "n0-watcher", "rand", "tracing", "tracing-subscriber", "bincode", "serde" ] diff --git a/src/iroh.rs b/src/iroh.rs index 489a3f2a..5b957d0b 100644 --- a/src/iroh.rs +++ b/src/iroh.rs @@ -4,30 +4,30 @@ //! //! These fallback modules contain dummy functions that can still be called via Lean->C->Rust FFI, but will return an error message that Lean then prints before exiting. -#[cfg(any( - not(feature = "net"), - all(target_os = "macos", target_arch = "aarch64") -))] -pub mod _client; -#[cfg(any( - not(feature = "net"), - all(target_os = "macos", target_arch = "aarch64") -))] -pub mod _server; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] +// #[cfg(any( +// not(feature = "net"), +// all(target_os = "macos", target_arch = "aarch64") +// ))] +// pub mod _client; +// #[cfg(any( +// not(feature = "net"), +// all(target_os = "macos", target_arch = "aarch64") +// ))] +// pub mod _server; +// #[cfg(all( +// feature = "net", +// not(all(target_os = "macos", target_arch = "aarch64")) +// ))] pub mod client; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] +// #[cfg(all( +// feature = "net", +// not(all(target_os = "macos", target_arch = "aarch64")) +// ))] pub mod server; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] +// #[cfg(all( +// feature = "net", +// not(all(target_os = "macos", target_arch = "aarch64")) +// ))] pub mod common { use bincode::{Decode, Encode}; use serde::{Deserialize, 
Serialize}; diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs index f99c06fe..98a0f631 100644 --- a/src/iroh/_client.rs +++ b/src/iroh/_client.rs @@ -1,6 +1,4 @@ -use std::ffi::c_void; - -use crate::lean::lean_except_error_string; +use crate::lean::obj::{LeanExcept, LeanObj}; const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS aarch64-darwin"; @@ -8,21 +6,21 @@ const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_put( - _node_id: *const c_void, - _addrs: *const c_void, - _relay_url: *const c_void, - _input: *const c_void, -) -> *mut c_void { - lean_except_error_string(ERR_MSG) + _node_id: LeanObj, + _addrs: LeanObj, + _relay_url: LeanObj, + _input: LeanObj, +) -> LeanExcept { + LeanExcept::error_string(ERR_MSG) } /// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_get( - _node_id: *const c_void, - _addrs: *const c_void, - _relay_url: *const c_void, - _hash: *const c_void, -) -> *mut c_void { - lean_except_error_string(ERR_MSG) + _node_id: LeanObj, + _addrs: LeanObj, + _relay_url: LeanObj, + _hash: LeanObj, +) -> LeanExcept { + LeanExcept::error_string(ERR_MSG) } diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs index db7ee4a4..87ba4b6c 100644 --- a/src/iroh/_server.rs +++ b/src/iroh/_server.rs @@ -1,11 +1,9 @@ -use std::ffi::c_void; - -use crate::lean::lean_except_error_string; +use crate::lean::obj::LeanExcept; /// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> *mut c_void { - lean_except_error_string( +extern "C" fn rs_iroh_serve() -> LeanExcept { + LeanExcept::error_string( "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS 
aarch64-darwin", ) diff --git a/src/iroh/client.rs b/src/iroh/client.rs index bd88fb9e..00695d09 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -1,7 +1,7 @@ use iroh::{Endpoint, NodeAddr, NodeId, RelayMode, RelayUrl, SecretKey}; use n0_snafu::{Result, ResultExt}; use n0_watcher::Watcher as _; -use std::ffi::c_void; +use std::ffi::{CString, c_void}; use std::net::SocketAddr; use tracing::info; use tracing_subscriber::layer::SubscriberExt; @@ -12,7 +12,7 @@ use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; use crate::lean::{ lean::{lean_alloc_ctor, lean_alloc_sarray, lean_ctor_set, lean_mk_string}, lean_array_to_vec, lean_except_error_string, lean_except_ok, - lean_obj_to_string, lean_sarray_set_data, safe_cstring, + lean_obj_to_string, lean_sarray_set_data, }; // An example ALPN that we are using to communicate over the `Endpoint` @@ -27,8 +27,8 @@ const READ_SIZE_LIMIT: usize = 100_000_000; /// hash: String /// ``` fn mk_put_response(message: &str, hash: &str) -> *mut c_void { - let c_message = safe_cstring(message); - let c_hash = safe_cstring(hash); + let c_message = CString::new(message).unwrap(); + let c_hash = CString::new(hash).unwrap(); unsafe { let ctor = lean_alloc_ctor(0, 2, 0); lean_ctor_set(ctor, 0, lean_mk_string(c_message.as_ptr())); @@ -37,6 +37,9 @@ fn mk_put_response(message: &str, hash: &str) -> *mut c_void { } } +#[repr(transparent)] +struct LeanPutResponse {} + /// Build a Lean `GetResponse` structure: /// ``` /// structure GetResponse where diff --git a/src/iroh/server.rs b/src/iroh/server.rs index cd647c94..94bb401e 100644 --- a/src/iroh/server.rs +++ b/src/iroh/server.rs @@ -12,7 +12,7 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetResponse, PutResponse, Request, Response}; -use crate::lean::{lean_box_fn, lean_except_error_string, lean_except_ok}; +use crate::lean::obj::LeanExcept; // An example ALPN that we are using to communicate 
over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; @@ -21,15 +21,15 @@ const READ_SIZE_LIMIT: usize = 100_000_000; /// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> *mut c_void { +extern "C" fn rs_iroh_serve() -> LeanExcept { // Create a Tokio runtime to block on the async function let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); // Run the async function and block until we get the result match rt.block_on(serve()) { - Ok(()) => lean_except_ok(lean_box_fn(0)), - Err(err) => lean_except_error_string(&err.to_string()), + Ok(()) => LeanExcept::ok(0), + Err(err) => LeanExcept::error_string(&err.to_string()), } } diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs index 9fa234fb..60779bc4 100644 --- a/src/lean/ffi.rs +++ b/src/lean/ffi.rs @@ -12,19 +12,17 @@ pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. -use std::ffi::{CString, c_void}; - -use crate::lean::{ - lean::{lean_io_result_mk_error, lean_mk_io_user_error, lean_mk_string}, - lean_array_to_vec, lean_sarray_data, lean_unbox_u32, +use crate::lean::lean::{ + lean_io_result_mk_error, lean_io_result_mk_ok, lean_mk_io_user_error, }; +use crate::lean::obj::{LeanArray, LeanByteArray, LeanObj, LeanString}; /// Guard an FFI function that returns a Lean IO result against panics. /// On panic, returns a Lean IO error with the panic message instead of /// unwinding across the `extern "C"` boundary (which is undefined behavior). 
-pub(crate) fn ffi_io_guard(f: F) -> *mut c_void +pub(crate) fn ffi_io_guard(f: F) -> LeanObj where - F: FnOnce() -> *mut c_void + std::panic::UnwindSafe, + F: FnOnce() -> LeanObj + std::panic::UnwindSafe, { match std::panic::catch_unwind(f) { Ok(result) => result, @@ -36,26 +34,39 @@ where } else { "FFI panic: unknown".to_string() }; - let c_msg = CString::new(msg).unwrap_or_else(|_| { - CString::new("FFI panic: (invalid message)").unwrap() - }); - unsafe { - let lean_msg = lean_mk_string(c_msg.as_ptr()); - let lean_err = lean_mk_io_user_error(lean_msg); - lean_io_result_mk_error(lean_err).cast() - } + io_error(&msg) }, } } +/// Wrap a Lean value in an IO success result. +pub(crate) fn io_ok(val: impl Into) -> LeanObj { + let val: LeanObj = val.into(); + unsafe { + LeanObj::from_raw(lean_io_result_mk_ok(val.as_mut_ptr().cast()).cast()) + } +} + +/// Create a Lean IO error result from a Rust error message. +pub(crate) fn io_error(msg: &str) -> LeanObj { + let lean_msg = LeanString::from_str(msg); + unsafe { + let lean_err = lean_mk_io_user_error(lean_msg.as_mut_ptr().cast()); + LeanObj::from_raw(lean_io_result_mk_error(lean_err).cast()) + } +} + #[unsafe(no_mangle)] extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( - u32s: *const c_void, - bytes: *const c_void, + u32s: LeanObj, + bytes: LeanObj, ) -> bool { - let u32s = lean_array_to_vec(u32s, lean_unbox_u32) + let arr = unsafe { LeanArray::from_raw(u32s.as_ptr()) }; + let u32s_flat: Vec = arr + .map(|elem| elem.unbox_u32()) .into_iter() .flat_map(u32::to_le_bytes) - .collect::>(); - u32s == lean_sarray_data(bytes) + .collect(); + let ba = unsafe { LeanByteArray::from_raw(bytes.as_ptr()) }; + u32s_flat == ba.as_bytes() } diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs index 475f3cf1..5d458849 100644 --- a/src/lean/ffi/compile.rs +++ b/src/lean/ffi/compile.rs @@ -6,19 +6,11 @@ //! - `rs_decompile_env`: decompile Ixon back to Lean environment //! 
- `rs_roundtrip_*`: roundtrip FFI tests for Lean↔Rust type conversions //! - `build_*` / `decode_*`: convert between Lean constructor layouts and Rust types -//! -//! ## Lean object layout conventions -//! -//! Lean constructors are allocated via `lean_alloc_ctor(tag, num_objs, scalar_size)`: -//! - Object fields are accessed with `lean_ctor_get(obj, i)` (0-indexed) -//! - Scalar fields follow objects at byte offset `8 + num_objs * 8` -//! - Scalar fields are accessed via pointer arithmetic on the object base use std::collections::HashMap; -use std::ffi::{CString, c_void}; use std::sync::Arc; -use super::ffi_io_guard; +use super::{ffi_io_guard, io_error, io_ok}; use crate::ix::address::Address; use crate::ix::compile::{CompileState, compile_env}; use crate::ix::condense::compute_sccs; @@ -29,15 +21,12 @@ use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_ctor_set_uint8, lean_ctor_set_uint64, - lean_inc, lean_io_result_mk_error, lean_io_result_mk_ok, - lean_mk_io_user_error, lean_mk_string, lean_obj_tag, lean_sarray_cptr, - lean_uint64_to_nat, -}; +use crate::lean::lean::lean_uint64_to_nat; use crate::lean::nat::Nat; -use crate::lean::{lean_obj_to_string, lean_sarray_data, lean_sarray_set_data}; +use crate::lean::obj::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObj, LeanString, +}; +use crate::lean::lean_obj_to_string; use dashmap::DashMap; use dashmap::DashSet; @@ -56,20 +45,30 @@ use super::ixon::env::{ use super::ixon::meta::{build_constant_meta, build_ixon_comm}; use super::lean_env::{GlobalCache, lean_ptr_to_env, lean_ptr_to_name}; +// ============================================================================= +// Helper builders +// 
============================================================================= + +/// Build a Lean String from a Rust &str. +fn build_lean_string(s: &str) -> LeanObj { + LeanString::from_str(s).into() +} + +/// Build a Lean Nat from a usize. +fn build_lean_nat_usize(n: usize) -> LeanObj { + unsafe { LeanObj::from_raw(lean_uint64_to_nat(n as u64).cast()) } +} + // ============================================================================= // Raw* Builder Functions for Compile FFI // ============================================================================= /// Build RawConst: { addr : Address, const : Ixon.Constant } -pub fn build_raw_const(addr: &Address, constant: &IxonConstant) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let const_obj = build_ixon_constant(constant); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, const_obj.cast()); - obj.cast() - } +pub fn build_raw_const(addr: &Address, constant: &IxonConstant) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, build_address_from_ixon(addr)); + ctor.set(1, build_ixon_constant(constant)); + *ctor } /// Build RawNamed: { name : Ix.Name, addr : Address, constMeta : Ixon.ConstantMeta } @@ -78,43 +77,28 @@ pub fn build_raw_named( name: &Name, addr: &Address, meta: &ConstantMeta, -) -> *mut c_void { - unsafe { - let name_obj = build_name(cache, name); - let addr_obj = build_address_from_ixon(addr); - let meta_obj = build_constant_meta(meta); - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj.as_mut_ptr().cast()); - lean_ctor_set(obj, 1, addr_obj.cast()); - lean_ctor_set(obj, 2, meta_obj.cast()); - obj.cast() - } +) -> LeanObj { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, build_name(cache, name)); + ctor.set(1, build_address_from_ixon(addr)); + ctor.set(2, build_constant_meta(meta)); + *ctor } /// Build RawBlob: { addr : Address, bytes : ByteArray } -pub fn build_raw_blob(addr: 
&Address, bytes: &[u8]) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let ba_data = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), ba_data, bytes.len()); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, ba); - obj.cast() - } +pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, build_address_from_ixon(addr)); + ctor.set(1, LeanByteArray::from_bytes(bytes)); + *ctor } /// Build RawComm: { addr : Address, comm : Ixon.Comm } -pub fn build_raw_comm(addr: &Address, comm: &Comm) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let comm_obj = build_ixon_comm(comm); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, comm_obj.cast()); - obj.cast() - } +pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, build_address_from_ixon(addr)); + ctor.set(1, build_ixon_comm(comm)); + *ctor } // ============================================================================= @@ -123,46 +107,40 @@ pub fn build_raw_comm(addr: &Address, comm: &Comm) -> *mut c_void { /// Round-trip a RustCondensedBlocks structure. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_rust_condensed_blocks( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let low_links = lean_ctor_get(ptr as *mut _, 0); - let blocks = lean_ctor_get(ptr as *mut _, 1); - let block_refs = lean_ctor_get(ptr as *mut _, 2); - - lean_inc(low_links); - lean_inc(blocks); - lean_inc(block_refs); - - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, low_links); - lean_ctor_set(result, 1, blocks); - lean_ctor_set(result, 2, block_refs); - result.cast() - } +pub extern "C" fn rs_roundtrip_rust_condensed_blocks(obj: LeanObj) -> LeanObj { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let low_links = ctor.get(0); + let blocks = ctor.get(1); + let block_refs = ctor.get(2); + + low_links.inc_ref(); + blocks.inc_ref(); + block_refs.inc_ref(); + + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, low_links); + result.set(1, blocks); + result.set(2, block_refs); + *result } /// Round-trip a RustCompilePhases structure. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_rust_compile_phases( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let raw_env = lean_ctor_get(ptr as *mut _, 0); - let condensed = lean_ctor_get(ptr as *mut _, 1); - let compile_env = lean_ctor_get(ptr as *mut _, 2); - - lean_inc(raw_env); - lean_inc(condensed); - lean_inc(compile_env); - - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env); - lean_ctor_set(result, 1, condensed); - lean_ctor_set(result, 2, compile_env); - result.cast() - } +pub extern "C" fn rs_roundtrip_rust_compile_phases(obj: LeanObj) -> LeanObj { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let raw_env = ctor.get(0); + let condensed = ctor.get(1); + let compile_env = ctor.get(2); + + raw_env.inc_ref(); + condensed.inc_ref(); + compile_env.inc_ref(); + + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, raw_env); + result.set(1, condensed); + result.set(2, compile_env); + *result } // ============================================================================= @@ -171,77 +149,56 @@ pub extern "C" fn rs_roundtrip_rust_compile_phases( /// Round-trip a BlockCompareResult. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_block_compare_result( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => lean_alloc_ctor(0, 0, 0).cast(), - 1 => { - let base = ptr.cast::(); - let lean_size = *base.add(8).cast::(); - let rust_size = *base.add(16).cast::(); - let first_diff = *base.add(24).cast::(); - - let obj = lean_alloc_ctor(1, 0, 24); - let out_base = obj.cast::(); - *out_base.add(8).cast::() = lean_size; - *out_base.add(16).cast::() = rust_size; - *out_base.add(24).cast::() = first_diff; - obj.cast() - }, - 2 => lean_alloc_ctor(2, 0, 0).cast(), - _ => unreachable!("Invalid BlockCompareResult tag: {}", tag), - } +pub extern "C" fn rs_roundtrip_block_compare_result(obj: LeanObj) -> LeanObj { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { + 0 => *LeanCtor::alloc(0, 0, 0), // match + 1 => { + // mismatch: 0 obj, 24 scalar bytes (3 × u64) + let lean_size = ctor.scalar_u64(0, 0); + let rust_size = ctor.scalar_u64(0, 8); + let first_diff = ctor.scalar_u64(0, 16); + + let out = LeanCtor::alloc(1, 0, 24); + out.set_u64(0, lean_size); + out.set_u64(8, rust_size); + out.set_u64(16, first_diff); + *out + }, + 2 => *LeanCtor::alloc(2, 0, 0), // notFound + _ => unreachable!("Invalid BlockCompareResult tag: {}", ctor.tag()), } } /// Round-trip a BlockCompareDetail. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_block_compare_detail( - ptr: *const c_void, -) -> *mut c_void { - unsafe { - let result_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let lean_sharing_len = *base.add(16).cast::(); - let rust_sharing_len = *base.add(24).cast::(); - - let result_obj = rs_roundtrip_block_compare_result(result_ptr.cast()); - - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, result_obj.cast()); - let out_base = obj.cast::(); - *out_base.add(16).cast::() = lean_sharing_len; - *out_base.add(24).cast::() = rust_sharing_len; - obj.cast() - } +pub extern "C" fn rs_roundtrip_block_compare_detail(obj: LeanObj) -> LeanObj { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let result_ptr = ctor.get(0); + let lean_sharing_len = ctor.scalar_u64(1, 0); + let rust_sharing_len = ctor.scalar_u64(1, 8); + + let result_obj = rs_roundtrip_block_compare_result(result_ptr); + + let out = LeanCtor::alloc(0, 1, 16); + out.set(0, result_obj); + out.set_u64(1 * 8, lean_sharing_len); + out.set_u64(1 * 8 + 8, rust_sharing_len); + *out } // ============================================================================= // Full Compilation FFI // ============================================================================= -/// Create a Lean IO error result from a Rust error message. -unsafe fn make_compile_io_error(msg: &str) -> *mut c_void { - unsafe { - let c_msg = CString::new(msg) - .unwrap_or_else(|_| CString::new("compilation error").unwrap()); - let lean_msg = lean_mk_string(c_msg.as_ptr()); - let lean_err = lean_mk_io_user_error(lean_msg); - lean_io_result_mk_error(lean_err).cast() - } -} - /// FFI function to run the complete compilation pipeline and return all data. 
#[unsafe(no_mangle)] pub extern "C" fn rs_compile_env_full( - env_consts_ptr: *const c_void, -) -> *mut c_void { + env_consts_ptr: LeanObj, +) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { // Phase 1: Decode Lean environment - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); @@ -255,107 +212,98 @@ pub extern "C" fn rs_compile_env_full( Err(e) => { let msg = format!("rs_compile_env_full: Rust compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; + return io_error(&msg); }, }; // Phase 4: Build Lean structures let mut cache = LeanBuildCache::with_capacity(env_len); - unsafe { - let raw_env = build_raw_environment(&mut cache, &rust_env); - let condensed_obj = build_condensed_blocks(&mut cache, &condensed); - - // Collect blocks - let mut blocks_data: Vec<(Name, Vec, usize)> = Vec::new(); - let mut seen_addrs: std::collections::HashSet
= - std::collections::HashSet::new(); + let raw_env = + unsafe { LeanObj::from_raw(build_raw_environment(&mut cache, &rust_env)) }; + let condensed_obj = build_condensed_blocks(&mut cache, &condensed); - for entry in compile_stt.name_to_addr.iter() { - let name = entry.key().clone(); - let addr = entry.value().clone(); + // Collect blocks + let mut blocks_data: Vec<(Name, Vec, usize)> = Vec::new(); + let mut seen_addrs: std::collections::HashSet
= + std::collections::HashSet::new(); - if seen_addrs.contains(&addr) { - continue; - } - seen_addrs.insert(addr.clone()); + for entry in compile_stt.name_to_addr.iter() { + let name = entry.key().clone(); + let addr = entry.value().clone(); - if let Some(constant) = compile_stt.env.get_const(&addr) { - let mut bytes = Vec::new(); - constant.put(&mut bytes); - let sharing_len = constant.sharing.len(); - blocks_data.push((name, bytes, sharing_len)); - } + if seen_addrs.contains(&addr) { + continue; } + seen_addrs.insert(addr.clone()); - // Build blocks array - let blocks_arr = lean_alloc_array(blocks_data.len(), blocks_data.len()); - for (i, (name, bytes, sharing_len)) in blocks_data.iter().enumerate() { - let name_obj = build_name(&mut cache, name); - - let ba = lean_alloc_sarray(1, bytes.len(), bytes.len()); - let ba_data = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(bytes.as_ptr(), ba_data, bytes.len()); + if let Some(constant) = compile_stt.env.get_const(&addr) { + let mut bytes = Vec::new(); + constant.put(&mut bytes); + let sharing_len = constant.sharing.len(); + blocks_data.push((name, bytes, sharing_len)); + } + } - let block = lean_alloc_ctor(0, 2, 8); - lean_ctor_set(block, 0, name_obj.as_mut_ptr().cast()); - lean_ctor_set(block, 1, ba); - let base = block.cast::(); - *base.add(8 + 16).cast::() = *sharing_len as u64; + // Build blocks array + let blocks_arr = LeanArray::alloc(blocks_data.len()); + for (i, (name, bytes, sharing_len)) in blocks_data.iter().enumerate() { + let name_obj = build_name(&mut cache, name); + let ba = LeanByteArray::from_bytes(bytes); - lean_array_set_core(blocks_arr, i, block); - } + // Block: { name: Ix.Name, bytes: ByteArray, sharingLen: UInt64 } + let block = LeanCtor::alloc(0, 2, 8); + block.set(0, name_obj); + block.set(1, ba); + block.set_u64(2 * 8, *sharing_len as u64); - // Build nameToAddr array - let name_to_addr_len = compile_stt.name_to_addr.len(); - let name_to_addr_arr = - lean_alloc_array(name_to_addr_len, 
name_to_addr_len); - for (i, entry) in compile_stt.name_to_addr.iter().enumerate() { - let name = entry.key(); - let addr = entry.value(); + blocks_arr.set(i, *block); + } - let name_obj = build_name(&mut cache, name); + // Build nameToAddr array + let name_to_addr_len = compile_stt.name_to_addr.len(); + let name_to_addr_arr = LeanArray::alloc(name_to_addr_len); + for (i, entry) in compile_stt.name_to_addr.iter().enumerate() { + let name = entry.key(); + let addr = entry.value(); - let addr_bytes = addr.as_bytes(); - let addr_ba = lean_alloc_sarray(1, 32, 32); - let addr_data = lean_sarray_cptr(addr_ba); - std::ptr::copy_nonoverlapping(addr_bytes.as_ptr(), addr_data, 32); + let name_obj = build_name(&mut cache, name); + let addr_ba = LeanByteArray::from_bytes(addr.as_bytes()); - let entry_obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(entry_obj, 0, name_obj.as_mut_ptr().cast()); - lean_ctor_set(entry_obj, 1, addr_ba); + let entry_obj = LeanCtor::alloc(0, 2, 0); + entry_obj.set(0, name_obj); + entry_obj.set(1, addr_ba); - lean_array_set_core(name_to_addr_arr, i, entry_obj); - } + name_to_addr_arr.set(i, *entry_obj); + } - // Build RawCompiledEnv - let compiled_obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(compiled_obj, 0, blocks_arr); - lean_ctor_set(compiled_obj, 1, name_to_addr_arr); + // Build RawCompiledEnv + let compiled_obj = LeanCtor::alloc(0, 2, 0); + compiled_obj.set(0, *blocks_arr); + compiled_obj.set(1, *name_to_addr_arr); - // Build RustCompilationResult - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env.cast()); - lean_ctor_set(result, 1, condensed_obj.as_mut_ptr().cast()); - lean_ctor_set(result, 2, compiled_obj); + // Build RustCompilationResult + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, raw_env); + result.set(1, condensed_obj); + result.set(2, *compiled_obj); - lean_io_result_mk_ok(result).cast() - } + io_ok(*result) })) } /// FFI function to compile a Lean environment to serialized Ixon.Env bytes. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env(env_consts_ptr: *const c_void) -> *mut c_void { +pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { Ok(stt) => stt, Err(e) => { let msg = format!("rs_compile_env: Rust compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; + return io_error(&msg); }, }; @@ -363,41 +311,36 @@ pub extern "C" fn rs_compile_env(env_consts_ptr: *const c_void) -> *mut c_void { let mut buf = Vec::new(); if let Err(e) = compile_stt.env.put(&mut buf) { let msg = format!("rs_compile_env: Env serialization failed: {}", e); - return unsafe { make_compile_io_error(&msg) }; + return io_error(&msg); } // Build Lean ByteArray - unsafe { - let ba = lean_alloc_sarray(1, buf.len(), buf.len()); - let ba_data = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(buf.as_ptr(), ba_data, buf.len()); - lean_io_result_mk_ok(ba).cast() - } + let ba = LeanByteArray::from_bytes(&buf); + io_ok(ba) })) } /// Round-trip a RawEnv: decode from Lean, re-encode via builder. /// This performs a full decode/build cycle to verify FFI correctness. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_raw_env( - raw_env_ptr: *const c_void, -) -> *mut c_void { - let env = decode_raw_env(raw_env_ptr); +pub extern "C" fn rs_roundtrip_raw_env(raw_env_obj: LeanObj) -> LeanObj { + let env = decode_raw_env(raw_env_obj); build_raw_env(&env) } /// FFI function to run all compilation phases and return combined results. 
#[unsafe(no_mangle)] pub extern "C" fn rs_compile_phases( - env_consts_ptr: *const c_void, -) -> *mut c_void { + env_consts_ptr: LeanObj, +) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); let mut cache = LeanBuildCache::with_capacity(env_len); - let raw_env = build_raw_environment(&mut cache, &rust_env); + let raw_env = + unsafe { LeanObj::from_raw(build_raw_environment(&mut cache, &rust_env)) }; let ref_graph = build_ref_graph(&rust_env); @@ -409,96 +352,90 @@ pub extern "C" fn rs_compile_phases( Ok(stt) => stt, Err(e) => { let msg = format!("rs_compile_phases: compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; + return io_error(&msg); }, }; + // Build Lean objects from compile results - unsafe { - let consts: Vec<_> = compile_stt - .env - .consts - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (addr, constant)) in consts.iter().enumerate() { - let raw_const = build_raw_const(addr, constant); - lean_array_set_core(consts_arr, i, raw_const.cast()); - } + let consts: Vec<_> = compile_stt + .env + .consts + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (addr, constant)) in consts.iter().enumerate() { + consts_arr.set(i, build_raw_const(addr, constant)); + } - let named: Vec<_> = compile_stt - .env - .named - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let named_arr = lean_alloc_array(named.len(), named.len()); - for (i, (name, n)) in named.iter().enumerate() { - let raw_named = build_raw_named(&mut cache, name, &n.addr, &n.meta); - lean_array_set_core(named_arr, i, raw_named.cast()); - } + let named: Vec<_> = compile_stt + .env + .named + 
.iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let named_arr = LeanArray::alloc(named.len()); + for (i, (name, n)) in named.iter().enumerate() { + named_arr.set(i, build_raw_named(&mut cache, name, &n.addr, &n.meta)); + } - let blobs: Vec<_> = compile_stt - .env - .blobs - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let blobs_arr = lean_alloc_array(blobs.len(), blobs.len()); - for (i, (addr, bytes)) in blobs.iter().enumerate() { - let raw_blob = build_raw_blob(addr, bytes); - lean_array_set_core(blobs_arr, i, raw_blob.cast()); - } + let blobs: Vec<_> = compile_stt + .env + .blobs + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let blobs_arr = LeanArray::alloc(blobs.len()); + for (i, (addr, bytes)) in blobs.iter().enumerate() { + blobs_arr.set(i, build_raw_blob(addr, bytes)); + } - let comms: Vec<_> = compile_stt - .env - .comms - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let comms_arr = lean_alloc_array(comms.len(), comms.len()); - for (i, (addr, comm)) in comms.iter().enumerate() { - let raw_comm = build_raw_comm(addr, comm); - lean_array_set_core(comms_arr, i, raw_comm.cast()); - } + let comms: Vec<_> = compile_stt + .env + .comms + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let comms_arr = LeanArray::alloc(comms.len()); + for (i, (addr, comm)) in comms.iter().enumerate() { + comms_arr.set(i, build_raw_comm(addr, comm)); + } - // Build names array (Address → Ix.Name) - let names: Vec<_> = compile_stt - .env - .names - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let names_arr = lean_alloc_array(names.len(), names.len()); - for (i, (addr, name)) in names.iter().enumerate() { - let obj = build_raw_name_entry(&mut cache, addr, name); - lean_array_set_core(names_arr, i, obj.cast()); - } + // Build names array (Address → Ix.Name) + let names: Vec<_> = compile_stt + .env + .names + .iter() + .map(|e| 
(e.key().clone(), e.value().clone())) + .collect(); + let names_arr = LeanArray::alloc(names.len()); + for (i, (addr, name)) in names.iter().enumerate() { + names_arr.set(i, build_raw_name_entry(&mut cache, addr, name)); + } - let raw_ixon_env = lean_alloc_ctor(0, 5, 0); - lean_ctor_set(raw_ixon_env, 0, consts_arr); - lean_ctor_set(raw_ixon_env, 1, named_arr); - lean_ctor_set(raw_ixon_env, 2, blobs_arr); - lean_ctor_set(raw_ixon_env, 3, comms_arr); - lean_ctor_set(raw_ixon_env, 4, names_arr); + let raw_ixon_env = LeanCtor::alloc(0, 5, 0); + raw_ixon_env.set(0, *consts_arr); + raw_ixon_env.set(1, *named_arr); + raw_ixon_env.set(2, *blobs_arr); + raw_ixon_env.set(3, *comms_arr); + raw_ixon_env.set(4, *names_arr); - let result = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(result, 0, raw_env.cast()); - lean_ctor_set(result, 1, condensed_obj.as_mut_ptr().cast()); - lean_ctor_set(result, 2, raw_ixon_env); + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, raw_env); + result.set(1, condensed_obj); + result.set(2, *raw_ixon_env); - lean_io_result_mk_ok(result).cast() - } + io_ok(*result) })) } /// FFI function to compile a Lean environment to a RawEnv. 
#[unsafe(no_mangle)] pub extern "C" fn rs_compile_env_to_ixon( - env_consts_ptr: *const c_void, -) -> *mut c_void { + env_consts_ptr: LeanObj, +) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { @@ -506,95 +443,89 @@ pub extern "C" fn rs_compile_env_to_ixon( Err(e) => { let msg = format!("rs_compile_env_to_ixon: compilation failed: {:?}", e); - return unsafe { make_compile_io_error(&msg) }; + return io_error(&msg); }, }; let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - unsafe { - let consts: Vec<_> = compile_stt - .env - .consts - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (addr, constant)) in consts.iter().enumerate() { - let raw_const = build_raw_const(addr, constant); - lean_array_set_core(consts_arr, i, raw_const.cast()); - } - - let named: Vec<_> = compile_stt - .env - .named - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let named_arr = lean_alloc_array(named.len(), named.len()); - for (i, (name, n)) in named.iter().enumerate() { - let raw_named = build_raw_named(&mut cache, name, &n.addr, &n.meta); - lean_array_set_core(named_arr, i, raw_named.cast()); - } + let consts: Vec<_> = compile_stt + .env + .consts + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (addr, constant)) in consts.iter().enumerate() { + consts_arr.set(i, build_raw_const(addr, constant)); + } - let blobs: Vec<_> = compile_stt - .env - .blobs - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let blobs_arr = lean_alloc_array(blobs.len(), blobs.len()); - for (i, (addr, bytes)) in blobs.iter().enumerate() { - let raw_blob = 
build_raw_blob(addr, bytes); - lean_array_set_core(blobs_arr, i, raw_blob.cast()); - } + let named: Vec<_> = compile_stt + .env + .named + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let named_arr = LeanArray::alloc(named.len()); + for (i, (name, n)) in named.iter().enumerate() { + named_arr.set(i, build_raw_named(&mut cache, name, &n.addr, &n.meta)); + } - let comms: Vec<_> = compile_stt - .env - .comms - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let comms_arr = lean_alloc_array(comms.len(), comms.len()); - for (i, (addr, comm)) in comms.iter().enumerate() { - let raw_comm = build_raw_comm(addr, comm); - lean_array_set_core(comms_arr, i, raw_comm.cast()); - } + let blobs: Vec<_> = compile_stt + .env + .blobs + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let blobs_arr = LeanArray::alloc(blobs.len()); + for (i, (addr, bytes)) in blobs.iter().enumerate() { + blobs_arr.set(i, build_raw_blob(addr, bytes)); + } - // Build names array (Address → Ix.Name) - let names: Vec<_> = compile_stt - .env - .names - .iter() - .map(|e| (e.key().clone(), e.value().clone())) - .collect(); - let names_arr = lean_alloc_array(names.len(), names.len()); - for (i, (addr, name)) in names.iter().enumerate() { - let obj = build_raw_name_entry(&mut cache, addr, name); - lean_array_set_core(names_arr, i, obj.cast()); - } + let comms: Vec<_> = compile_stt + .env + .comms + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let comms_arr = LeanArray::alloc(comms.len()); + for (i, (addr, comm)) in comms.iter().enumerate() { + comms_arr.set(i, build_raw_comm(addr, comm)); + } - let result = lean_alloc_ctor(0, 5, 0); - lean_ctor_set(result, 0, consts_arr); - lean_ctor_set(result, 1, named_arr); - lean_ctor_set(result, 2, blobs_arr); - lean_ctor_set(result, 3, comms_arr); - lean_ctor_set(result, 4, names_arr); - lean_io_result_mk_ok(result).cast() + // Build names array (Address → Ix.Name) + 
let names: Vec<_> = compile_stt + .env + .names + .iter() + .map(|e| (e.key().clone(), e.value().clone())) + .collect(); + let names_arr = LeanArray::alloc(names.len()); + for (i, (addr, name)) in names.iter().enumerate() { + names_arr.set(i, build_raw_name_entry(&mut cache, addr, name)); } + + let result = LeanCtor::alloc(0, 5, 0); + result.set(0, *consts_arr); + result.set(1, *named_arr); + result.set(2, *blobs_arr); + result.set(3, *comms_arr); + result.set(4, *names_arr); + io_ok(*result) })) } /// FFI function to canonicalize environment to Ix.RawEnvironment. #[unsafe(no_mangle)] pub extern "C" fn rs_canonicalize_env_to_ix( - env_consts_ptr: *const c_void, -) -> *mut c_void { + env_consts_ptr: LeanObj, +) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - let raw_env = build_raw_environment(&mut cache, &rust_env); - unsafe { lean_io_result_mk_ok(raw_env.cast()).cast() } + let raw_env = + unsafe { LeanObj::from_raw(build_raw_environment(&mut cache, &rust_env)) }; + io_ok(raw_env) })) } @@ -618,9 +549,9 @@ pub struct RustCompiledEnv { /// FFI: Simple test to verify FFI round-trip works. /// Takes a Lean.Name and returns a magic number to verify the call succeeded. #[unsafe(no_mangle)] -extern "C" fn rs_test_ffi_roundtrip(name_ptr: *const c_void) -> u64 { +extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObj) -> u64 { let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr, &global_cache); + let name = lean_ptr_to_name(name_ptr.as_ptr(), &global_cache); // Return a magic number plus the hash of the name to verify it worked let hash = name.get_hash(); @@ -633,16 +564,12 @@ extern "C" fn rs_test_ffi_roundtrip(name_ptr: *const c_void) -> u64 { } /// FFI: Compile entire environment with Rust, returning a handle to RustCompiledEnv. 
-/// Takes: -/// - env_consts_ptr: pointer to List (Name x ConstantInfo) from Lean environment -/// -/// Returns: pointer to RustCompiledEnv (or null on failure) #[unsafe(no_mangle)] extern "C" fn rs_compile_env_rust_first( - env_consts_ptr: *const c_void, + env_consts_ptr: LeanObj, ) -> *mut RustCompiledEnv { // Decode Lean environment - let lean_env = lean_ptr_to_env(env_consts_ptr); + let lean_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let lean_env = Arc::new(lean_env); // Compile with Rust @@ -685,17 +612,18 @@ extern "C" fn rs_compile_env_rust_first( #[unsafe(no_mangle)] extern "C" fn rs_compare_block( rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, - lean_bytes: *const c_void, + lowlink_name: LeanObj, + lean_bytes: LeanObj, ) -> u64 { if rust_env.is_null() { return 2u64 << 32; // not found } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); let rust_env = unsafe { &*rust_env }; - let lean_data = lean_sarray_data(lean_bytes); + let ba = unsafe { LeanByteArray::from_raw(lean_bytes.as_ptr()) }; + let lean_data = ba.as_bytes(); // Look up Rust's compiled block let rust_bytes = match rust_env.blocks.get(&name) { @@ -748,13 +676,13 @@ extern "C" fn rs_get_rust_env_block_count( #[unsafe(no_mangle)] extern "C" fn rs_get_block_bytes_len( rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, + lowlink_name: LeanObj, ) -> u64 { if rust_env.is_null() { return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); let rust_env = unsafe { &*rust_env }; @@ -768,14 +696,14 @@ extern "C" fn rs_get_block_bytes_len( #[unsafe(no_mangle)] extern "C" fn rs_copy_block_bytes( rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, - dest: *mut c_void, + lowlink_name: LeanObj, + dest: LeanObj, ) { if 
rust_env.is_null() { return; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); let rust_env = unsafe { &*rust_env }; @@ -785,20 +713,21 @@ extern "C" fn rs_copy_block_bytes( }; // Copy into the Lean ByteArray - unsafe { lean_sarray_set_data(dest, bytes) }; + let ba = unsafe { LeanByteArray::from_raw(dest.as_ptr()) }; + unsafe { ba.set_data(bytes) }; } /// FFI: Get Rust's sharing vector length for a block. #[unsafe(no_mangle)] extern "C" fn rs_get_block_sharing_len( rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, + lowlink_name: LeanObj, ) -> u64 { if rust_env.is_null() { return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); let rust_env = unsafe { &*rust_env }; @@ -911,14 +840,14 @@ fn unshare_expr( #[unsafe(no_mangle)] extern "C" fn rs_get_pre_sharing_exprs( rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, - out_buf: *mut c_void, + lowlink_name: LeanObj, + out_buf: LeanObj, ) -> u64 { if rust_env.is_null() { return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); let rust_env = unsafe { &*rust_env }; @@ -1004,7 +933,8 @@ extern "C" fn rs_get_pre_sharing_exprs( } // Write to output buffer - unsafe { lean_sarray_set_data(out_buf, &output_bytes) }; + let ba = unsafe { LeanByteArray::from_raw(out_buf.as_ptr()) }; + unsafe { ba.set_data(&output_bytes) }; n_exprs } @@ -1013,13 +943,13 @@ extern "C" fn rs_get_pre_sharing_exprs( #[unsafe(no_mangle)] extern "C" fn rs_get_pre_sharing_exprs_len( rust_env: *const RustCompiledEnv, - lowlink_name: *const c_void, + lowlink_name: LeanObj, ) -> u64 { if rust_env.is_null() { return 0; } let global_cache = 
GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); let rust_env = unsafe { &*rust_env }; @@ -1073,14 +1003,14 @@ extern "C" fn rs_get_pre_sharing_exprs_len( #[unsafe(no_mangle)] extern "C" fn rs_lookup_const_addr( rust_env: *const RustCompiledEnv, - name_ptr: *const c_void, - out_addr: *mut c_void, + name_ptr: LeanObj, + out_addr: LeanObj, ) -> u64 { if rust_env.is_null() { return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr, &global_cache); + let name = lean_ptr_to_name(name_ptr.as_ptr(), &global_cache); let rust_env = unsafe { &*rust_env }; @@ -1088,7 +1018,8 @@ extern "C" fn rs_lookup_const_addr( match rust_env.compile_state.name_to_addr.get(&name) { Some(addr_ref) => { // Copy the 32-byte address into the output ByteArray - unsafe { lean_sarray_set_data(out_addr, addr_ref.as_bytes()) }; + let ba = unsafe { LeanByteArray::from_raw(out_addr.as_ptr()) }; + unsafe { ba.set_data(addr_ref.as_bytes()) }; 1 }, None => 0, @@ -1113,18 +1044,6 @@ extern "C" fn rs_get_compiled_const_count( use crate::ix::ixon::error::{CompileError, DecompileError, SerializeError}; -/// Build a Lean String from a Rust &str. -fn build_lean_string(s: &str) -> *mut c_void { - let cstr = CString::new(s) - .unwrap_or_else(|_| CString::new("(invalid string)").unwrap()); - unsafe { lean_mk_string(cstr.as_ptr()).cast() } -} - -/// Build a Lean Nat from a usize. -fn build_lean_nat_usize(n: usize) -> *mut c_void { - unsafe { lean_uint64_to_nat(n as u64).cast() } -} - /// Build a Lean Ixon.SerializeError from a Rust SerializeError. 
/// /// Tags 0–6: @@ -1135,98 +1054,83 @@ fn build_lean_nat_usize(n: usize) -> *mut c_void { /// 4: invalidBool (value : UInt8) → 0 obj + 1 scalar (UInt8) /// 5: addressError → 0 obj + 0 scalar /// 6: invalidShareIndex (idx : UInt64) (max : Nat) → 1 obj (Nat) + 8 scalar (UInt64) -pub fn build_serialize_error(se: &SerializeError) -> *mut c_void { - unsafe { - match se { - SerializeError::UnexpectedEof { expected } => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(expected).cast()); - obj.cast() - }, - SerializeError::InvalidTag { tag, context } => { - // 1 obj (String) + 1 scalar byte (UInt8) - let obj = lean_alloc_ctor(1, 1, 1); - lean_ctor_set(obj, 0, build_lean_string(context).cast()); - lean_ctor_set_uint8(obj, 8, *tag); - obj.cast() - }, - SerializeError::InvalidFlag { flag, context } => { - let obj = lean_alloc_ctor(2, 1, 1); - lean_ctor_set(obj, 0, build_lean_string(context).cast()); - lean_ctor_set_uint8(obj, 8, *flag); - obj.cast() - }, - SerializeError::InvalidVariant { variant, context } => { - let obj = lean_alloc_ctor(3, 1, 8); - lean_ctor_set(obj, 0, build_lean_string(context).cast()); - lean_ctor_set_uint64(obj, 8, *variant); - obj.cast() - }, - SerializeError::InvalidBool { value } => { - let obj = lean_alloc_ctor(4, 0, 1); - lean_ctor_set_uint8(obj, 0, *value); - obj.cast() - }, - SerializeError::AddressError => lean_alloc_ctor(5, 0, 0).cast(), - SerializeError::InvalidShareIndex { idx, max } => { - let obj = lean_alloc_ctor(6, 1, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max).cast()); - lean_ctor_set_uint64(obj, 8, *idx); - obj.cast() - }, - } +pub fn build_serialize_error(se: &SerializeError) -> LeanObj { + match se { + SerializeError::UnexpectedEof { expected } => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_lean_string(expected)); + *ctor + }, + SerializeError::InvalidTag { tag, context } => { + let ctor = LeanCtor::alloc(1, 1, 1); + ctor.set(0, build_lean_string(context)); + 
ctor.set_u8(1 * 8, *tag); + *ctor + }, + SerializeError::InvalidFlag { flag, context } => { + let ctor = LeanCtor::alloc(2, 1, 1); + ctor.set(0, build_lean_string(context)); + ctor.set_u8(1 * 8, *flag); + *ctor + }, + SerializeError::InvalidVariant { variant, context } => { + let ctor = LeanCtor::alloc(3, 1, 8); + ctor.set(0, build_lean_string(context)); + ctor.set_u64(1 * 8, *variant); + *ctor + }, + SerializeError::InvalidBool { value } => { + let ctor = LeanCtor::alloc(4, 0, 1); + ctor.set_u8(0, *value); + *ctor + }, + SerializeError::AddressError => *LeanCtor::alloc(5, 0, 0), + SerializeError::InvalidShareIndex { idx, max } => { + let ctor = LeanCtor::alloc(6, 1, 8); + ctor.set(0, build_lean_nat_usize(*max)); + ctor.set_u64(1 * 8, *idx); + *ctor + }, } } /// Decode a Lean Ixon.SerializeError to a Rust SerializeError. -pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let expected = lean_obj_to_string(str_ptr.cast()); - SerializeError::UnexpectedEof { expected } - }, - 1 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let tag_val = *base.add(8 + 8); - let context = lean_obj_to_string(str_ptr.cast()); - SerializeError::InvalidTag { tag: tag_val, context } - }, - 2 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let flag = *base.add(8 + 8); - let context = lean_obj_to_string(str_ptr.cast()); - SerializeError::InvalidFlag { flag, context } - }, - 3 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let variant = *base.add(8 + 8).cast::(); - let context = lean_obj_to_string(str_ptr.cast()); - SerializeError::InvalidVariant { variant, context } - }, - 4 => { - let base = ptr.cast::(); - let value = *base.add(8); - SerializeError::InvalidBool { value } - }, - 5 => SerializeError::AddressError, - 6 => { - let nat_ptr = 
lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - let max = Nat::from_ptr(nat_ptr.cast()) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - SerializeError::InvalidShareIndex { idx, max } - }, - _ => unreachable!("Invalid SerializeError tag: {}", tag), - } +pub fn decode_serialize_error(obj: LeanObj) -> SerializeError { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { + 0 => { + let expected = lean_obj_to_string(ctor.get(0).as_ptr()); + SerializeError::UnexpectedEof { expected } + }, + 1 => { + let context = lean_obj_to_string(ctor.get(0).as_ptr()); + let tag_val = ctor.scalar_u8(1, 0); + SerializeError::InvalidTag { tag: tag_val, context } + }, + 2 => { + let context = lean_obj_to_string(ctor.get(0).as_ptr()); + let flag = ctor.scalar_u8(1, 0); + SerializeError::InvalidFlag { flag, context } + }, + 3 => { + let context = lean_obj_to_string(ctor.get(0).as_ptr()); + let variant = ctor.scalar_u64(1, 0); + SerializeError::InvalidVariant { variant, context } + }, + 4 => { + let value = ctor.scalar_u8(0, 0); + SerializeError::InvalidBool { value } + }, + 5 => SerializeError::AddressError, + 6 => { + let max = Nat::from_ptr(ctor.get(0).as_ptr()) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let idx = ctor.scalar_u64(1, 0); + SerializeError::InvalidShareIndex { idx, max } + }, + _ => unreachable!("Invalid SerializeError tag: {}", ctor.tag()), } } @@ -1237,180 +1141,142 @@ pub fn decode_serialize_error(ptr: *const c_void) -> SerializeError { /// → 2 object fields (Nat, String) + 8 scalar bytes (UInt64) /// → `lean_alloc_ctor(tag, 2, 8)` /// → obj[0] = Nat, obj[1] = String, scalar[0] = UInt64 -pub fn build_decompile_error(err: &DecompileError) -> *mut c_void { - unsafe { - match err { - DecompileError::InvalidRefIndex { idx, refs_len, constant } => { - let obj = lean_alloc_ctor(0, 2, 8); - lean_ctor_set(obj, 0, 
build_lean_nat_usize(*refs_len).cast()); - lean_ctor_set(obj, 1, build_lean_string(constant).cast()); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj.cast() - }, - DecompileError::InvalidUnivIndex { idx, univs_len, constant } => { - let obj = lean_alloc_ctor(1, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*univs_len).cast()); - lean_ctor_set(obj, 1, build_lean_string(constant).cast()); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj.cast() - }, - DecompileError::InvalidShareIndex { idx, max, constant } => { - let obj = lean_alloc_ctor(2, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max).cast()); - lean_ctor_set(obj, 1, build_lean_string(constant).cast()); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj.cast() - }, - DecompileError::InvalidRecIndex { idx, ctx_size, constant } => { - let obj = lean_alloc_ctor(3, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*ctx_size).cast()); - lean_ctor_set(obj, 1, build_lean_string(constant).cast()); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj.cast() - }, - DecompileError::InvalidUnivVarIndex { idx, max, constant } => { - let obj = lean_alloc_ctor(4, 2, 8); - lean_ctor_set(obj, 0, build_lean_nat_usize(*max).cast()); - lean_ctor_set(obj, 1, build_lean_string(constant).cast()); - lean_ctor_set_uint64(obj, 2 * 8, *idx); - obj.cast() - }, - DecompileError::MissingAddress(addr) => { - // tag 5, 1 object (Address = ByteArray) - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); - obj.cast() - }, - DecompileError::MissingMetadata(addr) => { - // tag 6, 1 object (Address = ByteArray) - let obj = lean_alloc_ctor(6, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); - obj.cast() - }, - DecompileError::BlobNotFound(addr) => { - // tag 7, 1 object (Address = ByteArray) - let obj = lean_alloc_ctor(7, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); - obj.cast() - }, - DecompileError::BadBlobFormat { addr, expected } => { - // tag 8, 
2 objects (Address, String) - let obj = lean_alloc_ctor(8, 2, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); - lean_ctor_set(obj, 1, build_lean_string(expected).cast()); - obj.cast() - }, - DecompileError::BadConstantFormat { msg } => { - // tag 9, 1 object (String) - let obj = lean_alloc_ctor(9, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(msg).cast()); - obj.cast() - }, - DecompileError::Serialize(se) => { - // tag 10, 1 object (SerializeError) - let obj = lean_alloc_ctor(10, 1, 0); - lean_ctor_set(obj, 0, build_serialize_error(se).cast()); - obj.cast() - }, - } +pub fn build_decompile_error(err: &DecompileError) -> LeanObj { + match err { + DecompileError::InvalidRefIndex { idx, refs_len, constant } => { + let ctor = LeanCtor::alloc(0, 2, 8); + ctor.set(0, build_lean_nat_usize(*refs_len)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidUnivIndex { idx, univs_len, constant } => { + let ctor = LeanCtor::alloc(1, 2, 8); + ctor.set(0, build_lean_nat_usize(*univs_len)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidShareIndex { idx, max, constant } => { + let ctor = LeanCtor::alloc(2, 2, 8); + ctor.set(0, build_lean_nat_usize(*max)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidRecIndex { idx, ctx_size, constant } => { + let ctor = LeanCtor::alloc(3, 2, 8); + ctor.set(0, build_lean_nat_usize(*ctx_size)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidUnivVarIndex { idx, max, constant } => { + let ctor = LeanCtor::alloc(4, 2, 8); + ctor.set(0, build_lean_nat_usize(*max)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::MissingAddress(addr) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor 
+ }, + DecompileError::MissingMetadata(addr) => { + let ctor = LeanCtor::alloc(6, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, + DecompileError::BlobNotFound(addr) => { + let ctor = LeanCtor::alloc(7, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, + DecompileError::BadBlobFormat { addr, expected } => { + let ctor = LeanCtor::alloc(8, 2, 0); + ctor.set(0, build_address_from_ixon(addr)); + ctor.set(1, build_lean_string(expected)); + *ctor + }, + DecompileError::BadConstantFormat { msg } => { + let ctor = LeanCtor::alloc(9, 1, 0); + ctor.set(0, build_lean_string(msg)); + *ctor + }, + DecompileError::Serialize(se) => { + let ctor = LeanCtor::alloc(10, 1, 0); + ctor.set(0, build_serialize_error(se)); + *ctor + }, } } /// Decode a Lean DecompileError to a Rust DecompileError. -pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let refs_len = Nat::from_ptr(nat_ptr.cast()) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = lean_obj_to_string(str_ptr.cast()).clone(); - DecompileError::InvalidRefIndex { idx, refs_len, constant } - }, - 1 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let univs_len = Nat::from_ptr(nat_ptr.cast()) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = lean_obj_to_string(str_ptr.cast()).clone(); - DecompileError::InvalidUnivIndex { idx, univs_len, constant } - }, - 2 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let max = 
Nat::from_ptr(nat_ptr.cast()) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = lean_obj_to_string(str_ptr.cast()).clone(); - DecompileError::InvalidShareIndex { idx, max, constant } - }, - 3 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let ctx_size = Nat::from_ptr(nat_ptr.cast()) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = lean_obj_to_string(str_ptr.cast()).clone(); - DecompileError::InvalidRecIndex { idx, ctx_size, constant } - }, - 4 => { - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let idx = *base.add(8 + 2 * 8).cast::(); - let max = Nat::from_ptr(nat_ptr.cast()) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = lean_obj_to_string(str_ptr.cast()).clone(); - DecompileError::InvalidUnivVarIndex { idx, max, constant } - }, - 5 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::MissingAddress(decode_ixon_address(addr_ptr.cast())) - }, - 6 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::MissingMetadata(decode_ixon_address(addr_ptr.cast())) - }, - 7 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - DecompileError::BlobNotFound(decode_ixon_address(addr_ptr.cast())) - }, - 8 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let str_ptr = lean_ctor_get(ptr as *mut _, 1); - let addr = decode_ixon_address(addr_ptr.cast()); - let expected = lean_obj_to_string(str_ptr.cast()).clone(); - DecompileError::BadBlobFormat { addr, expected } - }, - 9 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let msg = lean_obj_to_string(str_ptr.cast()).clone(); - DecompileError::BadConstantFormat { msg } - }, - 10 => { - let se_ptr = lean_ctor_get(ptr as *mut _, 0); - 
DecompileError::Serialize(decode_serialize_error(se_ptr.cast())) - }, - _ => unreachable!("Invalid DecompileError tag: {}", tag), - } +pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { + 0 => { + let refs_len = Nat::from_ptr(ctor.get(0).as_ptr()) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidRefIndex { idx, refs_len, constant } + }, + 1 => { + let univs_len = Nat::from_ptr(ctor.get(0).as_ptr()) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidUnivIndex { idx, univs_len, constant } + }, + 2 => { + let max = Nat::from_ptr(ctor.get(0).as_ptr()) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidShareIndex { idx, max, constant } + }, + 3 => { + let ctx_size = Nat::from_ptr(ctor.get(0).as_ptr()) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidRecIndex { idx, ctx_size, constant } + }, + 4 => { + let max = Nat::from_ptr(ctor.get(0).as_ptr()) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidUnivVarIndex { idx, max, constant } + }, + 5 => DecompileError::MissingAddress(decode_ixon_address(ctor.get(0))), + 6 => DecompileError::MissingMetadata(decode_ixon_address(ctor.get(0))), + 7 => DecompileError::BlobNotFound(decode_ixon_address(ctor.get(0))), + 8 => { + let addr = 
decode_ixon_address(ctor.get(0)); + let expected = lean_obj_to_string(ctor.get(1).as_ptr()); + DecompileError::BadBlobFormat { addr, expected } + }, + 9 => { + let msg = lean_obj_to_string(ctor.get(0).as_ptr()); + DecompileError::BadConstantFormat { msg } + }, + 10 => { + DecompileError::Serialize(decode_serialize_error(ctor.get(0))) + }, + _ => unreachable!("Invalid DecompileError tag: {}", ctor.tag()), } } @@ -1423,108 +1289,89 @@ pub fn decode_decompile_error(ptr: *const c_void) -> DecompileError { /// 3: unsupportedExpr (desc : String) → 1 obj /// 4: unknownUnivParam (curr param : String) → 2 obj /// 5: serializeError (msg : String) → 1 obj -pub fn build_compile_error(err: &CompileError) -> *mut c_void { - unsafe { - match err { - CompileError::MissingConstant { name } => { - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(name).cast()); - obj.cast() - }, - CompileError::MissingAddress(addr) => { - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(addr).cast()); - obj.cast() - }, - CompileError::InvalidMutualBlock { reason } => { - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(reason).cast()); - obj.cast() - }, - CompileError::UnsupportedExpr { desc } => { - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, build_lean_string(desc).cast()); - obj.cast() - }, - CompileError::UnknownUnivParam { curr, param } => { - let obj = lean_alloc_ctor(4, 2, 0); - lean_ctor_set(obj, 0, build_lean_string(curr).cast()); - lean_ctor_set(obj, 1, build_lean_string(param).cast()); - obj.cast() - }, - CompileError::Serialize(se) => { - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, build_serialize_error(se).cast()); - obj.cast() - }, - } +pub fn build_compile_error(err: &CompileError) -> LeanObj { + match err { + CompileError::MissingConstant { name } => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_lean_string(name)); + *ctor + }, + 
CompileError::MissingAddress(addr) => { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, + CompileError::InvalidMutualBlock { reason } => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, build_lean_string(reason)); + *ctor + }, + CompileError::UnsupportedExpr { desc } => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, build_lean_string(desc)); + *ctor + }, + CompileError::UnknownUnivParam { curr, param } => { + let ctor = LeanCtor::alloc(4, 2, 0); + ctor.set(0, build_lean_string(curr)); + ctor.set(1, build_lean_string(param)); + *ctor + }, + CompileError::Serialize(se) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, build_serialize_error(se)); + *ctor + }, } } /// Decode a Lean CompileError to a Rust CompileError. -pub fn decode_compile_error(ptr: *const c_void) -> CompileError { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = lean_obj_to_string(str_ptr.cast()).clone(); - CompileError::MissingConstant { name } - }, - 1 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - CompileError::MissingAddress(decode_ixon_address(addr_ptr.cast())) - }, - 2 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let reason = lean_obj_to_string(str_ptr.cast()).clone(); - CompileError::InvalidMutualBlock { reason } - }, - 3 => { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let desc = lean_obj_to_string(str_ptr.cast()).clone(); - CompileError::UnsupportedExpr { desc } - }, - 4 => { - let str0 = lean_ctor_get(ptr as *mut _, 0); - let str1 = lean_ctor_get(ptr as *mut _, 1); - let curr = lean_obj_to_string(str0 as *const _); - let param = lean_obj_to_string(str1 as *const _); - CompileError::UnknownUnivParam { curr, param } - }, - 5 => { - let se_ptr = lean_ctor_get(ptr as *mut _, 0); - CompileError::Serialize(decode_serialize_error(se_ptr.cast())) - }, - _ => unreachable!("Invalid CompileError tag: {}", tag), - } 
+pub fn decode_compile_error(obj: LeanObj) -> CompileError { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { + 0 => { + let name = lean_obj_to_string(ctor.get(0).as_ptr()); + CompileError::MissingConstant { name } + }, + 1 => CompileError::MissingAddress(decode_ixon_address(ctor.get(0))), + 2 => { + let reason = lean_obj_to_string(ctor.get(0).as_ptr()); + CompileError::InvalidMutualBlock { reason } + }, + 3 => { + let desc = lean_obj_to_string(ctor.get(0).as_ptr()); + CompileError::UnsupportedExpr { desc } + }, + 4 => { + let curr = lean_obj_to_string(ctor.get(0).as_ptr()); + let param = lean_obj_to_string(ctor.get(1).as_ptr()); + CompileError::UnknownUnivParam { curr, param } + }, + 5 => { + CompileError::Serialize(decode_serialize_error(ctor.get(0))) + }, + _ => unreachable!("Invalid CompileError tag: {}", ctor.tag()), } } /// FFI: Round-trip a DecompileError: Lean → Rust → Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_decompile_error( - ptr: *const c_void, -) -> *mut c_void { - let err = decode_decompile_error(ptr); +pub extern "C" fn rs_roundtrip_decompile_error(obj: LeanObj) -> LeanObj { + let err = decode_decompile_error(obj); build_decompile_error(&err) } /// FFI: Round-trip a CompileError: Lean → Rust → Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_compile_error( - ptr: *const c_void, -) -> *mut c_void { - let err = decode_compile_error(ptr); +pub extern "C" fn rs_roundtrip_compile_error(obj: LeanObj) -> LeanObj { + let err = decode_compile_error(obj); build_compile_error(&err) } /// FFI: Round-trip a SerializeError: Lean → Rust → Lean. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_serialize_error( - ptr: *const c_void, -) -> *mut c_void { - let err = decode_serialize_error(ptr); +pub extern "C" fn rs_roundtrip_serialize_error(obj: LeanObj) -> LeanObj { + let err = decode_serialize_error(obj); build_serialize_error(&err) } @@ -1534,8 +1381,8 @@ pub extern "C" fn rs_roundtrip_serialize_error( /// FFI: Decompile an Ixon.RawEnv → Except DecompileError (Array (Ix.Name × Ix.ConstantInfo)). Pure. #[unsafe(no_mangle)] -pub extern "C" fn rs_decompile_env(raw_env_ptr: *const c_void) -> *mut c_void { - let decoded = decode_raw_env(raw_env_ptr); +pub extern "C" fn rs_decompile_env(raw_env_obj: LeanObj) -> LeanObj { + let decoded = decode_raw_env(raw_env_obj); let env = decoded_to_ixon_env(&decoded); // Wrap in CompileState (decompile_env only uses .env) @@ -1550,30 +1397,21 @@ pub extern "C" fn rs_decompile_env(raw_env_ptr: *const c_void) -> *mut c_void { Ok(dstt) => { let entries: Vec<_> = dstt.env.into_iter().collect(); let mut cache = LeanBuildCache::with_capacity(entries.len()); - unsafe { - let arr = lean_alloc_array(entries.len(), entries.len()); - for (i, (name, info)) in entries.iter().enumerate() { - let name_obj = build_name(&mut cache, name); - let info_obj = build_constant_info(&mut cache, info); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, name_obj.as_mut_ptr().cast()); - lean_ctor_set(pair, 1, info_obj.as_mut_ptr().cast()); - lean_array_set_core(arr, i, pair); - } - // Except.ok (tag 1) - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, arr); - obj.cast() + + let arr = LeanArray::alloc(entries.len()); + for (i, (name, info)) in entries.iter().enumerate() { + let name_obj = build_name(&mut cache, name); + let info_obj = build_constant_info(&mut cache, info); + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, info_obj); + arr.set(i, *pair); } + + LeanExcept::ok(arr).into() }, Err(e) => { - // Except.error (tag 0) — build 
DecompileError directly - unsafe { - let err_obj = build_decompile_error(&e); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, err_obj.cast()); - obj.cast() - } + LeanExcept::error(build_decompile_error(&e)).into() }, } } diff --git a/src/lean/ffi/graph.rs b/src/lean/ffi/graph.rs index 8cab23c7..50829e7d 100644 --- a/src/lean/ffi/graph.rs +++ b/src/lean/ffi/graph.rs @@ -1,12 +1,10 @@ //! Graph and SCC FFI functions. -use std::ffi::c_void; use std::sync::Arc; -use super::ffi_io_guard; +use super::{ffi_io_guard, io_ok}; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; -use crate::lean::lean::lean_io_result_mk_ok; use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; use super::builder::LeanBuildCache; @@ -96,31 +94,27 @@ pub fn build_condensed_blocks( /// FFI function to build a reference graph from a Lean environment. #[unsafe(no_mangle)] -pub extern "C" fn rs_build_ref_graph( - env_consts_ptr: *const c_void, -) -> *mut c_void { +pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_ref_graph_array(&mut cache, &ref_graph.out_refs); - unsafe { lean_io_result_mk_ok(result.as_ptr() as *mut _) }.cast() + io_ok(result) })) } /// FFI function to compute SCCs from a Lean environment. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compute_sccs( - env_consts_ptr: *const c_void, -) -> *mut c_void { +pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let condensed = compute_sccs(&ref_graph.out_refs); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_condensed_blocks(&mut cache, &condensed); - unsafe { lean_io_result_mk_ok(result.as_ptr() as *mut _) }.cast() + io_ok(result) })) } diff --git a/src/lean/ffi/ixon/compare.rs b/src/lean/ffi/ixon/compare.rs index f526e0d9..896377aa 100644 --- a/src/lean/ffi/ixon/compare.rs +++ b/src/lean/ffi/ixon/compare.rs @@ -7,8 +7,7 @@ use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use crate::lean::lean::{lean_alloc_ctor, lean_ctor_set}; -use crate::lean::lean_sarray_data; +use crate::lean::obj::{LeanByteArray, LeanCtor, LeanObj}; use super::super::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, @@ -23,7 +22,7 @@ pub struct RustBlockEnv { #[unsafe(no_mangle)] pub extern "C" fn rs_compare_expr_compilation( lean_expr_ptr: *const c_void, - lean_output: *const c_void, + lean_output: LeanObj, univ_ctx_size: u64, ) -> bool { // Decode Lean.Expr to Rust's representation @@ -58,7 +57,8 @@ pub extern "C" fn rs_compare_expr_compilation( put_expr(&rust_expr, &mut rust_bytes); // Compare byte-for-byte - let lean_bytes = lean_sarray_data(lean_output); + let lean_ba = unsafe { LeanByteArray::from_raw(lean_output.as_ptr()) }; + let lean_bytes = lean_ba.as_bytes(); rust_bytes == lean_bytes } @@ -69,38 +69,32 @@ fn build_block_compare_result( lean_size: u64, rust_size: u64, first_diff_offset: 
u64, -) -> *mut c_void { - unsafe { - if matched { - lean_alloc_ctor(0, 0, 0).cast() // match - } else if not_found { - lean_alloc_ctor(2, 0, 0).cast() // notFound - } else { - // mismatch - let obj = lean_alloc_ctor(1, 0, 24); - let base = obj.cast::(); - *base.add(8).cast::() = lean_size; - *base.add(16).cast::() = rust_size; - *base.add(24).cast::() = first_diff_offset; - obj.cast() - } +) -> LeanObj { + if matched { + *LeanCtor::alloc(0, 0, 0) // match + } else if not_found { + *LeanCtor::alloc(2, 0, 0) // notFound + } else { + // mismatch + let ctor = LeanCtor::alloc(1, 0, 24); + ctor.set_u64(0, lean_size); + ctor.set_u64(8, rust_size); + ctor.set_u64(16, first_diff_offset); + *ctor } } /// Build a BlockCompareDetail Lean object. fn build_block_compare_detail( - result: *mut c_void, + result: LeanObj, lean_sharing_len: u64, rust_sharing_len: u64, -) -> *mut c_void { - unsafe { - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, result.cast()); - let base = obj.cast::(); - *base.add(16).cast::() = lean_sharing_len; - *base.add(24).cast::() = rust_sharing_len; - obj.cast() - } +) -> LeanObj { + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, result); + ctor.set_u64(8, lean_sharing_len); + ctor.set_u64(8 + 8, rust_sharing_len); + *ctor } /// Compare a single block by lowlink name. 
@@ -113,14 +107,15 @@ fn build_block_compare_detail( pub unsafe extern "C" fn rs_compare_block_v2( rust_env: *const RustBlockEnv, lowlink_name: *const c_void, - lean_bytes: *const c_void, + lean_bytes: LeanObj, lean_sharing_len: u64, -) -> *mut c_void { +) -> LeanObj { let global_cache = GlobalCache::default(); let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let lean_data = lean_sarray_data(lean_bytes); + let lean_ba = unsafe { LeanByteArray::from_raw(lean_bytes.as_ptr()) }; + let lean_data = lean_ba.as_bytes(); // Look up Rust's compiled block let (rust_bytes, rust_sharing_len) = match rust_env.blocks.get(&name) { diff --git a/src/lean/ffi/ixon/constant.rs b/src/lean/ffi/ixon/constant.rs index 9c03c4ad..c0e676ac 100644 --- a/src/lean/ffi/ixon/constant.rs +++ b/src/lean/ffi/ixon/constant.rs @@ -4,467 +4,158 @@ //! Inductive, InductiveProj, ConstructorProj, RecursorProj, DefinitionProj, //! MutConst, ConstantInfo, Constant -use std::ffi::c_void; use std::sync::Arc; use crate::ix::address::Address; use crate::ix::ixon::constant::{ - Axiom as IxonAxiom, Constant as IxonConstant, - ConstantInfo as IxonConstantInfo, Constructor as IxonConstructor, - ConstructorProj, DefKind, Definition as IxonDefinition, DefinitionProj, - Inductive as IxonInductive, InductiveProj, MutConst, - Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, - RecursorRule as IxonRecursorRule, + Axiom, Constant, ConstantInfo, Constructor, ConstructorProj, DefKind, + Definition, DefinitionProj, Inductive, InductiveProj, MutConst, Quotient, + Recursor, RecursorProj, RecursorRule, }; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_obj_tag, lean_sarray_cptr, +use crate::lean::obj::{ + IxAddress, IxonAxiom, IxonConstant, IxonConstantInfo, IxonConstructor, + IxonConstructorProj, IxonDefinition, IxonDefinitionProj, IxonExpr, + IxonInductive, 
IxonInductiveProj, IxonMutConst, IxonQuotient, IxonRecursor, + IxonRecursorProj, IxonRecursorRule, LeanArray, LeanByteArray, LeanCtor, + LeanObj, }; -use crate::lean::{lean_array_to_vec, lean_sarray_data}; -use super::expr::{ - build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, - decode_ixon_expr_array, -}; -use super::univ::{build_ixon_univ_array, decode_ixon_univ_array}; +use super::univ::*; -/// Build Address from Ixon Address type (which is just a [u8; 32]). -pub fn build_address_from_ixon(addr: &Address) -> *mut c_void { - unsafe { - let ba = lean_alloc_sarray(1, 32, 32); - let data_ptr = lean_sarray_cptr(ba); - std::ptr::copy_nonoverlapping(addr.as_bytes().as_ptr(), data_ptr, 32); - ba.cast() +impl IxAddress { + /// Build Address from Ixon Address type (which is just a [u8; 32]). + pub fn build_from_ixon(addr: &Address) -> Self { + LeanByteArray::from_bytes(addr.as_bytes()) } -} -/// Build an Array of Addresses. -pub fn build_address_array(addrs: &[Address]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(addrs.len(), addrs.len()); + /// Build an Array of Addresses. + pub fn build_array(addrs: &[Address]) -> LeanArray { + let arr = LeanArray::alloc(addrs.len()); for (i, addr) in addrs.iter().enumerate() { - let addr_obj = build_address_from_ixon(addr); - lean_array_set_core(arr, i, addr_obj.cast()); + arr.set(i, Self::build_from_ixon(addr)); } - arr.cast() + arr + } + + /// Decode a ByteArray (Address) to Address. + pub fn decode_ixon(self) -> Address { + Address::from_slice(&self.as_bytes()[..32]).expect("Address should be 32 bytes") + } + + /// Decode Array Address. + pub fn decode_array(obj: LeanObj) -> Vec
{ + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + arr.map(|elem| { + let ba = unsafe { LeanByteArray::from_raw(elem.as_ptr()) }; + ba.decode_ixon() + }) } } -/// Build Ixon.Definition -/// Lean stores scalar fields ordered by size (largest first). -/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn build_ixon_definition(def: &IxonDefinition) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&def.typ); - let value_obj = build_ixon_expr(&def.value); - // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) - let obj = lean_alloc_ctor(0, 2, 16); - lean_ctor_set(obj, 0, typ_obj.cast()); - lean_ctor_set(obj, 1, value_obj.cast()); - let base = obj.cast::(); - let scalar_base = base.add(2 * 8 + 8); // offset 24 - - // lvls at offset 0 (8 bytes) - largest scalar first - *scalar_base.cast::() = def.lvls; - // kind at offset 8 (1 byte) +impl IxonDefinition { + /// Build Ixon.Definition + pub fn build(def: &Definition) -> Self { + let typ_obj = IxonExpr::build(&def.typ); + let value_obj = IxonExpr::build(&def.value); + let ctor = LeanCtor::alloc(0, 2, 16); + ctor.set(0, typ_obj); + ctor.set(1, value_obj); + ctor.set_u64(2 * 8, def.lvls); let kind_val: u8 = match def.kind { DefKind::Definition => 0, DefKind::Opaque => 1, DefKind::Theorem => 2, }; - *scalar_base.add(8) = kind_val; - // safety at offset 9 (1 byte) + ctor.set_u8(2 * 8 + 8, kind_val); let safety_val: u8 = match def.safety { crate::ix::env::DefinitionSafety::Unsafe => 0, crate::ix::env::DefinitionSafety::Safe => 1, crate::ix::env::DefinitionSafety::Partial => 2, }; - *scalar_base.add(9) = safety_val; - obj.cast() - } -} - -/// Build Ixon.RecursorRule -pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> *mut c_void { - unsafe { - let rhs_obj = build_ixon_expr(&rule.rhs); - // 1 obj field, 8 scalar bytes - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, rhs_obj.cast()); - let base = obj.cast::(); - *base.add(8 
+ 8).cast::() = rule.fields; - obj.cast() - } -} - -/// Build Ixon.Recursor -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn build_ixon_recursor(rec: &IxonRecursor) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&rec.typ); - // Build rules array - let rules_arr = lean_alloc_array(rec.rules.len(), rec.rules.len()); - for (i, rule) in rec.rules.iter().enumerate() { - let rule_obj = build_ixon_recursor_rule(rule); - lean_array_set_core(rules_arr, i, rule_obj.cast()); - } - // 2 obj fields (typ, rules), 48 scalar bytes (5×8 + 1 + 1 + 6 padding) - let obj = lean_alloc_ctor(0, 2, 48); - lean_ctor_set(obj, 0, typ_obj.cast()); - lean_ctor_set(obj, 1, rules_arr); - let base = obj.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - *scalar_base.cast::() = rec.lvls; - *scalar_base.add(8).cast::() = rec.params; - *scalar_base.add(16).cast::() = rec.indices; - *scalar_base.add(24).cast::() = rec.motives; - *scalar_base.add(32).cast::() = rec.minors; - // bool fields last - *scalar_base.add(40) = if rec.k { 1 } else { 0 }; - *scalar_base.add(41) = if rec.is_unsafe { 1 } else { 0 }; - obj.cast() - } -} - -/// Build Ixon.Axiom -/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_axiom(ax: &IxonAxiom) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&ax.typ); - // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, typ_obj.cast()); - let base = obj.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - *scalar_base.cast::() = ax.lvls; - // isUnsafe at offset 8 - *scalar_base.add(8) = if ax.is_unsafe { 1 } else { 0 }; - obj.cast() - } -} - -/// Build Ixon.Quotient -/// QuotKind is a simple enum stored as scalar u8, not object field. 
-/// Scalars ordered by size: lvls(8) + kind(1) + padding(7) -pub fn build_ixon_quotient(quot: &IxonQuotient) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(".typ); - // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7)) - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, typ_obj.cast()); - let base = obj.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - *scalar_base.cast::() = quot.lvls; - // kind at offset 8 - let kind_val: u8 = match quot.kind { - crate::ix::env::QuotKind::Type => 0, - crate::ix::env::QuotKind::Ctor => 1, - crate::ix::env::QuotKind::Lift => 2, - crate::ix::env::QuotKind::Ind => 3, - }; - *scalar_base.add(8) = kind_val; - obj.cast() - } -} - -/// Build Ixon.Constructor -/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_constructor(ctor: &IxonConstructor) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&ctor.typ); - // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) - let obj = lean_alloc_ctor(0, 1, 40); - lean_ctor_set(obj, 0, typ_obj.cast()); - let base = obj.cast::(); - let scalar_base = base.add(8 + 8); - // u64 fields first - *scalar_base.cast::() = ctor.lvls; - *scalar_base.add(8).cast::() = ctor.cidx; - *scalar_base.add(16).cast::() = ctor.params; - *scalar_base.add(24).cast::() = ctor.fields; - // bool field last - *scalar_base.add(32) = if ctor.is_unsafe { 1 } else { 0 }; - obj.cast() - } -} - -/// Build Ixon.Inductive -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn build_ixon_inductive(ind: &IxonInductive) -> *mut c_void { - unsafe { - let typ_obj = build_ixon_expr(&ind.typ); - // Build ctors array - let ctors_arr = lean_alloc_array(ind.ctors.len(), ind.ctors.len()); - for (i, ctor) in ind.ctors.iter().enumerate() { - let ctor_obj = build_ixon_constructor(ctor); - lean_array_set_core(ctors_arr, i, ctor_obj.cast()); 
- } - // 2 obj fields, 40 scalar bytes (4×8 + 3 + 5 padding) - let obj = lean_alloc_ctor(0, 2, 40); - lean_ctor_set(obj, 0, typ_obj.cast()); - lean_ctor_set(obj, 1, ctors_arr); - let base = obj.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - *scalar_base.cast::() = ind.lvls; - *scalar_base.add(8).cast::() = ind.params; - *scalar_base.add(16).cast::() = ind.indices; - *scalar_base.add(24).cast::() = ind.nested; - // bool fields last - *scalar_base.add(32) = if ind.recr { 1 } else { 0 }; - *scalar_base.add(33) = if ind.refl { 1 } else { 0 }; - *scalar_base.add(34) = if ind.is_unsafe { 1 } else { 0 }; - obj.cast() - } -} - -/// Build Ixon.InductiveProj -pub fn build_inductive_proj(proj: &InductiveProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj.cast()); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = proj.idx; - obj.cast() - } -} - -/// Build Ixon.ConstructorProj -pub fn build_constructor_proj(proj: &ConstructorProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 16); - lean_ctor_set(obj, 0, block_obj.cast()); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = proj.idx; - *base.add(8 + 16).cast::() = proj.cidx; - obj.cast() - } -} - -/// Build Ixon.RecursorProj -pub fn build_recursor_proj(proj: &RecursorProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj.cast()); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = proj.idx; - obj.cast() - } -} - -/// Build Ixon.DefinitionProj -pub fn build_definition_proj(proj: &DefinitionProj) -> *mut c_void { - unsafe { - let block_obj = build_address_from_ixon(&proj.block); - let obj = lean_alloc_ctor(0, 1, 8); - lean_ctor_set(obj, 0, block_obj.cast()); - let base = obj.cast::(); - *base.add(8 + 
8).cast::() = proj.idx; - obj.cast() - } -} - -/// Build Ixon.MutConst -pub fn build_mut_const(mc: &MutConst) -> *mut c_void { - unsafe { - match mc { - MutConst::Defn(def) => { - let def_obj = build_ixon_definition(def); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, def_obj.cast()); - obj.cast() - }, - MutConst::Indc(ind) => { - let ind_obj = build_ixon_inductive(ind); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, ind_obj.cast()); - obj.cast() - }, - MutConst::Recr(rec) => { - let rec_obj = build_ixon_recursor(rec); - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, rec_obj.cast()); - obj.cast() - }, - } - } -} - -/// Build Ixon.ConstantInfo (9 constructors) -pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> *mut c_void { - unsafe { - match info { - IxonConstantInfo::Defn(def) => { - let def_obj = build_ixon_definition(def); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, def_obj.cast()); - obj.cast() - }, - IxonConstantInfo::Recr(rec) => { - let rec_obj = build_ixon_recursor(rec); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, rec_obj.cast()); - obj.cast() - }, - IxonConstantInfo::Axio(ax) => { - let ax_obj = build_ixon_axiom(ax); - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, ax_obj.cast()); - obj.cast() - }, - IxonConstantInfo::Quot(quot) => { - let quot_obj = build_ixon_quotient(quot); - let obj = lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, quot_obj.cast()); - obj.cast() - }, - IxonConstantInfo::CPrj(proj) => { - let proj_obj = build_constructor_proj(proj); - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, proj_obj.cast()); - obj.cast() - }, - IxonConstantInfo::RPrj(proj) => { - let proj_obj = build_recursor_proj(proj); - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, proj_obj.cast()); - obj.cast() - }, - IxonConstantInfo::IPrj(proj) => { - let proj_obj = build_inductive_proj(proj); - let obj = lean_alloc_ctor(6, 1, 0); - 
lean_ctor_set(obj, 0, proj_obj.cast()); - obj.cast() - }, - IxonConstantInfo::DPrj(proj) => { - let proj_obj = build_definition_proj(proj); - let obj = lean_alloc_ctor(7, 1, 0); - lean_ctor_set(obj, 0, proj_obj.cast()); - obj.cast() - }, - IxonConstantInfo::Muts(muts) => { - let arr = lean_alloc_array(muts.len(), muts.len()); - for (i, mc) in muts.iter().enumerate() { - let mc_obj = build_mut_const(mc); - lean_array_set_core(arr, i, mc_obj.cast()); - } - let obj = lean_alloc_ctor(8, 1, 0); - lean_ctor_set(obj, 0, arr); - obj.cast() - }, - } - } -} - -/// Build Ixon.Constant -pub fn build_ixon_constant(constant: &IxonConstant) -> *mut c_void { - unsafe { - let info_obj = build_ixon_constant_info(&constant.info); - let sharing_obj = build_ixon_expr_array(&constant.sharing); - let refs_obj = build_address_array(&constant.refs); - let univs_obj = build_ixon_univ_array(&constant.univs); - let obj = lean_alloc_ctor(0, 4, 0); - lean_ctor_set(obj, 0, info_obj.cast()); - lean_ctor_set(obj, 1, sharing_obj.cast()); - lean_ctor_set(obj, 2, refs_obj.cast()); - lean_ctor_set(obj, 3, univs_obj.cast()); - obj.cast() + ctor.set_u8(2 * 8 + 9, safety_val); + Self::new(*ctor) } -} - -// ============================================================================= -// Decode Functions -// ============================================================================= -/// Decode a ByteArray (Address) to Address. -pub fn decode_ixon_address(ptr: *const c_void) -> Address { - let bytes = lean_sarray_data(ptr); - Address::from_slice(&bytes[..32]).expect("Address should be 32 bytes") -} - -/// Decode Array Address. -pub fn decode_ixon_address_array(ptr: *const c_void) -> Vec
{ - lean_array_to_vec(ptr, decode_ixon_address) -} - -/// Decode Ixon.Definition. -/// Lean stores scalar fields ordered by size (largest first). -/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn decode_ixon_definition(ptr: *const c_void) -> IxonDefinition { - unsafe { - let typ_ptr = lean_ctor_get(ptr as *mut _, 0); - let value_ptr = lean_ctor_get(ptr as *mut _, 1); - - let base = ptr.cast::(); - // Scalars start after header (8) + 2 obj fields (16) = offset 24 - let scalar_base = base.add(24); - - // lvls at offset 0 (8 bytes) - largest scalar first - let lvls = *scalar_base.cast::(); - // kind at offset 8 (1 byte) - let kind_val = *scalar_base.add(8); + /// Decode Ixon.Definition. + pub fn decode(self) -> Definition { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let lvls = ctor.scalar_u64(2, 0); + let kind_val = ctor.scalar_u8(2, 8); let kind = match kind_val { 0 => DefKind::Definition, 1 => DefKind::Opaque, 2 => DefKind::Theorem, - _ => panic!("Invalid DefKind: {}", kind_val), + _ => panic!("Invalid DefKind: {kind_val}"), }; - // safety at offset 9 (1 byte) - let safety_val = *scalar_base.add(9); + let safety_val = ctor.scalar_u8(2, 9); let safety = match safety_val { 0 => crate::ix::env::DefinitionSafety::Unsafe, 1 => crate::ix::env::DefinitionSafety::Safe, 2 => crate::ix::env::DefinitionSafety::Partial, - _ => panic!("Invalid DefinitionSafety: {}", safety_val), + _ => panic!("Invalid DefinitionSafety: {safety_val}"), }; - - IxonDefinition { + Definition { kind, safety, lvls, - typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), - value: Arc::new(decode_ixon_expr(value_ptr.cast())), + typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), + value: Arc::new(IxonExpr::new(ctor.get(1)).decode()), } } } -/// Decode Ixon.RecursorRule. 
-pub fn decode_ixon_recursor_rule(ptr: *const c_void) -> IxonRecursorRule { - unsafe { - let rhs_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let fields = *base.add(8 + 8).cast::(); - IxonRecursorRule { fields, rhs: Arc::new(decode_ixon_expr(rhs_ptr.cast())) } +impl IxonRecursorRule { + /// Build Ixon.RecursorRule + pub fn build(rule: &RecursorRule) -> Self { + let rhs_obj = IxonExpr::build(&rule.rhs); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, rhs_obj); + ctor.set_u64(8, rule.fields); + Self::new(*ctor) + } + + /// Decode Ixon.RecursorRule. + pub fn decode(self) -> RecursorRule { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let fields = ctor.scalar_u64(1, 0); + RecursorRule { fields, rhs: Arc::new(IxonExpr::new(ctor.get(0)).decode()) } } } -/// Decode Ixon.Recursor. -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn decode_ixon_recursor(ptr: *const c_void) -> IxonRecursor { - unsafe { - let typ_ptr = lean_ctor_get(ptr as *mut _, 0); - let rules_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - let lvls = *scalar_base.cast::(); - let params = *scalar_base.add(8).cast::(); - let indices = *scalar_base.add(16).cast::(); - let motives = *scalar_base.add(24).cast::(); - let minors = *scalar_base.add(32).cast::(); - // bool fields last - let k = *scalar_base.add(40) != 0; - let is_unsafe = *scalar_base.add(41) != 0; - - let rules = lean_array_to_vec(rules_ptr.cast(), decode_ixon_recursor_rule); - - IxonRecursor { +impl IxonRecursor { + /// Build Ixon.Recursor + pub fn build(rec: &Recursor) -> Self { + let typ_obj = IxonExpr::build(&rec.typ); + let rules_arr = LeanArray::alloc(rec.rules.len()); + for (i, rule) in rec.rules.iter().enumerate() { + rules_arr.set(i, IxonRecursorRule::build(rule)); + } + let ctor = LeanCtor::alloc(0, 2, 48); + ctor.set(0, typ_obj); + 
ctor.set(1, rules_arr); + ctor.set_u64(2 * 8, rec.lvls); + ctor.set_u64(2 * 8 + 8, rec.params); + ctor.set_u64(2 * 8 + 16, rec.indices); + ctor.set_u64(2 * 8 + 24, rec.motives); + ctor.set_u64(2 * 8 + 32, rec.minors); + ctor.set_u8(2 * 8 + 40, if rec.k { 1 } else { 0 }); + ctor.set_u8(2 * 8 + 41, if rec.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Recursor. + pub fn decode(self) -> Recursor { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let motives = ctor.scalar_u64(2, 24); + let minors = ctor.scalar_u64(2, 32); + let k = ctor.scalar_bool(2, 40); + let is_unsafe = ctor.scalar_bool(2, 41); + let rules_arr = unsafe { LeanArray::from_raw(ctor.get(1).as_ptr()) }; + let rules = rules_arr.map(|r| IxonRecursorRule::new(r).decode()); + Recursor { k, is_unsafe, lvls, @@ -472,100 +163,136 @@ pub fn decode_ixon_recursor(ptr: *const c_void) -> IxonRecursor { indices, motives, minors, - typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), + typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), rules, } } } -/// Decode Ixon.Axiom. -/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_axiom(ptr: *const c_void) -> IxonAxiom { - unsafe { - let typ_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - let lvls = *scalar_base.cast::(); - // isUnsafe at offset 8 - let is_unsafe = *scalar_base.add(8) != 0; - IxonAxiom { +impl IxonAxiom { + /// Build Ixon.Axiom + pub fn build(ax: &Axiom) -> Self { + let typ_obj = IxonExpr::build(&ax.typ); + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, typ_obj); + ctor.set_u64(8, ax.lvls); + ctor.set_u8(8 + 8, if ax.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Axiom. 
+ pub fn decode(self) -> Axiom { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let lvls = ctor.scalar_u64(1, 0); + let is_unsafe = ctor.scalar_bool(1, 8); + Axiom { is_unsafe, lvls, - typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), + typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), } } } -/// Decode Ixon.Quotient. -/// QuotKind is a scalar (not object field). Scalars: lvls(8) + kind(1) + padding(7) -pub fn decode_ixon_quotient(ptr: *const c_void) -> IxonQuotient { - unsafe { - // typ is the only object field (at index 0) - let typ_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let scalar_base = base.add(8 + 8); - // lvls at offset 0 - let lvls = *scalar_base.cast::(); - // kind at offset 8 - let kind_val = *scalar_base.add(8); +impl IxonQuotient { + /// Build Ixon.Quotient + pub fn build(quot: &Quotient) -> Self { + let typ_obj = IxonExpr::build(".typ); + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, typ_obj); + ctor.set_u64(8, quot.lvls); + let kind_val: u8 = match quot.kind { + crate::ix::env::QuotKind::Type => 0, + crate::ix::env::QuotKind::Ctor => 1, + crate::ix::env::QuotKind::Lift => 2, + crate::ix::env::QuotKind::Ind => 3, + }; + ctor.set_u8(8 + 8, kind_val); + Self::new(*ctor) + } + + /// Decode Ixon.Quotient. + pub fn decode(self) -> Quotient { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let lvls = ctor.scalar_u64(1, 0); + let kind_val = ctor.scalar_u8(1, 8); let kind = match kind_val { 0 => crate::ix::env::QuotKind::Type, 1 => crate::ix::env::QuotKind::Ctor, 2 => crate::ix::env::QuotKind::Lift, 3 => crate::ix::env::QuotKind::Ind, - _ => panic!("Invalid QuotKind: {}", kind_val), + _ => panic!("Invalid QuotKind: {kind_val}"), }; - IxonQuotient { kind, lvls, typ: Arc::new(decode_ixon_expr(typ_ptr.cast())) } + Quotient { kind, lvls, typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()) } } } -/// Decode Ixon.Constructor. 
-/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_constructor(ptr: *const c_void) -> IxonConstructor { - unsafe { - let typ_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let scalar_base = base.add(8 + 8); - // u64 fields first - let lvls = *scalar_base.cast::(); - let cidx = *scalar_base.add(8).cast::(); - let params = *scalar_base.add(16).cast::(); - let fields = *scalar_base.add(24).cast::(); - // bool field last - let is_unsafe = *scalar_base.add(32) != 0; - IxonConstructor { +impl IxonConstructor { + /// Build Ixon.Constructor + pub fn build(c: &Constructor) -> Self { + let typ_obj = IxonExpr::build(&c.typ); + let ctor = LeanCtor::alloc(0, 1, 40); + ctor.set(0, typ_obj); + ctor.set_u64(8, c.lvls); + ctor.set_u64(8 + 8, c.cidx); + ctor.set_u64(8 + 16, c.params); + ctor.set_u64(8 + 24, c.fields); + ctor.set_u8(8 + 32, if c.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Constructor. + pub fn decode(self) -> Constructor { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let lvls = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + let params = ctor.scalar_u64(1, 16); + let fields = ctor.scalar_u64(1, 24); + let is_unsafe = ctor.scalar_bool(1, 32); + Constructor { is_unsafe, lvls, cidx, params, fields, - typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), + typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), } } } -/// Decode Ixon.Inductive. 
-/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn decode_ixon_inductive(ptr: *const c_void) -> IxonInductive { - unsafe { - let typ_ptr = lean_ctor_get(ptr as *mut _, 0); - let ctors_ptr = lean_ctor_get(ptr as *mut _, 1); - let base = ptr.cast::(); - let scalar_base = base.add(2 * 8 + 8); - // u64 fields first - let lvls = *scalar_base.cast::(); - let params = *scalar_base.add(8).cast::(); - let indices = *scalar_base.add(16).cast::(); - let nested = *scalar_base.add(24).cast::(); - // bool fields last - let recr = *scalar_base.add(32) != 0; - let refl = *scalar_base.add(33) != 0; - let is_unsafe = *scalar_base.add(34) != 0; - - let ctors = lean_array_to_vec(ctors_ptr.cast(), decode_ixon_constructor); - - IxonInductive { +impl IxonInductive { + /// Build Ixon.Inductive + pub fn build(ind: &Inductive) -> Self { + let typ_obj = IxonExpr::build(&ind.typ); + let ctors_arr = LeanArray::alloc(ind.ctors.len()); + for (i, c) in ind.ctors.iter().enumerate() { + ctors_arr.set(i, IxonConstructor::build(c)); + } + let ctor = LeanCtor::alloc(0, 2, 40); + ctor.set(0, typ_obj); + ctor.set(1, ctors_arr); + ctor.set_u64(2 * 8, ind.lvls); + ctor.set_u64(2 * 8 + 8, ind.params); + ctor.set_u64(2 * 8 + 16, ind.indices); + ctor.set_u64(2 * 8 + 24, ind.nested); + ctor.set_u8(2 * 8 + 32, if ind.recr { 1 } else { 0 }); + ctor.set_u8(2 * 8 + 33, if ind.refl { 1 } else { 0 }); + ctor.set_u8(2 * 8 + 34, if ind.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } + + /// Decode Ixon.Inductive. 
+ pub fn decode(self) -> Inductive { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let nested = ctor.scalar_u64(2, 24); + let recr = ctor.scalar_bool(2, 32); + let refl = ctor.scalar_bool(2, 33); + let is_unsafe = ctor.scalar_bool(2, 34); + let ctors_arr = unsafe { LeanArray::from_raw(ctor.get(1).as_ptr()) }; + let ctors = ctors_arr.map(|c| IxonConstructor::new(c).decode()); + Inductive { recr, refl, is_unsafe, @@ -573,107 +300,227 @@ pub fn decode_ixon_inductive(ptr: *const c_void) -> IxonInductive { params, indices, nested, - typ: Arc::new(decode_ixon_expr(typ_ptr.cast())), + typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), ctors, } } } -/// Decode Ixon.InductiveProj. -pub fn decode_ixon_inductive_proj(ptr: *const c_void) -> InductiveProj { - unsafe { - let block_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - InductiveProj { idx, block: decode_ixon_address(block_ptr.cast()) } +impl IxonInductiveProj { + /// Build Ixon.InductiveProj + pub fn build(proj: &InductiveProj) -> Self { + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, IxAddress::build_from_ixon(&proj.block)); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } + + /// Decode Ixon.InductiveProj. + pub fn decode(self) -> InductiveProj { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let idx = ctor.scalar_u64(1, 0); + let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + InductiveProj { idx, block: ba.decode_ixon() } } } -/// Decode Ixon.ConstructorProj. 
-pub fn decode_ixon_constructor_proj(ptr: *const c_void) -> ConstructorProj { - unsafe { - let block_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - let cidx = *base.add(8 + 16).cast::(); - ConstructorProj { idx, cidx, block: decode_ixon_address(block_ptr.cast()) } +impl IxonConstructorProj { + /// Build Ixon.ConstructorProj + pub fn build(proj: &ConstructorProj) -> Self { + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, IxAddress::build_from_ixon(&proj.block)); + ctor.set_u64(8, proj.idx); + ctor.set_u64(8 + 8, proj.cidx); + Self::new(*ctor) + } + + /// Decode Ixon.ConstructorProj. + pub fn decode(self) -> ConstructorProj { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let idx = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + ConstructorProj { idx, cidx, block: ba.decode_ixon() } } } -/// Decode Ixon.RecursorProj. -pub fn decode_ixon_recursor_proj(ptr: *const c_void) -> RecursorProj { - unsafe { - let block_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - RecursorProj { idx, block: decode_ixon_address(block_ptr.cast()) } +impl IxonRecursorProj { + /// Build Ixon.RecursorProj + pub fn build(proj: &RecursorProj) -> Self { + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, IxAddress::build_from_ixon(&proj.block)); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } + + /// Decode Ixon.RecursorProj. + pub fn decode(self) -> RecursorProj { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let idx = ctor.scalar_u64(1, 0); + let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + RecursorProj { idx, block: ba.decode_ixon() } } } -/// Decode Ixon.DefinitionProj. 
-pub fn decode_ixon_definition_proj(ptr: *const c_void) -> DefinitionProj { - unsafe { - let block_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let idx = *base.add(8 + 8).cast::(); - DefinitionProj { idx, block: decode_ixon_address(block_ptr.cast()) } +impl IxonDefinitionProj { + /// Build Ixon.DefinitionProj + pub fn build(proj: &DefinitionProj) -> Self { + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, IxAddress::build_from_ixon(&proj.block)); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } + + /// Decode Ixon.DefinitionProj. + pub fn decode(self) -> DefinitionProj { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let idx = ctor.scalar_u64(1, 0); + let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DefinitionProj { idx, block: ba.decode_ixon() } } } -/// Decode Ixon.MutConst. -pub fn decode_ixon_mut_const(ptr: *const c_void) -> MutConst { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - match tag { - 0 => MutConst::Defn(decode_ixon_definition(inner_ptr.cast())), - 1 => MutConst::Indc(decode_ixon_inductive(inner_ptr.cast())), - 2 => MutConst::Recr(decode_ixon_recursor(inner_ptr.cast())), - _ => panic!("Invalid Ixon.MutConst tag: {}", tag), +impl IxonMutConst { + /// Build Ixon.MutConst + pub fn build(mc: &MutConst) -> Self { + let obj = match mc { + MutConst::Defn(def) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, IxonDefinition::build(def)); + *ctor + }, + MutConst::Indc(ind) => { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, IxonInductive::build(ind)); + *ctor + }, + MutConst::Recr(rec) => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, IxonRecursor::build(rec)); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.MutConst. 
+ pub fn decode(self) -> MutConst { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + match ctor.tag() { + 0 => MutConst::Defn(IxonDefinition::new(ctor.get(0)).decode()), + 1 => MutConst::Indc(IxonInductive::new(ctor.get(0)).decode()), + 2 => MutConst::Recr(IxonRecursor::new(ctor.get(0)).decode()), + tag => panic!("Invalid Ixon.MutConst tag: {tag}"), } } } -/// Decode Ixon.ConstantInfo. -pub fn decode_ixon_constant_info(ptr: *const c_void) -> IxonConstantInfo { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - match tag { - 0 => IxonConstantInfo::Defn(decode_ixon_definition(inner_ptr.cast())), - 1 => IxonConstantInfo::Recr(decode_ixon_recursor(inner_ptr.cast())), - 2 => IxonConstantInfo::Axio(decode_ixon_axiom(inner_ptr.cast())), - 3 => IxonConstantInfo::Quot(decode_ixon_quotient(inner_ptr.cast())), +impl IxonConstantInfo { + /// Build Ixon.ConstantInfo (9 constructors) + pub fn build(info: &ConstantInfo) -> Self { + let obj = match info { + ConstantInfo::Defn(def) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, IxonDefinition::build(def)); + *ctor + }, + ConstantInfo::Recr(rec) => { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, IxonRecursor::build(rec)); + *ctor + }, + ConstantInfo::Axio(ax) => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, IxonAxiom::build(ax)); + *ctor + }, + ConstantInfo::Quot(quot) => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, IxonQuotient::build(quot)); + *ctor + }, + ConstantInfo::CPrj(proj) => { + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, IxonConstructorProj::build(proj)); + *ctor + }, + ConstantInfo::RPrj(proj) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, IxonRecursorProj::build(proj)); + *ctor + }, + ConstantInfo::IPrj(proj) => { + let ctor = LeanCtor::alloc(6, 1, 0); + ctor.set(0, IxonInductiveProj::build(proj)); + *ctor + }, + ConstantInfo::DPrj(proj) => { + let ctor = LeanCtor::alloc(7, 1, 0); + ctor.set(0, 
IxonDefinitionProj::build(proj)); + *ctor + }, + ConstantInfo::Muts(muts) => { + let arr = LeanArray::alloc(muts.len()); + for (i, mc) in muts.iter().enumerate() { + arr.set(i, IxonMutConst::build(mc)); + } + let ctor = LeanCtor::alloc(8, 1, 0); + ctor.set(0, arr); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.ConstantInfo. + pub fn decode(self) -> ConstantInfo { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + match ctor.tag() { + 0 => ConstantInfo::Defn(IxonDefinition::new(ctor.get(0)).decode()), + 1 => ConstantInfo::Recr(IxonRecursor::new(ctor.get(0)).decode()), + 2 => ConstantInfo::Axio(IxonAxiom::new(ctor.get(0)).decode()), + 3 => ConstantInfo::Quot(IxonQuotient::new(ctor.get(0)).decode()), 4 => { - IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner_ptr.cast())) + ConstantInfo::CPrj(IxonConstructorProj::new(ctor.get(0)).decode()) }, - 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj(inner_ptr.cast())), - 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj(inner_ptr.cast())), + 5 => ConstantInfo::RPrj(IxonRecursorProj::new(ctor.get(0)).decode()), + 6 => ConstantInfo::IPrj(IxonInductiveProj::new(ctor.get(0)).decode()), 7 => { - IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner_ptr.cast())) + ConstantInfo::DPrj(IxonDefinitionProj::new(ctor.get(0)).decode()) }, 8 => { - let muts = lean_array_to_vec(inner_ptr.cast(), decode_ixon_mut_const); - IxonConstantInfo::Muts(muts) + let arr = unsafe { LeanArray::from_raw(ctor.get(0).as_ptr()) }; + let muts = arr.map(|m| IxonMutConst::new(m).decode()); + ConstantInfo::Muts(muts) }, - _ => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), + tag => panic!("Invalid Ixon.ConstantInfo tag: {tag}"), } } } -/// Decode Ixon.Constant. 
-pub fn decode_ixon_constant(ptr: *const c_void) -> IxonConstant { - unsafe { - let info_ptr = lean_ctor_get(ptr as *mut _, 0); - let sharing_ptr = lean_ctor_get(ptr as *mut _, 1); - let refs_ptr = lean_ctor_get(ptr as *mut _, 2); - let univs_ptr = lean_ctor_get(ptr as *mut _, 3); - - IxonConstant { - info: decode_ixon_constant_info(info_ptr.cast()), - sharing: decode_ixon_expr_array(sharing_ptr.cast()), - refs: decode_ixon_address_array(refs_ptr.cast()), - univs: decode_ixon_univ_array(univs_ptr.cast()), +impl IxonConstant { + /// Build Ixon.Constant + pub fn build(constant: &Constant) -> Self { + let info_obj = IxonConstantInfo::build(&constant.info); + let sharing_obj = IxonExpr::build_array(&constant.sharing); + let refs_obj = IxAddress::build_array(&constant.refs); + let univs_obj = IxonUniv::build_array(&constant.univs); + let ctor = LeanCtor::alloc(0, 4, 0); + ctor.set(0, info_obj); + ctor.set(1, sharing_obj); + ctor.set(2, refs_obj); + ctor.set(3, univs_obj); + Self::new(*ctor) + } + + /// Decode Ixon.Constant. + pub fn decode(self) -> Constant { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + Constant { + info: IxonConstantInfo::new(ctor.get(0)).decode(), + sharing: IxonExpr::decode_array(ctor.get(1)), + refs: IxAddress::decode_array(ctor.get(2)), + univs: IxonUniv::decode_array(ctor.get(3)), } } } @@ -684,124 +531,102 @@ pub fn decode_ixon_constant(ptr: *const c_void) -> IxonConstant { /// Round-trip Ixon.Definition. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition( - ptr: *const c_void, -) -> *mut c_void { - let def = decode_ixon_definition(ptr); - build_ixon_definition(&def) +pub extern "C" fn rs_roundtrip_ixon_definition(obj: LeanObj) -> LeanObj { + let def = IxonDefinition::new(obj).decode(); + IxonDefinition::build(&def).into() } /// Round-trip Ixon.Recursor. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor( - ptr: *const c_void, -) -> *mut c_void { - let rec = decode_ixon_recursor(ptr); - build_ixon_recursor(&rec) +pub extern "C" fn rs_roundtrip_ixon_recursor(obj: LeanObj) -> LeanObj { + let rec = IxonRecursor::new(obj).decode(); + IxonRecursor::build(&rec).into() } /// Round-trip Ixon.Axiom. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_axiom(ptr: *const c_void) -> *mut c_void { - let ax = decode_ixon_axiom(ptr); - build_ixon_axiom(&ax) +pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanObj) -> LeanObj { + let ax = IxonAxiom::new(obj).decode(); + IxonAxiom::build(&ax).into() } /// Round-trip Ixon.Quotient. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_quotient( - ptr: *const c_void, -) -> *mut c_void { - let quot = decode_ixon_quotient(ptr); - build_ixon_quotient(") +pub extern "C" fn rs_roundtrip_ixon_quotient(obj: LeanObj) -> LeanObj { + let quot = IxonQuotient::new(obj).decode(); + IxonQuotient::build(").into() } /// Round-trip Ixon.ConstantInfo. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant_info( - ptr: *const c_void, -) -> *mut c_void { - let info = decode_ixon_constant_info(ptr); - build_ixon_constant_info(&info) +pub extern "C" fn rs_roundtrip_ixon_constant_info(obj: LeanObj) -> LeanObj { + let info = IxonConstantInfo::new(obj).decode(); + IxonConstantInfo::build(&info).into() } /// Round-trip Ixon.Constant. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant( - ptr: *const c_void, -) -> *mut c_void { - let constant = decode_ixon_constant(ptr); - build_ixon_constant(&constant) +pub extern "C" fn rs_roundtrip_ixon_constant(obj: LeanObj) -> LeanObj { + let constant = IxonConstant::new(obj).decode(); + IxonConstant::build(&constant).into() } /// Round-trip Ixon.RecursorRule. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor_rule( - ptr: *const c_void, -) -> *mut c_void { - let rule = decode_ixon_recursor_rule(ptr); - build_ixon_recursor_rule(&rule) +pub extern "C" fn rs_roundtrip_ixon_recursor_rule(obj: LeanObj) -> LeanObj { + let rule = IxonRecursorRule::new(obj).decode(); + IxonRecursorRule::build(&rule).into() } /// Round-trip Ixon.Constructor. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constructor( - ptr: *const c_void, -) -> *mut c_void { - let ctor = decode_ixon_constructor(ptr); - build_ixon_constructor(&ctor) +pub extern "C" fn rs_roundtrip_ixon_constructor(obj: LeanObj) -> LeanObj { + let c = IxonConstructor::new(obj).decode(); + IxonConstructor::build(&c).into() } /// Round-trip Ixon.Inductive. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_inductive( - ptr: *const c_void, -) -> *mut c_void { - let ind = decode_ixon_inductive(ptr); - build_ixon_inductive(&ind) +pub extern "C" fn rs_roundtrip_ixon_inductive(obj: LeanObj) -> LeanObj { + let ind = IxonInductive::new(obj).decode(); + IxonInductive::build(&ind).into() } /// Round-trip Ixon.InductiveProj. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_inductive_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_inductive_proj(ptr); - build_inductive_proj(&proj) +pub extern "C" fn rs_roundtrip_ixon_inductive_proj(obj: LeanObj) -> LeanObj { + let proj = IxonInductiveProj::new(obj).decode(); + IxonInductiveProj::build(&proj).into() } /// Round-trip Ixon.ConstructorProj. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_constructor_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_constructor_proj(ptr); - build_constructor_proj(&proj) + obj: LeanObj, +) -> LeanObj { + let proj = IxonConstructorProj::new(obj).decode(); + IxonConstructorProj::build(&proj).into() } /// Round-trip Ixon.RecursorProj. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_recursor_proj(ptr); - build_recursor_proj(&proj) +pub extern "C" fn rs_roundtrip_ixon_recursor_proj(obj: LeanObj) -> LeanObj { + let proj = IxonRecursorProj::new(obj).decode(); + IxonRecursorProj::build(&proj).into() } /// Round-trip Ixon.DefinitionProj. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_definition_proj( - ptr: *const c_void, -) -> *mut c_void { - let proj = decode_ixon_definition_proj(ptr); - build_definition_proj(&proj) + obj: LeanObj, +) -> LeanObj { + let proj = IxonDefinitionProj::new(obj).decode(); + IxonDefinitionProj::build(&proj).into() } /// Round-trip Ixon.MutConst. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_mut_const( - ptr: *const c_void, -) -> *mut c_void { - let mc = decode_ixon_mut_const(ptr); - build_mut_const(&mc) +pub extern "C" fn rs_roundtrip_ixon_mut_const(obj: LeanObj) -> LeanObj { + let mc = IxonMutConst::new(obj).decode(); + IxonMutConst::build(&mc).into() } diff --git a/src/lean/ffi/ixon/env.rs b/src/lean/ffi/ixon/env.rs index 8b1196b2..4323cd90 100644 --- a/src/lean/ffi/ixon/env.rs +++ b/src/lean/ffi/ixon/env.rs @@ -3,30 +3,22 @@ //! Provides full decode/build cycle for RawEnv and its component types: //! RawConst, RawNamed, RawBlob, RawComm. 
-use std::ffi::c_void; - use crate::ix::address::Address; use crate::ix::env::Name; use crate::ix::ixon::comm::Comm; -use crate::ix::ixon::constant::Constant as IxonConstant; -use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; +use crate::ix::ixon::constant::Constant; +use crate::ix::ixon::env::{Env as IxonEnv, Named}; use crate::ix::ixon::metadata::ConstantMeta; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_alloc_sarray, lean_array_set_core, - lean_ctor_get, lean_ctor_set, lean_mk_string, lean_sarray_cptr, +use crate::lean::obj::{ + IxAddress, IxonComm, IxonConstant, IxonConstantMeta, IxonNamed, IxonRawEnv, + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObj, }; -use crate::lean::{lean_array_to_vec, lean_sarray_data}; -use super::constant::{ - build_address_from_ixon, build_ixon_constant, decode_ixon_address, - decode_ixon_constant, -}; -use super::meta::{build_constant_meta, decode_constant_meta}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= -// Comm Type (secret: Address, payload: Address) +// Decoded types — intermediate Rust representations // ============================================================================= /// Decoded Ixon.Comm @@ -35,87 +27,95 @@ pub struct DecodedComm { pub payload: Address, } -/// Decode Ixon.Comm from Lean pointer. -/// Comm = { secret : Address, payload : Address } -pub fn decode_comm(ptr: *const c_void) -> DecodedComm { - unsafe { - let secret_ptr = lean_ctor_get(ptr as *mut _, 0); - let payload_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedComm { - secret: decode_ixon_address(secret_ptr.cast()), - payload: decode_ixon_address(payload_ptr.cast()), - } - } +/// Decoded Ixon.RawConst +pub struct DecodedRawConst { + pub addr: Address, + pub constant: crate::ix::ixon::constant::Constant, } -/// Build Ixon.Comm Lean object. 
-pub fn build_comm(comm: &DecodedComm) -> *mut c_void { - unsafe { - let secret_obj = build_address_from_ixon(&comm.secret); - let payload_obj = build_address_from_ixon(&comm.payload); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, secret_obj.cast()); - lean_ctor_set(obj, 1, payload_obj.cast()); - obj.cast() - } +/// Decoded Ixon.RawNamed +pub struct DecodedRawNamed { + pub name: Name, + pub addr: Address, + pub const_meta: ConstantMeta, } -// ============================================================================= -// RawConst (addr: Address, const: Constant) -// ============================================================================= +/// Decoded Ixon.RawBlob +pub struct DecodedRawBlob { + pub addr: Address, + pub bytes: Vec, +} -/// Decoded Ixon.RawConst -pub struct DecodedRawConst { +/// Decoded Ixon.RawComm +pub struct DecodedRawComm { pub addr: Address, - pub constant: IxonConstant, + pub comm: DecodedComm, } -/// Decode Ixon.RawConst from Lean pointer. -pub fn decode_raw_const(ptr: *const c_void) -> DecodedRawConst { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let const_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedRawConst { - addr: decode_ixon_address(addr_ptr.cast()), - constant: decode_ixon_constant(const_ptr.cast()), - } - } +/// Decoded Ixon.RawNameEntry +pub struct DecodedRawNameEntry { + pub addr: Address, + pub name: Name, } -/// Build Ixon.RawConst Lean object. 
-pub fn build_raw_const(rc: &DecodedRawConst) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(&rc.addr); - let const_obj = build_ixon_constant(&rc.constant); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, const_obj.cast()); - obj.cast() - } +/// Decoded Ixon.RawEnv +pub struct DecodedRawEnv { + pub consts: Vec, + pub named: Vec, + pub blobs: Vec, + pub comms: Vec, + pub names: Vec, } // ============================================================================= -// RawNamed (name: Ix.Name, addr: Address, constMeta: ConstantMeta) +// Build/Decode functions for sub-types // ============================================================================= -/// Decoded Ixon.RawNamed -pub struct DecodedRawNamed { - pub name: Name, - pub addr: Address, - pub const_meta: ConstantMeta, +/// Decode Ixon.Comm from Lean pointer. +pub fn decode_comm(obj: LeanObj) -> DecodedComm { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ba0 = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let ba1 = unsafe { LeanByteArray::from_raw(ctor.get(1).as_ptr()) }; + DecodedComm { + secret: ba0.decode_ixon(), + payload: ba1.decode_ixon(), + } +} + +/// Build Ixon.Comm Lean object. +pub fn build_comm(comm: &DecodedComm) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, IxAddress::build_from_ixon(&comm.secret)); + ctor.set(1, IxAddress::build_from_ixon(&comm.payload)); + *ctor +} + +/// Decode Ixon.RawConst from Lean pointer. +pub fn decode_raw_const(obj: LeanObj) -> DecodedRawConst { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DecodedRawConst { + addr: ba.decode_ixon(), + constant: IxonConstant::new(ctor.get(1)).decode(), + } +} + +/// Build Ixon.RawConst Lean object. 
+pub fn build_raw_const(rc: &DecodedRawConst) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, IxAddress::build_from_ixon(&rc.addr)); + ctor.set(1, IxonConstant::build(&rc.constant)); + *ctor } /// Decode Ixon.RawNamed from Lean pointer. -pub fn decode_raw_named(ptr: *const c_void) -> DecodedRawNamed { - unsafe { - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let addr_ptr = lean_ctor_get(ptr as *mut _, 1); - let meta_ptr = lean_ctor_get(ptr as *mut _, 2); - DecodedRawNamed { - name: decode_ix_name(name_ptr.cast()), - addr: decode_ixon_address(addr_ptr.cast()), - const_meta: decode_constant_meta(meta_ptr.cast()), - } +pub fn decode_raw_named(obj: LeanObj) -> DecodedRawNamed { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ba = unsafe { LeanByteArray::from_raw(ctor.get(1).as_ptr()) }; + DecodedRawNamed { + name: decode_ix_name(ctor.get(0).as_ptr()), + addr: ba.decode_ixon(), + const_meta: IxonConstantMeta::new(ctor.get(2)).decode(), } } @@ -123,111 +123,60 @@ pub fn decode_raw_named(ptr: *const c_void) -> DecodedRawNamed { pub fn build_raw_named( cache: &mut LeanBuildCache, rn: &DecodedRawNamed, -) -> *mut c_void { - unsafe { - let name_obj = build_name(cache, &rn.name); - let addr_obj = build_address_from_ixon(&rn.addr); - let meta_obj = build_constant_meta(&rn.const_meta); - let obj = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(obj, 0, name_obj.as_mut_ptr().cast()); - lean_ctor_set(obj, 1, addr_obj.cast()); - lean_ctor_set(obj, 2, meta_obj.cast()); - obj.cast() - } -} - -// ============================================================================= -// RawBlob (addr: Address, bytes: ByteArray) -// ============================================================================= - -/// Decoded Ixon.RawBlob -pub struct DecodedRawBlob { - pub addr: Address, - pub bytes: Vec, +) -> LeanObj { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, build_name(cache, &rn.name)); + ctor.set(1, IxAddress::build_from_ixon(&rn.addr)); + 
ctor.set(2, IxonConstantMeta::build(&rn.const_meta)); + *ctor } /// Decode Ixon.RawBlob from Lean pointer. -pub fn decode_raw_blob(ptr: *const c_void) -> DecodedRawBlob { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let bytes_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedRawBlob { - addr: decode_ixon_address(addr_ptr.cast()), - bytes: lean_sarray_data(bytes_ptr.cast()).to_vec(), - } +pub fn decode_raw_blob(obj: LeanObj) -> DecodedRawBlob { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ba_addr = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let ba = unsafe { LeanByteArray::from_raw(ctor.get(1).as_ptr()) }; + DecodedRawBlob { + addr: ba_addr.decode_ixon(), + bytes: ba.as_bytes().to_vec(), } } /// Build Ixon.RawBlob Lean object. -pub fn build_raw_blob(rb: &DecodedRawBlob) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(&rb.addr); - // Build ByteArray (SArray UInt8) - let len = rb.bytes.len(); - let bytes_obj = lean_alloc_sarray(1, len, len); - let data_ptr = lean_sarray_cptr(bytes_obj); - std::ptr::copy_nonoverlapping(rb.bytes.as_ptr(), data_ptr, len); - - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, bytes_obj); - obj.cast() - } -} - -// ============================================================================= -// RawComm (addr: Address, comm: Comm) -// ============================================================================= - -/// Decoded Ixon.RawComm -pub struct DecodedRawComm { - pub addr: Address, - pub comm: DecodedComm, +pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, IxAddress::build_from_ixon(&rb.addr)); + ctor.set(1, LeanByteArray::from_bytes(&rb.bytes)); + *ctor } /// Decode Ixon.RawComm from Lean pointer. 
-pub fn decode_raw_comm(ptr: *const c_void) -> DecodedRawComm { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let comm_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedRawComm { - addr: decode_ixon_address(addr_ptr.cast()), - comm: decode_comm(comm_ptr.cast()), - } +pub fn decode_raw_comm(obj: LeanObj) -> DecodedRawComm { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + DecodedRawComm { + addr: { + let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + ba.decode_ixon() + }, + comm: decode_comm(ctor.get(1)), } } /// Build Ixon.RawComm Lean object. -pub fn build_raw_comm(rc: &DecodedRawComm) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(&rc.addr); - let comm_obj = build_comm(&rc.comm); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, comm_obj.cast()); - obj.cast() - } -} - -// ============================================================================= -// RawNameEntry (addr: Address, name: Ix.Name) -// ============================================================================= - -/// Decoded Ixon.RawNameEntry -pub struct DecodedRawNameEntry { - pub addr: Address, - pub name: Name, +pub fn build_raw_comm(rc: &DecodedRawComm) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, IxAddress::build_from_ixon(&rc.addr)); + ctor.set(1, build_comm(&rc.comm)); + *ctor } /// Decode Ixon.RawNameEntry from Lean pointer. 
-pub fn decode_raw_name_entry(ptr: *const c_void) -> DecodedRawNameEntry { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let name_ptr = lean_ctor_get(ptr as *mut _, 1); - DecodedRawNameEntry { - addr: decode_ixon_address(addr_ptr.cast()), - name: decode_ix_name(name_ptr.cast()), - } +pub fn decode_raw_name_entry(obj: LeanObj) -> DecodedRawNameEntry { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DecodedRawNameEntry { + addr: ba.decode_ixon(), + name: decode_ix_name(ctor.get(1).as_ptr()), } } @@ -236,100 +185,84 @@ pub fn build_raw_name_entry( cache: &mut LeanBuildCache, addr: &Address, name: &Name, -) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let name_obj = build_name(cache, name); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, name_obj.as_mut_ptr().cast()); - obj.cast() - } +) -> LeanObj { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, IxAddress::build_from_ixon(addr)); + ctor.set(1, build_name(cache, name)); + *ctor } // ============================================================================= -// RawEnv (consts, named, blobs, comms, names) +// IxonRawEnv methods // ============================================================================= -/// Decoded Ixon.RawEnv -pub struct DecodedRawEnv { - pub consts: Vec, - pub named: Vec, - pub blobs: Vec, - pub comms: Vec, - pub names: Vec, -} - -/// Decode Ixon.RawEnv from Lean pointer. -pub fn decode_raw_env(ptr: *const c_void) -> DecodedRawEnv { - unsafe { - let consts_ptr = lean_ctor_get(ptr as *mut _, 0); - let named_ptr = lean_ctor_get(ptr as *mut _, 1); - let blobs_ptr = lean_ctor_get(ptr as *mut _, 2); - let comms_ptr = lean_ctor_get(ptr as *mut _, 3); - let names_ptr = lean_ctor_get(ptr as *mut _, 4); +impl IxonRawEnv { + /// Decode Ixon.RawEnv from Lean pointer. 
+ pub fn decode_all(obj: LeanObj) -> DecodedRawEnv { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let consts_arr = unsafe { LeanArray::from_raw(ctor.get(0).as_ptr()) }; + let named_arr = unsafe { LeanArray::from_raw(ctor.get(1).as_ptr()) }; + let blobs_arr = unsafe { LeanArray::from_raw(ctor.get(2).as_ptr()) }; + let comms_arr = unsafe { LeanArray::from_raw(ctor.get(3).as_ptr()) }; + let names_arr = unsafe { LeanArray::from_raw(ctor.get(4).as_ptr()) }; DecodedRawEnv { - consts: lean_array_to_vec(consts_ptr.cast(), decode_raw_const), - named: lean_array_to_vec(named_ptr.cast(), decode_raw_named), - blobs: lean_array_to_vec(blobs_ptr.cast(), decode_raw_blob), - comms: lean_array_to_vec(comms_ptr.cast(), decode_raw_comm), - names: lean_array_to_vec(names_ptr.cast(), decode_raw_name_entry), + consts: consts_arr.map(decode_raw_const), + named: named_arr.map(decode_raw_named), + blobs: blobs_arr.map(decode_raw_blob), + comms: comms_arr.map(decode_raw_comm), + names: names_arr.map(decode_raw_name_entry), } } -} -/// Build Ixon.RawEnv Lean object. -pub fn build_raw_env(env: &DecodedRawEnv) -> *mut c_void { - unsafe { + /// Build Ixon.RawEnv Lean object. 
+ pub fn build_all(env: &DecodedRawEnv) -> LeanObj { let mut cache = LeanBuildCache::new(); - // Build consts array - let consts_arr = lean_alloc_array(env.consts.len(), env.consts.len()); + let consts_arr = LeanArray::alloc(env.consts.len()); for (i, rc) in env.consts.iter().enumerate() { - let obj = build_raw_const(rc); - lean_array_set_core(consts_arr, i, obj.cast()); + consts_arr.set(i, build_raw_const(rc)); } - // Build named array - let named_arr = lean_alloc_array(env.named.len(), env.named.len()); + let named_arr = LeanArray::alloc(env.named.len()); for (i, rn) in env.named.iter().enumerate() { - let obj = build_raw_named(&mut cache, rn); - lean_array_set_core(named_arr, i, obj.cast()); + named_arr.set(i, build_raw_named(&mut cache, rn)); } - // Build blobs array - let blobs_arr = lean_alloc_array(env.blobs.len(), env.blobs.len()); + let blobs_arr = LeanArray::alloc(env.blobs.len()); for (i, rb) in env.blobs.iter().enumerate() { - let obj = build_raw_blob(rb); - lean_array_set_core(blobs_arr, i, obj.cast()); + blobs_arr.set(i, build_raw_blob(rb)); } - // Build comms array - let comms_arr = lean_alloc_array(env.comms.len(), env.comms.len()); + let comms_arr = LeanArray::alloc(env.comms.len()); for (i, rc) in env.comms.iter().enumerate() { - let obj = build_raw_comm(rc); - lean_array_set_core(comms_arr, i, obj.cast()); + comms_arr.set(i, build_raw_comm(rc)); } - // Build names array - let names_arr = lean_alloc_array(env.names.len(), env.names.len()); + let names_arr = LeanArray::alloc(env.names.len()); for (i, rn) in env.names.iter().enumerate() { - let obj = build_raw_name_entry(&mut cache, &rn.addr, &rn.name); - lean_array_set_core(names_arr, i, obj.cast()); + names_arr.set(i, build_raw_name_entry(&mut cache, &rn.addr, &rn.name)); } - // Build RawEnv structure - let obj = lean_alloc_ctor(0, 5, 0); - lean_ctor_set(obj, 0, consts_arr); - lean_ctor_set(obj, 1, named_arr); - lean_ctor_set(obj, 2, blobs_arr); - lean_ctor_set(obj, 3, comms_arr); - 
lean_ctor_set(obj, 4, names_arr); - obj.cast() + let ctor = LeanCtor::alloc(0, 5, 0); + ctor.set(0, consts_arr); + ctor.set(1, named_arr); + ctor.set(2, blobs_arr); + ctor.set(3, comms_arr); + ctor.set(4, names_arr); + *ctor } } +// Keep old names as aliases for backward compatibility in consumer code +pub fn decode_raw_env(obj: LeanObj) -> DecodedRawEnv { + IxonRawEnv::decode_all(obj) +} + +pub fn build_raw_env(env: &DecodedRawEnv) -> LeanObj { + IxonRawEnv::build_all(env) +} + // ============================================================================= // DecodedRawEnv ↔ IxonEnv Conversion Helpers // ============================================================================= @@ -344,7 +277,7 @@ pub fn decoded_to_ixon_env(decoded: &DecodedRawEnv) -> IxonEnv { env.store_name(rn.addr.clone(), rn.name.clone()); } for rn in &decoded.named { - let named = IxonNamed::new(rn.addr.clone(), rn.const_meta.clone()); + let named = crate::ix::ixon::env::Named::new(rn.addr.clone(), rn.const_meta.clone()); env.register_name(rn.name.clone(), named); } for rb in &decoded.blobs { @@ -410,21 +343,12 @@ pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { /// FFI: Serialize an Ixon.RawEnv → ByteArray via Rust's Env.put. Pure. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_ser_env(raw_env_ptr: *const c_void) -> *mut c_void { - let decoded = decode_raw_env(raw_env_ptr); +pub extern "C" fn rs_ser_env(raw_env_obj: LeanObj) -> LeanObj { + let decoded = decode_raw_env(raw_env_obj); let env = decoded_to_ixon_env(&decoded); let mut buf = Vec::new(); env.put(&mut buf).expect("Env serialization failed"); - - unsafe { - let ba = lean_alloc_sarray(1, buf.len(), buf.len()); - std::ptr::copy_nonoverlapping( - buf.as_ptr(), - lean_sarray_cptr(ba), - buf.len(), - ); - ba.cast() - } + LeanByteArray::from_bytes(&buf).into() } // ============================================================================= @@ -433,32 +357,19 @@ pub extern "C" fn rs_ser_env(raw_env_ptr: *const c_void) -> *mut c_void { /// FFI: Deserialize ByteArray → Except String Ixon.RawEnv via Rust's Env.get. Pure. #[unsafe(no_mangle)] -pub extern "C" fn rs_des_env(bytes_ptr: *const c_void) -> *mut c_void { - let data = lean_sarray_data(bytes_ptr); +pub extern "C" fn rs_des_env(bytes_obj: LeanObj) -> LeanObj { + let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let data = ba.as_bytes(); let mut slice: &[u8] = data; match IxonEnv::get(&mut slice) { Ok(env) => { let decoded = ixon_env_to_decoded(&env); let raw_env = build_raw_env(&decoded); - // Except.ok (tag 1) - unsafe { - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, raw_env.cast()); - obj.cast() - } + LeanExcept::ok(raw_env).into() }, Err(e) => { - // Except.error (tag 0) - let msg = std::ffi::CString::new(format!("rs_des_env: {}", e)) - .unwrap_or_else(|_| { - std::ffi::CString::new("rs_des_env: deserialization error").unwrap() - }); - unsafe { - let lean_str = lean_mk_string(msg.as_ptr()); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, lean_str); - obj.cast() - } + let msg = format!("rs_des_env: {e}"); + LeanExcept::error_string(&msg).into() }, } } diff --git a/src/lean/ffi/ixon/expr.rs b/src/lean/ffi/ixon/expr.rs index 730a51e0..41f7547c 
100644 --- a/src/lean/ffi/ixon/expr.rs +++ b/src/lean/ffi/ixon/expr.rs @@ -1,282 +1,194 @@ //! Ixon.Expr build/decode/roundtrip FFI. -use std::ffi::c_void; use std::sync::Arc; -use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_obj_tag, -}; -use crate::lean_unbox; +use crate::ix::ixon::expr::Expr; +use crate::lean::obj::{IxonExpr, LeanArray, LeanCtor, LeanObj}; -/// Build Ixon.Expr (12 constructors). -pub fn build_ixon_expr(expr: &IxonExpr) -> *mut c_void { - unsafe { - match expr { - IxonExpr::Sort(idx) => { - let obj = lean_alloc_ctor(0, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj.cast() - }, - IxonExpr::Var(idx) => { - let obj = lean_alloc_ctor(1, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj.cast() - }, - IxonExpr::Ref(ref_idx, univ_idxs) => { - let arr = lean_alloc_array(univ_idxs.len(), univ_idxs.len()); +/// Decode Array UInt64 from Lean. +/// UInt64 values in arrays are stored as: +/// - Scalars (odd pointers) for small values: use unbox_usize +/// - Heap objects (even pointers) with the u64 value at offset 8 +fn decode_u64_array(obj: LeanObj) -> Vec { + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + arr.map(|elem| { + if elem.is_scalar() { + elem.unbox_usize() as u64 + } else { + let ctor = unsafe { LeanCtor::from_raw(elem.as_ptr()) }; + ctor.scalar_u64(0, 0) + } + }) +} + +impl IxonExpr { + /// Build Ixon.Expr (12 constructors). 
+ pub fn build(expr: &Expr) -> Self { + let obj = match expr { + Expr::Sort(idx) => { + let ctor = LeanCtor::alloc(0, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + Expr::Var(idx) => { + let ctor = LeanCtor::alloc(1, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + Expr::Ref(ref_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); for (i, idx) in univ_idxs.iter().enumerate() { - // Build heap-boxed UInt64: ctor with tag 0, 0 obj fields, 8 scalar bytes - let uint64_obj = lean_alloc_ctor(0, 0, 8); - let base = uint64_obj.cast::(); - *base.add(8).cast::() = *idx; - lean_array_set_core(arr, i, uint64_obj); + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); } - let obj = lean_alloc_ctor(2, 1, 8); - lean_ctor_set(obj, 0, arr); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = *ref_idx; - obj.cast() + let ctor = LeanCtor::alloc(2, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *ref_idx); + *ctor }, - IxonExpr::Rec(rec_idx, univ_idxs) => { - let arr = lean_alloc_array(univ_idxs.len(), univ_idxs.len()); + Expr::Rec(rec_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); for (i, idx) in univ_idxs.iter().enumerate() { - let uint64_obj = lean_alloc_ctor(0, 0, 8); - let base = uint64_obj.cast::(); - *base.add(8).cast::() = *idx; - lean_array_set_core(arr, i, uint64_obj); + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); } - let obj = lean_alloc_ctor(3, 1, 8); - lean_ctor_set(obj, 0, arr); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = *rec_idx; - obj.cast() - }, - IxonExpr::Prj(type_ref_idx, field_idx, val) => { - let val_obj = build_ixon_expr(val); - let obj = lean_alloc_ctor(4, 1, 16); - lean_ctor_set(obj, 0, val_obj.cast()); - let base = obj.cast::(); - *base.add(8 + 8).cast::() = *type_ref_idx; - *base.add(8 + 16).cast::() = *field_idx; - obj.cast() - }, - IxonExpr::Str(ref_idx) => { - let obj = lean_alloc_ctor(5, 0, 8); - let 
base = obj.cast::(); - *base.add(8).cast::() = *ref_idx; - obj.cast() - }, - IxonExpr::Nat(ref_idx) => { - let obj = lean_alloc_ctor(6, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *ref_idx; - obj.cast() - }, - IxonExpr::App(fun, arg) => { - let fun_obj = build_ixon_expr(fun); - let arg_obj = build_ixon_expr(arg); - let obj = lean_alloc_ctor(7, 2, 0); - lean_ctor_set(obj, 0, fun_obj.cast()); - lean_ctor_set(obj, 1, arg_obj.cast()); - obj.cast() - }, - IxonExpr::Lam(ty, body) => { - let ty_obj = build_ixon_expr(ty); - let body_obj = build_ixon_expr(body); - let obj = lean_alloc_ctor(8, 2, 0); - lean_ctor_set(obj, 0, ty_obj.cast()); - lean_ctor_set(obj, 1, body_obj.cast()); - obj.cast() - }, - IxonExpr::All(ty, body) => { - let ty_obj = build_ixon_expr(ty); - let body_obj = build_ixon_expr(body); - let obj = lean_alloc_ctor(9, 2, 0); - lean_ctor_set(obj, 0, ty_obj.cast()); - lean_ctor_set(obj, 1, body_obj.cast()); - obj.cast() - }, - IxonExpr::Let(non_dep, ty, val, body) => { - let ty_obj = build_ixon_expr(ty); - let val_obj = build_ixon_expr(val); - let body_obj = build_ixon_expr(body); - let obj = lean_alloc_ctor(10, 3, 1); - lean_ctor_set(obj, 0, ty_obj.cast()); - lean_ctor_set(obj, 1, val_obj.cast()); - lean_ctor_set(obj, 2, body_obj.cast()); - let base = obj.cast::(); - *base.add(3 * 8 + 8) = if *non_dep { 1 } else { 0 }; - obj.cast() - }, - IxonExpr::Share(idx) => { - let obj = lean_alloc_ctor(11, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj.cast() - }, - } + let ctor = LeanCtor::alloc(3, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *rec_idx); + *ctor + }, + Expr::Prj(type_ref_idx, field_idx, val) => { + let val_obj = Self::build(val); + let ctor = LeanCtor::alloc(4, 1, 16); + ctor.set(0, val_obj); + ctor.set_u64(8, *type_ref_idx); + ctor.set_u64(16, *field_idx); + *ctor + }, + Expr::Str(ref_idx) => { + let ctor = LeanCtor::alloc(5, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + Expr::Nat(ref_idx) => { + let ctor = 
LeanCtor::alloc(6, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + Expr::App(fun, arg) => { + let fun_obj = Self::build(fun); + let arg_obj = Self::build(arg); + let ctor = LeanCtor::alloc(7, 2, 0); + ctor.set(0, fun_obj); + ctor.set(1, arg_obj); + *ctor + }, + Expr::Lam(ty, body) => { + let ty_obj = Self::build(ty); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(8, 2, 0); + ctor.set(0, ty_obj); + ctor.set(1, body_obj); + *ctor + }, + Expr::All(ty, body) => { + let ty_obj = Self::build(ty); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(9, 2, 0); + ctor.set(0, ty_obj); + ctor.set(1, body_obj); + *ctor + }, + Expr::Let(non_dep, ty, val, body) => { + let ty_obj = Self::build(ty); + let val_obj = Self::build(val); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(10, 3, 1); + ctor.set(0, ty_obj); + ctor.set(1, val_obj); + ctor.set(2, body_obj); + ctor.set_u8(3 * 8, if *non_dep { 1 } else { 0 }); + *ctor + }, + Expr::Share(idx) => { + let ctor = LeanCtor::alloc(11, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + }; + Self::new(obj) } -} -/// Build an Array of Ixon.Expr. -pub fn build_ixon_expr_array(exprs: &[Arc]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(exprs.len(), exprs.len()); + /// Build an Array of Ixon.Expr. + pub fn build_array(exprs: &[Arc]) -> LeanArray { + let arr = LeanArray::alloc(exprs.len()); for (i, expr) in exprs.iter().enumerate() { - let expr_obj = build_ixon_expr(expr); - lean_array_set_core(arr, i, expr_obj.cast()); + arr.set(i, Self::build(expr)); } - arr.cast() + arr } -} -// ============================================================================= -// Decode Functions -// ============================================================================= - -/// Decode Array UInt64 from Lean. 
-/// UInt64 values in arrays are stored as: -/// - Scalars (odd pointers) for small values: use lean_unbox -/// - Heap objects (even pointers) with the u64 value at offset 8 -fn decode_u64_array(ptr: *const c_void) -> Vec { - use crate::lean::lean_is_scalar; - - crate::lean::lean_array_data(ptr) - .iter() - .map(|&elem| { - if lean_is_scalar(elem) { - // Small scalar value - lean_unbox!(u64, elem) - } else { - // Heap-boxed UInt64: value is at offset 8 (after 8-byte header) - unsafe { - let base = elem.cast::(); - *base.add(8).cast::() - } - } - }) - .collect() -} - -/// Decode Ixon.Expr (12 constructors). -pub fn decode_ixon_expr(ptr: *const c_void) -> IxonExpr { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // sort (idx : UInt64) - let base = ptr.cast::(); - let idx = *base.add(8).cast::(); - IxonExpr::Sort(idx) - }, - 1 => { - // var (idx : UInt64) - let base = ptr.cast::(); - let idx = *base.add(8).cast::(); - IxonExpr::Var(idx) - }, + /// Decode Ixon.Expr (12 constructors). 
+ pub fn decode(self) -> Expr { + let obj: LeanObj = *self; + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { + 0 => Expr::Sort(ctor.scalar_u64(0, 0)), + 1 => Expr::Var(ctor.scalar_u64(0, 0)), 2 => { - // ref (refIdx : UInt64) (univIdxs : Array UInt64) - let arr_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let ref_idx = *base.add(8 + 8).cast::(); - let univ_idxs = decode_u64_array(arr_ptr.cast()); - IxonExpr::Ref(ref_idx, univ_idxs) + let ref_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(ctor.get(0)); + Expr::Ref(ref_idx, univ_idxs) }, 3 => { - // recur (recIdx : UInt64) (univIdxs : Array UInt64) - let arr_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let rec_idx = *base.add(8 + 8).cast::(); - let univ_idxs = decode_u64_array(arr_ptr.cast()); - IxonExpr::Rec(rec_idx, univ_idxs) + let rec_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(ctor.get(0)); + Expr::Rec(rec_idx, univ_idxs) }, 4 => { - // prj (typeRefIdx : UInt64) (fieldIdx : UInt64) (val : Expr) - let val_ptr = lean_ctor_get(ptr as *mut _, 0); - let base = ptr.cast::(); - let type_ref_idx = *base.add(8 + 8).cast::(); - let field_idx = *base.add(8 + 16).cast::(); - IxonExpr::Prj( + let type_ref_idx = ctor.scalar_u64(1, 0); + let field_idx = ctor.scalar_u64(1, 8); + Expr::Prj( type_ref_idx, field_idx, - Arc::new(decode_ixon_expr(val_ptr.cast())), - ) - }, - 5 => { - // str (refIdx : UInt64) - let base = ptr.cast::(); - let ref_idx = *base.add(8).cast::(); - IxonExpr::Str(ref_idx) - }, - 6 => { - // nat (refIdx : UInt64) - let base = ptr.cast::(); - let ref_idx = *base.add(8).cast::(); - IxonExpr::Nat(ref_idx) - }, - 7 => { - // app (f a : Expr) - let f_ptr = lean_ctor_get(ptr as *mut _, 0); - let a_ptr = lean_ctor_get(ptr as *mut _, 1); - IxonExpr::App( - Arc::new(decode_ixon_expr(f_ptr.cast())), - Arc::new(decode_ixon_expr(a_ptr.cast())), - ) - }, - 8 => { - // lam (ty body : Expr) - let ty_ptr = 
lean_ctor_get(ptr as *mut _, 0); - let body_ptr = lean_ctor_get(ptr as *mut _, 1); - IxonExpr::Lam( - Arc::new(decode_ixon_expr(ty_ptr.cast())), - Arc::new(decode_ixon_expr(body_ptr.cast())), - ) - }, - 9 => { - // all (ty body : Expr) - let ty_ptr = lean_ctor_get(ptr as *mut _, 0); - let body_ptr = lean_ctor_get(ptr as *mut _, 1); - IxonExpr::All( - Arc::new(decode_ixon_expr(ty_ptr.cast())), - Arc::new(decode_ixon_expr(body_ptr.cast())), + Arc::new(Self::new(ctor.get(0)).decode()), ) }, + 5 => Expr::Str(ctor.scalar_u64(0, 0)), + 6 => Expr::Nat(ctor.scalar_u64(0, 0)), + 7 => Expr::App( + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + ), + 8 => Expr::Lam( + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + ), + 9 => Expr::All( + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + ), 10 => { - // letE (nonDep : Bool) (ty val body : Expr) - let ty_ptr = lean_ctor_get(ptr as *mut _, 0); - let val_ptr = lean_ctor_get(ptr as *mut _, 1); - let body_ptr = lean_ctor_get(ptr as *mut _, 2); - let base = ptr.cast::(); - let non_dep = *base.add(3 * 8 + 8) != 0; - IxonExpr::Let( + let non_dep = ctor.scalar_bool(3, 0); + Expr::Let( non_dep, - Arc::new(decode_ixon_expr(ty_ptr.cast())), - Arc::new(decode_ixon_expr(val_ptr.cast())), - Arc::new(decode_ixon_expr(body_ptr.cast())), + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + Arc::new(Self::new(ctor.get(2)).decode()), ) }, - 11 => { - // share (idx : UInt64) - let base = ptr.cast::(); - let idx = *base.add(8).cast::(); - IxonExpr::Share(idx) - }, - _ => panic!("Invalid Ixon.Expr tag: {}", tag), + 11 => Expr::Share(ctor.scalar_u64(0, 0)), + tag => panic!("Invalid Ixon.Expr tag: {tag}"), } } -} -/// Decode Array Ixon.Expr. 
-pub fn decode_ixon_expr_array(ptr: *const c_void) -> Vec> { - crate::lean::lean_array_data(ptr) - .iter() - .map(|&e| Arc::new(decode_ixon_expr(e))) - .collect() + /// Decode Array Ixon.Expr. + pub fn decode_array(obj: LeanObj) -> Vec> { + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + arr.map(|e| Arc::new(Self::new(e).decode())) + } } // ============================================================================= @@ -285,7 +197,7 @@ pub fn decode_ixon_expr_array(ptr: *const c_void) -> Vec> { /// Round-trip Ixon.Expr. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr(ptr: *const c_void) -> *mut c_void { - let expr = decode_ixon_expr(ptr); - build_ixon_expr(&expr) +pub extern "C" fn rs_roundtrip_ixon_expr(obj: LeanObj) -> LeanObj { + let expr = IxonExpr::new(obj).decode(); + IxonExpr::build(&expr).into() } diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs index 4a1bf5dd..1ef42dac 100644 --- a/src/lean/ffi/ixon/meta.rs +++ b/src/lean/ffi/ixon/meta.rs @@ -2,27 +2,19 @@ //! //! 
Includes: DataValue, KVMap, ExprMetaData, ExprMetaArena, ConstantMeta, Named, Comm -use std::ffi::c_void; - use crate::ix::address::Address; use crate::ix::env::BinderInfo; use crate::ix::ixon::Comm; use crate::ix::ixon::env::Named; use crate::ix::ixon::metadata::{ - ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, -}; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_ctor_set_uint8, lean_ctor_set_uint64, lean_obj_tag, + ConstantMeta, DataValue, ExprMeta, ExprMetaData, KVMap, }; -use crate::lean::{ - lean_array_data, lean_array_to_vec, lean_box_fn, lean_ctor_scalar_u8, - lean_ctor_scalar_u64, lean_is_scalar, +use crate::lean::obj::{ + IxAddress, IxonComm, IxonConstantMeta, IxonDataValue, IxonExprMetaArena, + IxonExprMetaData, IxonNamed, LeanArray, LeanCtor, LeanObj, }; -use super::constant::{ - build_address_array, build_address_from_ixon, decode_ixon_address, -}; +use super::constant::*; use crate::lean::ffi::ix::constant::{ build_reducibility_hints, decode_reducibility_hints, }; @@ -32,79 +24,73 @@ use crate::lean::ffi::ix::expr::binder_info_to_u8; // DataValue Build/Decode // ============================================================================= -/// Build Ixon.DataValue (for metadata) -pub fn build_ixon_data_value(dv: &IxonDataValue) -> *mut c_void { - unsafe { - match dv { - IxonDataValue::OfString(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(0, 1, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - obj.cast() - }, - IxonDataValue::OfBool(b) => { - let obj = lean_alloc_ctor(1, 0, 1); - lean_ctor_set_uint8(obj, 0, if *b { 1 } else { 0 }); - obj.cast() - }, - IxonDataValue::OfName(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(2, 1, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - obj.cast() - }, - IxonDataValue::OfNat(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj 
= lean_alloc_ctor(3, 1, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - obj.cast() - }, - IxonDataValue::OfInt(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - obj.cast() - }, - IxonDataValue::OfSyntax(addr) => { - let addr_obj = build_address_from_ixon(addr); - let obj = lean_alloc_ctor(5, 1, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - obj.cast() - }, - } +impl IxonDataValue { + /// Build Ixon.DataValue (for metadata) + pub fn build(dv: &DataValue) -> Self { + let obj = match dv { + DataValue::OfString(addr) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, IxAddress::build_from_ixon(addr)); + *ctor + }, + DataValue::OfBool(b) => { + let ctor = LeanCtor::alloc(1, 0, 1); + ctor.set_u8(0, if *b { 1 } else { 0 }); + *ctor + }, + DataValue::OfName(addr) => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, IxAddress::build_from_ixon(addr)); + *ctor + }, + DataValue::OfNat(addr) => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, IxAddress::build_from_ixon(addr)); + *ctor + }, + DataValue::OfInt(addr) => { + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, IxAddress::build_from_ixon(addr)); + *ctor + }, + DataValue::OfSyntax(addr) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, IxAddress::build_from_ixon(addr)); + *ctor + }, + }; + Self::new(obj) } -} -/// Decode Ixon.DataValue. -pub fn decode_ixon_data_value(ptr: *const c_void) -> IxonDataValue { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { + /// Decode Ixon.DataValue. 
+ pub fn decode(self) -> DataValue { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + match ctor.tag() { 0 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfString(decode_ixon_address(addr_ptr.cast())) + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DataValue::OfString(ba.decode_ixon()) }, 1 => { - let b = lean_ctor_scalar_u8(ptr, 0, 0) != 0; - IxonDataValue::OfBool(b) + let b = ctor.scalar_u8(0, 0) != 0; + DataValue::OfBool(b) }, 2 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfName(decode_ixon_address(addr_ptr.cast())) + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DataValue::OfName(ba.decode_ixon()) }, 3 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfNat(decode_ixon_address(addr_ptr.cast())) + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DataValue::OfNat(ba.decode_ixon()) }, 4 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfInt(decode_ixon_address(addr_ptr.cast())) + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DataValue::OfInt(ba.decode_ixon()) }, 5 => { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - IxonDataValue::OfSyntax(decode_ixon_address(addr_ptr.cast())) + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + DataValue::OfSyntax(ba.decode_ixon()) }, - _ => panic!("Invalid Ixon.DataValue tag: {}", tag), + tag => panic!("Invalid Ixon.DataValue tag: {tag}"), } } } @@ -114,51 +100,43 @@ pub fn decode_ixon_data_value(ptr: *const c_void) -> IxonDataValue { // ============================================================================= /// Build an Ixon.KVMap (Array (Address × DataValue)). 
-pub fn build_ixon_kvmap(kvmap: &KVMap) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(kvmap.len(), kvmap.len()); - for (i, (addr, dv)) in kvmap.iter().enumerate() { - let addr_obj = build_address_from_ixon(addr); - let dv_obj = build_ixon_data_value(dv); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, addr_obj.cast()); - lean_ctor_set(pair, 1, dv_obj.cast()); - lean_array_set_core(arr, i, pair); - } - arr.cast() +pub fn build_ixon_kvmap(kvmap: &KVMap) -> LeanArray { + let arr = LeanArray::alloc(kvmap.len()); + for (i, (addr, dv)) in kvmap.iter().enumerate() { + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, IxAddress::build_from_ixon(addr)); + pair.set(1, IxonDataValue::build(dv)); + arr.set(i, pair); } + arr } /// Build Array KVMap. -pub fn build_kvmap_array(kvmaps: &[KVMap]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(kvmaps.len(), kvmaps.len()); - for (i, kvmap) in kvmaps.iter().enumerate() { - let kvmap_obj = build_ixon_kvmap(kvmap); - lean_array_set_core(arr, i, kvmap_obj.cast()); - } - arr.cast() +pub fn build_kvmap_array(kvmaps: &[KVMap]) -> LeanArray { + let arr = LeanArray::alloc(kvmaps.len()); + for (i, kvmap) in kvmaps.iter().enumerate() { + arr.set(i, build_ixon_kvmap(kvmap)); } + arr } /// Decode KVMap (Array (Address × DataValue)). 
-pub fn decode_ixon_kvmap(ptr: *const c_void) -> KVMap { - lean_array_data(ptr) - .iter() - .map(|&pair| unsafe { - let addr_ptr = lean_ctor_get(pair as *mut _, 0); - let dv_ptr = lean_ctor_get(pair as *mut _, 1); - ( - decode_ixon_address(addr_ptr.cast()), - decode_ixon_data_value(dv_ptr.cast()), - ) - }) - .collect() +pub fn decode_ixon_kvmap(obj: LeanObj) -> KVMap { + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + arr.map(|pair| { + let pair_ctor = unsafe { LeanCtor::from_raw(pair.as_ptr()) }; + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(pair_ctor.get(0).as_ptr()) }; + ( + ba.decode_ixon(), + IxonDataValue::new(pair_ctor.get(1)).decode(), + ) + }) } /// Decode Array KVMap. -fn decode_kvmap_array(ptr: *const c_void) -> Vec { - lean_array_to_vec(ptr, decode_ixon_kvmap) +fn decode_kvmap_array(obj: LeanObj) -> Vec { + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + arr.map(decode_ixon_kvmap) } // ============================================================================= @@ -166,181 +144,152 @@ fn decode_kvmap_array(ptr: *const c_void) -> Vec { // ============================================================================= /// Decode Array Address. -fn decode_address_array(ptr: *const c_void) -> Vec
{ - lean_array_to_vec(ptr, decode_ixon_address) +fn decode_address_array(obj: LeanObj) -> Vec
{ + IxAddress::decode_array(obj) } /// Build Array UInt64. -fn build_u64_array(vals: &[u64]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(vals.len(), vals.len()); - for (i, &v) in vals.iter().enumerate() { - let obj = crate::lean::lean_box_u64(v); - lean_array_set_core(arr, i, obj.cast()); - } - arr.cast() +fn build_u64_array(vals: &[u64]) -> LeanArray { + let arr = LeanArray::alloc(vals.len()); + for (i, &v) in vals.iter().enumerate() { + arr.set(i, LeanObj::box_u64(v)); } + arr } /// Decode Array UInt64. -fn decode_u64_array(ptr: *const c_void) -> Vec { - lean_array_to_vec(ptr, crate::lean::lean_unbox_u64) +fn decode_u64_array(obj: LeanObj) -> Vec { + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + arr.map(|elem| elem.unbox_u64()) } // ============================================================================= // ExprMetaData Build/Decode // ============================================================================= -/// Build Ixon.ExprMetaData Lean object. -/// -/// | Variant | Tag | Obj fields | Scalar bytes | -/// |------------|-----|------------------------|--------------------------| -/// | leaf | 0 | 0 | 0 | -/// | app | 1 | 0 | 16 (2× u64) | -/// | binder | 2 | 1 (name: Address) | 17 (info: u8, 2× u64) | -/// | letBinder | 3 | 1 (name: Address) | 24 (3× u64) | -/// | ref | 4 | 1 (name: Address) | 0 | -/// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | -/// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | -pub fn build_expr_meta_data(node: &ExprMetaData) -> *mut c_void { - unsafe { - match node { - ExprMetaData::Leaf => lean_box_fn(0), +impl IxonExprMetaData { + /// Build Ixon.ExprMetaData Lean object. 
+ pub fn build(node: &ExprMetaData) -> Self { + let obj = match node { + ExprMetaData::Leaf => LeanObj::box_usize(0), ExprMetaData::App { children } => { - // Tag 1, 0 obj fields, 16 scalar bytes (2× u64) - let obj = lean_alloc_ctor(1, 0, 16); - lean_ctor_set_uint64(obj, 0, children[0]); - lean_ctor_set_uint64(obj, 8, children[1]); - obj.cast() + let ctor = LeanCtor::alloc(1, 0, 16); + ctor.set_u64(0, children[0]); + ctor.set_u64(8, children[1]); + *ctor }, ExprMetaData::Binder { name, info, children } => { - // Tag 2, 1 obj field (name), scalar: 2× u64 + u8 (info) - // Lean ABI sorts scalars by size descending: [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] - let obj = lean_alloc_ctor(2, 1, 17); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set_uint64(obj, 8, children[0]); - lean_ctor_set_uint64(obj, 8 + 8, children[1]); - lean_ctor_set_uint8(obj, 8 + 16, binder_info_to_u8(info)); - obj.cast() + let ctor = LeanCtor::alloc(2, 1, 17); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(8 + 8, children[1]); + ctor.set_u8(8 + 16, binder_info_to_u8(info)); + *ctor }, ExprMetaData::LetBinder { name, children } => { - // Tag 3, 1 obj field (name), 24 scalar bytes (3× u64) - let obj = lean_alloc_ctor(3, 1, 24); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set_uint64(obj, 8, children[0]); - lean_ctor_set_uint64(obj, 8 + 8, children[1]); - lean_ctor_set_uint64(obj, 8 + 16, children[2]); - obj.cast() + let ctor = LeanCtor::alloc(3, 1, 24); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(8 + 8, children[1]); + ctor.set_u64(8 + 16, children[2]); + *ctor }, ExprMetaData::Ref { name } => { - // Tag 4, 1 obj field (name), 0 scalar bytes - let obj = lean_alloc_ctor(4, 1, 0); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - obj.cast() + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, 
IxAddress::build_from_ixon(name)); + *ctor }, ExprMetaData::Prj { struct_name, child } => { - // Tag 5, 1 obj field (structName), 8 scalar bytes (1× u64) - let obj = lean_alloc_ctor(5, 1, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(struct_name).cast()); - lean_ctor_set_uint64(obj, 8, *child); - obj.cast() + let ctor = LeanCtor::alloc(5, 1, 8); + ctor.set(0, IxAddress::build_from_ixon(struct_name)); + ctor.set_u64(8, *child); + *ctor }, ExprMetaData::Mdata { mdata, child } => { - // Tag 6, 1 obj field (mdata: Array KVMap), 8 scalar bytes (1× u64) - let mdata_obj = build_kvmap_array(mdata); - let obj = lean_alloc_ctor(6, 1, 8); - lean_ctor_set(obj, 0, mdata_obj.cast()); - lean_ctor_set_uint64(obj, 8, *child); - obj.cast() + let ctor = LeanCtor::alloc(6, 1, 8); + ctor.set(0, build_kvmap_array(mdata)); + ctor.set_u64(8, *child); + *ctor }, - } + }; + Self::new(obj) } -} -/// Decode Ixon.ExprMetaData from Lean pointer. -pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { - unsafe { - // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) - if lean_is_scalar(ptr) { - let tag = (ptr as usize) >> 1; - assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {}", tag); + /// Decode Ixon.ExprMetaData from Lean pointer. 
+ pub fn decode(self) -> ExprMetaData { + let obj: LeanObj = *self; + if obj.is_scalar() { + let tag = obj.unbox_usize(); + assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {tag}"); return ExprMetaData::Leaf; } - let tag = lean_obj_tag(ptr as *mut _); - match tag { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { 1 => { - // app: 0 obj fields, 2× u64 scalar - let fun_ = lean_ctor_scalar_u64(ptr, 0, 0); - let arg = lean_ctor_scalar_u64(ptr, 0, 8); + let fun_ = ctor.scalar_u64(0, 0); + let arg = ctor.scalar_u64(0, 8); ExprMetaData::App { children: [fun_, arg] } }, 2 => { - // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): - // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_child = lean_ctor_scalar_u64(ptr, 1, 0); - let body_child = lean_ctor_scalar_u64(ptr, 1, 8); - let info_byte = lean_ctor_scalar_u8(ptr, 1, 16); + let ty_child = ctor.scalar_u64(1, 0); + let body_child = ctor.scalar_u64(1, 8); + let info_byte = ctor.scalar_u8(1, 16); let info = match info_byte { 0 => BinderInfo::Default, 1 => BinderInfo::Implicit, 2 => BinderInfo::StrictImplicit, 3 => BinderInfo::InstImplicit, - _ => panic!("Invalid BinderInfo tag: {}", info_byte), + _ => panic!("Invalid BinderInfo tag: {info_byte}"), }; + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; ExprMetaData::Binder { - name: decode_ixon_address(name_ptr.cast()), + name: ba.decode_ixon(), info, children: [ty_child, body_child], } }, 3 => { - // letBinder: 1 obj field (name), 3× u64 scalar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_child = lean_ctor_scalar_u64(ptr, 1, 0); - let val_child = lean_ctor_scalar_u64(ptr, 1, 8); - let body_child = lean_ctor_scalar_u64(ptr, 1, 16); + let ty_child = ctor.scalar_u64(1, 0); + let val_child = ctor.scalar_u64(1, 8); + let body_child = ctor.scalar_u64(1, 16); + let ba = unsafe { 
crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; ExprMetaData::LetBinder { - name: decode_ixon_address(name_ptr.cast()), + name: ba.decode_ixon(), children: [ty_child, val_child, body_child], } }, 4 => { - // ref: 1 obj field (name), 0 scalar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - ExprMetaData::Ref { name: decode_ixon_address(name_ptr.cast()) } + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + ExprMetaData::Ref { name: ba.decode_ixon() } }, 5 => { - // prj: 1 obj field (structName), 1× u64 scalar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let child = lean_ctor_scalar_u64(ptr, 1, 0); + let child = ctor.scalar_u64(1, 0); + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; ExprMetaData::Prj { - struct_name: decode_ixon_address(name_ptr.cast()), + struct_name: ba.decode_ixon(), child, } }, 6 => { - // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar - let mdata_ptr = lean_ctor_get(ptr as *mut _, 0); - let child = lean_ctor_scalar_u64(ptr, 1, 0); + let child = ctor.scalar_u64(1, 0); ExprMetaData::Mdata { - mdata: decode_kvmap_array(mdata_ptr.cast()), + mdata: decode_kvmap_array(ctor.get(0)), child, } }, - _ => panic!("Invalid Ixon.ExprMetaData tag: {}", tag), + tag => panic!("Invalid Ixon.ExprMetaData tag: {tag}"), } } } @@ -349,44 +298,35 @@ pub fn decode_expr_meta_data(ptr: *const c_void) -> ExprMetaData { // ExprMetaArena Build/Decode // ============================================================================= -/// Build Ixon.ExprMetaArena Lean object. -/// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), -/// which Lean unboxes — the value IS the Array directly. -pub fn build_expr_meta_arena(arena: &ExprMeta) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(arena.nodes.len(), arena.nodes.len()); +impl IxonExprMetaArena { + /// Build Ixon.ExprMetaArena Lean object. 
+ /// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), + /// which Lean unboxes — the value IS the Array directly. + pub fn build(arena: &ExprMeta) -> LeanArray { + let arr = LeanArray::alloc(arena.nodes.len()); for (i, node) in arena.nodes.iter().enumerate() { - lean_array_set_core(arr, i, build_expr_meta_data(node).cast()); + arr.set(i, IxonExprMetaData::build(node)); } - arr.cast() + arr } -} -/// Decode Ixon.ExprMetaArena from Lean pointer. -/// Single-field struct is unboxed — ptr IS the Array directly. -pub fn decode_expr_meta_arena(ptr: *const c_void) -> ExprMeta { - ExprMeta { nodes: lean_array_to_vec(ptr, decode_expr_meta_data) } + /// Decode Ixon.ExprMetaArena from Lean pointer. + /// Single-field struct is unboxed — obj IS the Array directly. + pub fn decode(obj: LeanObj) -> ExprMeta { + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + ExprMeta { nodes: arr.map(|n| IxonExprMetaData::new(n).decode()) } + } } // ============================================================================= // ConstantMeta Build/Decode // ============================================================================= -/// Build Ixon.ConstantMeta Lean object. -/// -/// | Variant | Tag | Obj fields | Scalar bytes | -/// |---------|-----|-----------|-------------| -/// | empty | 0 | 0 | 0 | -/// | defn | 1 | 6 (name, lvls, hints, all, ctx, arena) | 16 (2× u64) | -/// | axio | 2 | 3 (name, lvls, arena) | 8 (1× u64) | -/// | quot | 3 | 3 (name, lvls, arena) | 8 (1× u64) | -/// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | -/// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | -/// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | -pub fn build_constant_meta(meta: &ConstantMeta) -> *mut c_void { - unsafe { - match meta { - ConstantMeta::Empty => lean_box_fn(0), +impl IxonConstantMeta { + /// Build Ixon.ConstantMeta Lean object. 
+ pub fn build(meta: &ConstantMeta) -> Self { + let obj = match meta { + ConstantMeta::Empty => LeanObj::box_usize(0), ConstantMeta::Def { name, @@ -398,56 +338,56 @@ pub fn build_constant_meta(meta: &ConstantMeta) -> *mut c_void { type_root, value_root, } => { - let obj = lean_alloc_ctor(1, 6, 16); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set(obj, 1, build_address_array(lvls).cast()); - lean_ctor_set(obj, 2, build_reducibility_hints(hints).as_mut_ptr().cast()); - lean_ctor_set(obj, 3, build_address_array(all).cast()); - lean_ctor_set(obj, 4, build_address_array(ctx).cast()); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena).cast()); - lean_ctor_set_uint64(obj, 6 * 8, *type_root); - lean_ctor_set_uint64(obj, 6 * 8 + 8, *value_root); - obj.cast() + let ctor = LeanCtor::alloc(1, 6, 16); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set(1, IxAddress::build_array(lvls)); + ctor.set(2, build_reducibility_hints(hints)); + ctor.set(3, IxAddress::build_array(all)); + ctor.set(4, IxAddress::build_array(ctx)); + ctor.set(5, IxonExprMetaArena::build(arena)); + ctor.set_u64(6 * 8, *type_root); + ctor.set_u64(6 * 8 + 8, *value_root); + *ctor }, ConstantMeta::Axio { name, lvls, arena, type_root } => { - let obj = lean_alloc_ctor(2, 3, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set(obj, 1, build_address_array(lvls).cast()); - lean_ctor_set(obj, 2, build_expr_meta_arena(arena).cast()); - lean_ctor_set_uint64(obj, 3 * 8, *type_root); - obj.cast() + let ctor = LeanCtor::alloc(2, 3, 8); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set(1, IxAddress::build_array(lvls)); + ctor.set(2, IxonExprMetaArena::build(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor }, ConstantMeta::Quot { name, lvls, arena, type_root } => { - let obj = lean_alloc_ctor(3, 3, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set(obj, 1, build_address_array(lvls).cast()); - 
lean_ctor_set(obj, 2, build_expr_meta_arena(arena).cast()); - lean_ctor_set_uint64(obj, 3 * 8, *type_root); - obj.cast() + let ctor = LeanCtor::alloc(3, 3, 8); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set(1, IxAddress::build_array(lvls)); + ctor.set(2, IxonExprMetaArena::build(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor }, ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { - let obj = lean_alloc_ctor(4, 6, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set(obj, 1, build_address_array(lvls).cast()); - lean_ctor_set(obj, 2, build_address_array(ctors).cast()); - lean_ctor_set(obj, 3, build_address_array(all).cast()); - lean_ctor_set(obj, 4, build_address_array(ctx).cast()); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena).cast()); - lean_ctor_set_uint64(obj, 6 * 8, *type_root); - obj.cast() + let ctor = LeanCtor::alloc(4, 6, 8); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set(1, IxAddress::build_array(lvls)); + ctor.set(2, IxAddress::build_array(ctors)); + ctor.set(3, IxAddress::build_array(all)); + ctor.set(4, IxAddress::build_array(ctx)); + ctor.set(5, IxonExprMetaArena::build(arena)); + ctor.set_u64(6 * 8, *type_root); + *ctor }, ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { - let obj = lean_alloc_ctor(5, 4, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set(obj, 1, build_address_array(lvls).cast()); - lean_ctor_set(obj, 2, build_address_from_ixon(induct).cast()); - lean_ctor_set(obj, 3, build_expr_meta_arena(arena).cast()); - lean_ctor_set_uint64(obj, 4 * 8, *type_root); - obj.cast() + let ctor = LeanCtor::alloc(5, 4, 8); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set(1, IxAddress::build_array(lvls)); + ctor.set(2, IxAddress::build_from_ixon(induct)); + ctor.set(3, IxonExprMetaArena::build(arena)); + ctor.set_u64(4 * 8, *type_root); + *ctor }, ConstantMeta::Rec { @@ -460,44 +400,41 @@ pub fn 
build_constant_meta(meta: &ConstantMeta) -> *mut c_void { type_root, rule_roots, } => { - let obj = lean_alloc_ctor(6, 7, 8); - lean_ctor_set(obj, 0, build_address_from_ixon(name).cast()); - lean_ctor_set(obj, 1, build_address_array(lvls).cast()); - lean_ctor_set(obj, 2, build_address_array(rules).cast()); - lean_ctor_set(obj, 3, build_address_array(all).cast()); - lean_ctor_set(obj, 4, build_address_array(ctx).cast()); - lean_ctor_set(obj, 5, build_expr_meta_arena(arena).cast()); - lean_ctor_set(obj, 6, build_u64_array(rule_roots).cast()); - lean_ctor_set_uint64(obj, 7 * 8, *type_root); - obj.cast() - }, - } + let ctor = LeanCtor::alloc(6, 7, 8); + ctor.set(0, IxAddress::build_from_ixon(name)); + ctor.set(1, IxAddress::build_array(lvls)); + ctor.set(2, IxAddress::build_array(rules)); + ctor.set(3, IxAddress::build_array(all)); + ctor.set(4, IxAddress::build_array(ctx)); + ctor.set(5, IxonExprMetaArena::build(arena)); + ctor.set(6, build_u64_array(rule_roots)); + ctor.set_u64(7 * 8, *type_root); + *ctor + }, + }; + Self::new(obj) } -} -/// Decode Ixon.ConstantMeta from Lean pointer. -pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { - unsafe { - // Empty (tag 0, no fields) is represented as a scalar lean_box(0) - if lean_is_scalar(ptr) { - let tag = (ptr as usize) >> 1; - assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {}", tag); + /// Decode Ixon.ConstantMeta from Lean pointer. 
+ pub fn decode(self) -> ConstantMeta { + let obj: LeanObj = *self; + if obj.is_scalar() { + let tag = obj.unbox_usize(); + assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {tag}"); return ConstantMeta::Empty; } - let tag = lean_obj_tag(ptr as *mut _); - match tag { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { 1 => { - // defn: 6 obj fields, 2× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let hints = - decode_reducibility_hints(lean_ctor_get(ptr as *mut _, 2).cast()); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); - let arena = - decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); - let type_root = lean_ctor_scalar_u64(ptr, 6, 0); - let value_root = lean_ctor_scalar_u64(ptr, 6, 8); + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let name = ba.decode_ixon(); + let lvls = decode_address_array(ctor.get(1)); + let hints = decode_reducibility_hints(ctor.get(2).as_ptr()); + let all = decode_address_array(ctor.get(3)); + let ctx = decode_address_array(ctor.get(4)); + let arena = IxonExprMetaArena::decode(ctor.get(5)); + let type_root = ctor.scalar_u64(6, 0); + let value_root = ctor.scalar_u64(6, 8); ConstantMeta::Def { name, lvls, @@ -511,64 +448,56 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { }, 2 => { - // axio: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let arena = - decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); - let type_root = lean_ctor_scalar_u64(ptr, 3, 0); + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let name = ba.decode_ixon(); + let lvls = 
decode_address_array(ctor.get(1)); + let arena = IxonExprMetaArena::decode(ctor.get(2)); + let type_root = ctor.scalar_u64(3, 0); ConstantMeta::Axio { name, lvls, arena, type_root } }, 3 => { - // quot: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let arena = - decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 2).cast()); - let type_root = lean_ctor_scalar_u64(ptr, 3, 0); + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let name = ba.decode_ixon(); + let lvls = decode_address_array(ctor.get(1)); + let arena = IxonExprMetaArena::decode(ctor.get(2)); + let type_root = ctor.scalar_u64(3, 0); ConstantMeta::Quot { name, lvls, arena, type_root } }, 4 => { - // indc: 6 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let ctors = - decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); - let arena = - decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); - let type_root = lean_ctor_scalar_u64(ptr, 6, 0); + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let name = ba.decode_ixon(); + let lvls = decode_address_array(ctor.get(1)); + let ctors = decode_address_array(ctor.get(2)); + let all = decode_address_array(ctor.get(3)); + let ctx = decode_address_array(ctor.get(4)); + let arena = IxonExprMetaArena::decode(ctor.get(5)); + let type_root = ctor.scalar_u64(6, 0); ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } }, 5 => { - // ctor: 4 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); - let lvls = 
decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let induct = - decode_ixon_address(lean_ctor_get(ptr as *mut _, 2).cast()); - let arena = - decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 3).cast()); - let type_root = lean_ctor_scalar_u64(ptr, 4, 0); + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let name = ba.decode_ixon(); + let lvls = decode_address_array(ctor.get(1)); + let ba2 = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(2).as_ptr()) }; + let induct = ba2.decode_ixon(); + let arena = IxonExprMetaArena::decode(ctor.get(3)); + let type_root = ctor.scalar_u64(4, 0); ConstantMeta::Ctor { name, lvls, induct, arena, type_root } }, 6 => { - // recr: 7 obj fields, 1× u64 scalar - let name = decode_ixon_address(lean_ctor_get(ptr as *mut _, 0).cast()); - let lvls = decode_address_array(lean_ctor_get(ptr as *mut _, 1).cast()); - let rules = - decode_address_array(lean_ctor_get(ptr as *mut _, 2).cast()); - let all = decode_address_array(lean_ctor_get(ptr as *mut _, 3).cast()); - let ctx = decode_address_array(lean_ctor_get(ptr as *mut _, 4).cast()); - let arena = - decode_expr_meta_arena(lean_ctor_get(ptr as *mut _, 5).cast()); - let rule_roots = - decode_u64_array(lean_ctor_get(ptr as *mut _, 6).cast()); - let type_root = lean_ctor_scalar_u64(ptr, 7, 0); + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let name = ba.decode_ixon(); + let lvls = decode_address_array(ctor.get(1)); + let rules = decode_address_array(ctor.get(2)); + let all = decode_address_array(ctor.get(3)); + let ctx = decode_address_array(ctor.get(4)); + let arena = IxonExprMetaArena::decode(ctor.get(5)); + let rule_roots = decode_u64_array(ctor.get(6)); + let type_root = ctor.scalar_u64(7, 0); ConstantMeta::Rec { name, lvls, @@ -581,7 +510,7 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { } }, - _ => panic!("Invalid Ixon.ConstantMeta tag: {}", tag), + tag => 
panic!("Invalid Ixon.ConstantMeta tag: {tag}"), } } } @@ -590,50 +519,43 @@ pub fn decode_constant_meta(ptr: *const c_void) -> ConstantMeta { // Named and Comm Build/Decode // ============================================================================= -/// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } -pub fn build_named(addr: &Address, meta: &ConstantMeta) -> *mut c_void { - unsafe { - let addr_obj = build_address_from_ixon(addr); - let meta_obj = build_constant_meta(meta); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, addr_obj.cast()); - lean_ctor_set(obj, 1, meta_obj.cast()); - obj.cast() +impl IxonNamed { + /// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } + pub fn build(addr: &Address, meta: &ConstantMeta) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, IxAddress::build_from_ixon(addr)); + ctor.set(1, IxonConstantMeta::build(meta)); + Self::new(*ctor) } -} -/// Decode Ixon.Named. -pub fn decode_named(ptr: *const c_void) -> Named { - unsafe { - let addr_ptr = lean_ctor_get(ptr as *mut _, 0); - let meta_ptr = lean_ctor_get(ptr as *mut _, 1); + /// Decode Ixon.Named. 
+ pub fn decode(self) -> Named { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; Named { - addr: decode_ixon_address(addr_ptr.cast()), - meta: decode_constant_meta(meta_ptr.cast()), + addr: ba.decode_ixon(), + meta: IxonConstantMeta::new(ctor.get(1)).decode(), } } } -/// Build Ixon.Comm { secret : Address, payload : Address } -pub fn build_ixon_comm(comm: &Comm) -> *mut c_void { - unsafe { - let secret_obj = build_address_from_ixon(&comm.secret); - let payload_obj = build_address_from_ixon(&comm.payload); - let obj = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(obj, 0, secret_obj.cast()); - lean_ctor_set(obj, 1, payload_obj.cast()); - obj.cast() +impl IxonComm { + /// Build Ixon.Comm { secret : Address, payload : Address } + pub fn build(comm: &Comm) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, IxAddress::build_from_ixon(&comm.secret)); + ctor.set(1, IxAddress::build_from_ixon(&comm.payload)); + Self::new(*ctor) } -} -/// Decode Ixon.Comm. -pub fn decode_ixon_comm(ptr: *const c_void) -> Comm { - unsafe { - let secret_ptr = lean_ctor_get(ptr as *mut _, 0); - let payload_ptr = lean_ctor_get(ptr as *mut _, 1); + /// Decode Ixon.Comm. + pub fn decode(self) -> Comm { + let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; + let ba0 = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let ba1 = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(1).as_ptr()) }; Comm { - secret: decode_ixon_address(secret_ptr.cast()), - payload: decode_ixon_address(payload_ptr.cast()), + secret: ba0.decode_ixon(), + payload: ba1.decode_ixon(), } } } @@ -644,50 +566,42 @@ pub fn decode_ixon_comm(ptr: *const c_void) -> Comm { /// Round-trip Ixon.DataValue. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_data_value( - ptr: *const c_void, -) -> *mut c_void { - let dv = decode_ixon_data_value(ptr); - build_ixon_data_value(&dv) +pub extern "C" fn rs_roundtrip_ixon_data_value(obj: LeanObj) -> LeanObj { + let dv = IxonDataValue::new(obj).decode(); + IxonDataValue::build(&dv).into() } /// Round-trip Ixon.Comm. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_comm(ptr: *const c_void) -> *mut c_void { - let comm = decode_ixon_comm(ptr); - build_ixon_comm(&comm) +pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanObj) -> LeanObj { + let comm = IxonComm::new(obj).decode(); + IxonComm::build(&comm).into() } /// Round-trip Ixon.ExprMetaData. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( - ptr: *const c_void, -) -> *mut c_void { - let node = decode_expr_meta_data(ptr); - build_expr_meta_data(&node) +pub extern "C" fn rs_roundtrip_ixon_expr_meta_data(obj: LeanObj) -> LeanObj { + let node = IxonExprMetaData::new(obj).decode(); + IxonExprMetaData::build(&node).into() } /// Round-trip Ixon.ExprMetaArena. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( - ptr: *const c_void, -) -> *mut c_void { - let arena = decode_expr_meta_arena(ptr); - build_expr_meta_arena(&arena) +pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena(obj: LeanObj) -> LeanObj { + let arena = IxonExprMetaArena::decode(obj); + IxonExprMetaArena::build(&arena).into() } /// Round-trip Ixon.ConstantMeta (full arena-based). #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant_meta( - ptr: *const c_void, -) -> *mut c_void { - let meta = decode_constant_meta(ptr); - build_constant_meta(&meta) +pub extern "C" fn rs_roundtrip_ixon_constant_meta(obj: LeanObj) -> LeanObj { + let meta = IxonConstantMeta::new(obj).decode(); + IxonConstantMeta::build(&meta).into() } /// Round-trip Ixon.Named (with real metadata). 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_named(ptr: *const c_void) -> *mut c_void { - let named = decode_named(ptr); - build_named(&named.addr, &named.meta) +pub extern "C" fn rs_roundtrip_ixon_named(obj: LeanObj) -> LeanObj { + let named = IxonNamed::new(obj).decode(); + IxonNamed::build(&named.addr, &named.meta).into() } diff --git a/src/lean/ffi/ixon/serialize.rs b/src/lean/ffi/ixon/serialize.rs index 5958c8de..230d0243 100644 --- a/src/lean/ffi/ixon/serialize.rs +++ b/src/lean/ffi/ixon/serialize.rs @@ -3,7 +3,6 @@ //! Contains FFI functions for comparing Lean and Rust serialization outputs, //! and Env serialization roundtrip testing. -use std::ffi::c_void; use std::sync::Arc; use crate::ix::address::Address; @@ -11,139 +10,139 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; -use crate::lean::{ - lean_array_to_vec, lean_ctor_objs, lean_ctor_scalar_u64, lean_is_scalar, - lean_sarray_data, lean_tag, lean_unbox_u64, -}; +use crate::lean::obj::{LeanByteArray, LeanCtor, LeanObj}; use super::constant::{decode_ixon_address, decode_ixon_constant}; /// Unbox a Lean UInt64, handling both scalar and boxed representations. -fn lean_ptr_to_u64(ptr: *const c_void) -> u64 { - if lean_is_scalar(ptr) { - (ptr as usize >> 1) as u64 +fn lean_ptr_to_u64(obj: LeanObj) -> u64 { + if obj.is_scalar() { + obj.unbox_usize() as u64 } else { - lean_unbox_u64(ptr) + obj.unbox_u64() } } /// Decode a Lean `Ixon.Expr` to a Rust `IxonExpr`. 
-pub fn lean_ptr_to_ixon_expr(ptr: *const c_void) -> Arc { - assert!(!lean_is_scalar(ptr), "Ixon.Expr should not be scalar"); - match lean_tag(ptr) { +pub fn lean_ptr_to_ixon_expr(obj: LeanObj) -> Arc { + assert!(!obj.is_scalar(), "Ixon.Expr should not be scalar"); + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { 0 => { - let idx = lean_ctor_scalar_u64(ptr, 0, 0); + let idx = ctor.scalar_u64(0, 0); Arc::new(IxonExpr::Sort(idx)) }, 1 => { - let idx = lean_ctor_scalar_u64(ptr, 0, 0); + let idx = ctor.scalar_u64(0, 0); Arc::new(IxonExpr::Var(idx)) }, 2 => { - let [univs_ptr] = lean_ctor_objs(ptr); - let ref_idx = lean_ctor_scalar_u64(ptr, 1, 0); - let univs = lean_array_to_vec(univs_ptr, lean_ptr_to_u64); + let ref_idx = ctor.scalar_u64(1, 0); + let univs_arr = + unsafe { crate::lean::obj::LeanArray::from_raw(ctor.get(0).as_ptr()) }; + let univs = univs_arr.map(lean_ptr_to_u64); Arc::new(IxonExpr::Ref(ref_idx, univs)) }, 3 => { - let [univs_ptr] = lean_ctor_objs(ptr); - let rec_idx = lean_ctor_scalar_u64(ptr, 1, 0); - let univs = lean_array_to_vec(univs_ptr, lean_ptr_to_u64); + let rec_idx = ctor.scalar_u64(1, 0); + let univs_arr = + unsafe { crate::lean::obj::LeanArray::from_raw(ctor.get(0).as_ptr()) }; + let univs = univs_arr.map(lean_ptr_to_u64); Arc::new(IxonExpr::Rec(rec_idx, univs)) }, 4 => { - let [val_ptr] = lean_ctor_objs(ptr); - let type_idx = lean_ctor_scalar_u64(ptr, 1, 0); - let field_idx = lean_ctor_scalar_u64(ptr, 1, 8); - let val = lean_ptr_to_ixon_expr(val_ptr); + let type_idx = ctor.scalar_u64(1, 0); + let field_idx = ctor.scalar_u64(1, 8); + let val = lean_ptr_to_ixon_expr(ctor.get(0)); Arc::new(IxonExpr::Prj(type_idx, field_idx, val)) }, 5 => { - let idx = lean_ctor_scalar_u64(ptr, 0, 0); + let idx = ctor.scalar_u64(0, 0); Arc::new(IxonExpr::Str(idx)) }, 6 => { - let idx = lean_ctor_scalar_u64(ptr, 0, 0); + let idx = ctor.scalar_u64(0, 0); Arc::new(IxonExpr::Nat(idx)) }, 7 => { - let [fun_ptr, arg_ptr] = 
lean_ctor_objs(ptr); - let fun_ = lean_ptr_to_ixon_expr(fun_ptr); - let arg = lean_ptr_to_ixon_expr(arg_ptr); + let [fun_obj, arg_obj] = ctor.objs::<2>(); + let fun_ = lean_ptr_to_ixon_expr(fun_obj); + let arg = lean_ptr_to_ixon_expr(arg_obj); Arc::new(IxonExpr::App(fun_, arg)) }, 8 => { - let [ty_ptr, body_ptr] = lean_ctor_objs(ptr); - let ty = lean_ptr_to_ixon_expr(ty_ptr); - let body = lean_ptr_to_ixon_expr(body_ptr); + let [ty_obj, body_obj] = ctor.objs::<2>(); + let ty = lean_ptr_to_ixon_expr(ty_obj); + let body = lean_ptr_to_ixon_expr(body_obj); Arc::new(IxonExpr::Lam(ty, body)) }, 9 => { - let [ty_ptr, body_ptr] = lean_ctor_objs(ptr); - let ty = lean_ptr_to_ixon_expr(ty_ptr); - let body = lean_ptr_to_ixon_expr(body_ptr); + let [ty_obj, body_obj] = ctor.objs::<2>(); + let ty = lean_ptr_to_ixon_expr(ty_obj); + let body = lean_ptr_to_ixon_expr(body_obj); Arc::new(IxonExpr::All(ty, body)) }, 10 => { - let [ty_ptr, val_ptr, body_ptr] = lean_ctor_objs(ptr); - let base_ptr = ptr.cast::(); - let non_dep = unsafe { *base_ptr.add(8 + 3 * 8) } != 0; - let ty = lean_ptr_to_ixon_expr(ty_ptr); - let val = lean_ptr_to_ixon_expr(val_ptr); - let body = lean_ptr_to_ixon_expr(body_ptr); + let [ty_obj, val_obj, body_obj] = ctor.objs::<3>(); + let non_dep = ctor.scalar_bool(3, 0); + let ty = lean_ptr_to_ixon_expr(ty_obj); + let val = lean_ptr_to_ixon_expr(val_obj); + let body = lean_ptr_to_ixon_expr(body_obj); Arc::new(IxonExpr::Let(non_dep, ty, val, body)) }, 11 => { - let idx = lean_ctor_scalar_u64(ptr, 0, 0); + let idx = ctor.scalar_u64(0, 0); Arc::new(IxonExpr::Share(idx)) }, - tag => panic!("Unknown Ixon.Expr tag: {}", tag), + tag => panic!("Unknown Ixon.Expr tag: {tag}"), } } /// Check if Lean's computed hash matches Rust's computed hash. 
#[unsafe(no_mangle)] pub extern "C" fn rs_expr_hash_matches( - expr_ptr: *const c_void, - expected_hash: *const c_void, + expr_obj: LeanObj, + expected_hash: LeanObj, ) -> bool { - let expr = lean_ptr_to_ixon_expr(expr_ptr); + let expr = lean_ptr_to_ixon_expr(expr_obj); let hash = hash_expr(&expr); let expected = decode_ixon_address(expected_hash); Address::from_slice(hash.as_bytes()).is_ok_and(|h| h == expected) } /// Decode a Lean `Ixon.Univ` to a Rust `IxonUniv`. -fn lean_ptr_to_ixon_univ(ptr: *const c_void) -> Arc { - if lean_is_scalar(ptr) { +fn lean_ptr_to_ixon_univ(obj: LeanObj) -> Arc { + if obj.is_scalar() { return IxonUniv::zero(); } - match lean_tag(ptr) { + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { 1 => { - let [inner] = lean_ctor_objs(ptr); + let [inner] = ctor.objs::<1>(); IxonUniv::succ(lean_ptr_to_ixon_univ(inner)) }, 2 => { - let [a, b] = lean_ctor_objs(ptr); + let [a, b] = ctor.objs::<2>(); IxonUniv::max(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) }, 3 => { - let [a, b] = lean_ctor_objs(ptr); + let [a, b] = ctor.objs::<2>(); IxonUniv::imax(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) }, - 4 => IxonUniv::var(lean_ctor_scalar_u64(ptr, 0, 0)), - tag => panic!("Unknown Ixon.Univ tag: {}", tag), + 4 => IxonUniv::var(ctor.scalar_u64(0, 0)), + tag => panic!("Unknown Ixon.Univ tag: {tag}"), } } /// Check if Lean's Ixon.Univ serialization matches Rust. 
#[unsafe(no_mangle)] pub extern "C" fn rs_eq_univ_serialization( - univ_ptr: *const c_void, - bytes: *const c_void, + univ_obj: LeanObj, + bytes_obj: LeanObj, ) -> bool { - let univ = lean_ptr_to_ixon_univ(univ_ptr); - let bytes_data = lean_sarray_data(bytes); + let univ = lean_ptr_to_ixon_univ(univ_obj); + let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let bytes_data = ba.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_univ(&univ, &mut buf); buf == bytes_data @@ -152,11 +151,12 @@ pub extern "C" fn rs_eq_univ_serialization( /// Check if Lean's Ixon.Expr serialization matches Rust. #[unsafe(no_mangle)] pub extern "C" fn rs_eq_expr_serialization( - expr_ptr: *const c_void, - bytes: *const c_void, + expr_obj: LeanObj, + bytes_obj: LeanObj, ) -> bool { - let expr = lean_ptr_to_ixon_expr(expr_ptr); - let bytes_data = lean_sarray_data(bytes); + let expr = lean_ptr_to_ixon_expr(expr_obj); + let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let bytes_data = ba.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_expr(&expr, &mut buf); buf == bytes_data @@ -165,11 +165,12 @@ pub extern "C" fn rs_eq_expr_serialization( /// Check if Lean's Ixon.Constant serialization matches Rust. #[unsafe(no_mangle)] pub extern "C" fn rs_eq_constant_serialization( - constant_ptr: *const c_void, - bytes: *const c_void, + constant_obj: LeanObj, + bytes_obj: LeanObj, ) -> bool { - let constant = decode_ixon_constant(constant_ptr); - let bytes_data = lean_sarray_data(bytes); + let constant = decode_ixon_constant(constant_obj); + let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let bytes_data = ba.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); constant.put(&mut buf); buf == bytes_data @@ -179,14 +180,15 @@ pub extern "C" fn rs_eq_constant_serialization( /// Due to HashMap ordering differences, we compare deserialized content rather than bytes. 
#[unsafe(no_mangle)] pub extern "C" fn rs_eq_env_serialization( - raw_env_ptr: *const c_void, - bytes: *const c_void, + raw_env_obj: LeanObj, + bytes_obj: LeanObj, ) -> bool { use super::env::decode_raw_env; use crate::ix::ixon::env::Env; - let decoded = decode_raw_env(raw_env_ptr); - let bytes_data = lean_sarray_data(bytes); + let decoded = decode_raw_env(raw_env_obj); + let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let bytes_data = ba.as_bytes(); // Deserialize Lean's bytes using Rust's deserializer let rust_env = match Env::get(&mut &bytes_data[..]) { @@ -248,15 +250,16 @@ pub extern "C" fn rs_eq_env_serialization( /// FFI: Test Env serialization roundtrip. /// Takes: -/// - lean_bytes_ptr: pointer to ByteArray containing serialized Env from Lean +/// - lean_bytes_obj: pointer to ByteArray containing serialized Env from Lean /// /// Returns: true if Rust can deserialize and re-serialize to the same bytes #[unsafe(no_mangle)] -extern "C" fn rs_env_serde_roundtrip(lean_bytes_ptr: *const c_void) -> bool { +extern "C" fn rs_env_serde_roundtrip(lean_bytes_obj: LeanObj) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let lean_bytes = lean_sarray_data(lean_bytes_ptr).to_vec(); + let ba = unsafe { LeanByteArray::from_raw(lean_bytes_obj.as_ptr()) }; + let lean_bytes = ba.as_bytes().to_vec(); // Try to deserialize with Rust let mut slice = lean_bytes.as_slice(); @@ -294,15 +297,16 @@ extern "C" fn rs_env_serde_roundtrip(lean_bytes_ptr: *const c_void) -> bool { /// FFI: Compare Env serialization between Lean and Rust. 
/// Takes: -/// - lean_bytes_ptr: pointer to ByteArray containing serialized Env from Lean +/// - lean_bytes_obj: pointer to ByteArray containing serialized Env from Lean /// /// Returns: true if Rust can deserialize and the counts match #[unsafe(no_mangle)] -extern "C" fn rs_env_serde_check(lean_bytes_ptr: *const c_void) -> bool { +extern "C" fn rs_env_serde_check(lean_bytes_obj: LeanObj) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let lean_bytes = lean_sarray_data(lean_bytes_ptr).to_vec(); + let ba = unsafe { LeanByteArray::from_raw(lean_bytes_obj.as_ptr()) }; + let lean_bytes = ba.as_bytes().to_vec(); // Try to deserialize with Rust let mut slice = lean_bytes.as_slice(); diff --git a/src/lean/ffi/ixon/sharing.rs b/src/lean/ffi/ixon/sharing.rs index b88004b4..3d2ad32f 100644 --- a/src/lean/ffi/ixon/sharing.rs +++ b/src/lean/ffi/ixon/sharing.rs @@ -1,6 +1,5 @@ //! Ixon sharing analysis FFI. -use std::ffi::c_void; use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; @@ -8,7 +7,7 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; -use crate::lean::{lean_array_to_vec, lean_sarray_set_data}; +use crate::lean::obj::{LeanArray, LeanByteArray, LeanObj}; use super::expr::decode_ixon_expr_array; use super::serialize::lean_ptr_to_ixon_expr; @@ -16,9 +15,10 @@ use super::serialize::lean_ptr_to_ixon_expr; /// FFI: Debug sharing analysis - print usage counts for subterms with usage >= 2. /// This helps diagnose why Lean and Rust make different sharing decisions. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_debug_sharing_analysis(exprs_ptr: *const c_void) { +pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanObj) { + let arr = unsafe { LeanArray::from_raw(exprs_obj.as_ptr()) }; let exprs: Vec> = - lean_array_to_vec(exprs_ptr, lean_ptr_to_ixon_expr); + arr.map(|elem| lean_ptr_to_ixon_expr(elem)); println!("[Rust] Analyzing {} input expressions", exprs.len()); @@ -58,8 +58,8 @@ pub extern "C" fn rs_debug_sharing_analysis(exprs_ptr: *const c_void) { /// FFI: Run Rust's sharing analysis on Lean-provided Ixon.Expr array. /// Returns the number of shared items Rust would produce. #[unsafe(no_mangle)] -extern "C" fn rs_analyze_sharing_count(exprs_ptr: *const c_void) -> u64 { - let exprs = decode_ixon_expr_array(exprs_ptr); +extern "C" fn rs_analyze_sharing_count(exprs_obj: LeanObj) -> u64 { + let exprs = decode_ixon_expr_array(exprs_obj); let (info_map, _ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -72,11 +72,11 @@ extern "C" fn rs_analyze_sharing_count(exprs_ptr: *const c_void) -> u64 { /// Returns number of shared items. 
#[unsafe(no_mangle)] extern "C" fn rs_run_sharing_analysis( - exprs_ptr: *const c_void, - out_sharing_vec: *mut c_void, - out_rewritten: *mut c_void, + exprs_obj: LeanObj, + out_sharing_vec: LeanObj, + out_rewritten: LeanObj, ) -> u64 { - let exprs = decode_ixon_expr_array(exprs_ptr); + let exprs = decode_ixon_expr_array(exprs_obj); let (info_map, ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -96,8 +96,12 @@ extern "C" fn rs_run_sharing_analysis( } // Write to output arrays - unsafe { lean_sarray_set_data(out_sharing_vec, &sharing_bytes) }; - unsafe { lean_sarray_set_data(out_rewritten, &rewritten_bytes) }; + let sharing_ba = + unsafe { LeanByteArray::from_raw(out_sharing_vec.as_ptr()) }; + unsafe { sharing_ba.set_data(&sharing_bytes) }; + let rewritten_ba = + unsafe { LeanByteArray::from_raw(out_rewritten.as_ptr()) }; + unsafe { rewritten_ba.set_data(&rewritten_bytes) }; shared_hashes.len() as u64 } @@ -110,15 +114,15 @@ extern "C" fn rs_run_sharing_analysis( /// - bits 48-63: Rust sharing count #[unsafe(no_mangle)] extern "C" fn rs_compare_sharing_analysis( - exprs_ptr: *const c_void, - lean_sharing_ptr: *const c_void, - _lean_rewritten_ptr: *const c_void, + exprs_obj: LeanObj, + lean_sharing_obj: LeanObj, + _lean_rewritten_obj: LeanObj, ) -> u64 { // Decode input expressions - let exprs = decode_ixon_expr_array(exprs_ptr); + let exprs = decode_ixon_expr_array(exprs_obj); // Decode Lean's sharing vector - let lean_sharing = decode_ixon_expr_array(lean_sharing_ptr); + let lean_sharing = decode_ixon_expr_array(lean_sharing_obj); // Run Rust's sharing analysis let (info_map, ptr_to_hash) = analyze_block(&exprs, false); diff --git a/src/lean/ffi/ixon/univ.rs b/src/lean/ffi/ixon/univ.rs index 0326d462..1735e94c 100644 --- a/src/lean/ffi/ixon/univ.rs +++ b/src/lean/ffi/ixon/univ.rs @@ -1,122 +1,78 @@ //! Ixon.Univ build/decode/roundtrip FFI. 
-use std::ffi::c_void; use std::sync::Arc; -use crate::ix::ixon::univ::Univ as IxonUniv; -use crate::lean::{ - lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_obj_tag, - }, - lean_box_fn, lean_is_scalar, -}; +use crate::ix::ixon::univ::Univ; +use crate::lean::obj::{IxonUniv, LeanArray, LeanCtor, LeanObj}; -/// Build Ixon.Univ -pub fn build_ixon_univ(univ: &IxonUniv) -> *mut c_void { - unsafe { - match univ { - IxonUniv::Zero => lean_box_fn(0), - IxonUniv::Succ(inner) => { - let inner_obj = build_ixon_univ(inner); - let obj = lean_alloc_ctor(1, 1, 0); - lean_ctor_set(obj, 0, inner_obj.cast()); - obj.cast() +impl IxonUniv { + /// Build Ixon.Univ + pub fn build(univ: &Univ) -> Self { + let obj = match univ { + Univ::Zero => LeanObj::box_usize(0), + Univ::Succ(inner) => { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, Self::build(inner)); + *ctor }, - IxonUniv::Max(a, b) => { - let a_obj = build_ixon_univ(a); - let b_obj = build_ixon_univ(b); - let obj = lean_alloc_ctor(2, 2, 0); - lean_ctor_set(obj, 0, a_obj.cast()); - lean_ctor_set(obj, 1, b_obj.cast()); - obj.cast() + Univ::Max(a, b) => { + let ctor = LeanCtor::alloc(2, 2, 0); + ctor.set(0, Self::build(a)); + ctor.set(1, Self::build(b)); + *ctor }, - IxonUniv::IMax(a, b) => { - let a_obj = build_ixon_univ(a); - let b_obj = build_ixon_univ(b); - let obj = lean_alloc_ctor(3, 2, 0); - lean_ctor_set(obj, 0, a_obj.cast()); - lean_ctor_set(obj, 1, b_obj.cast()); - obj.cast() + Univ::IMax(a, b) => { + let ctor = LeanCtor::alloc(3, 2, 0); + ctor.set(0, Self::build(a)); + ctor.set(1, Self::build(b)); + *ctor }, - IxonUniv::Var(idx) => { - let obj = lean_alloc_ctor(4, 0, 8); - let base = obj.cast::(); - *base.add(8).cast::() = *idx; - obj.cast() + Univ::Var(idx) => { + let ctor = LeanCtor::alloc(4, 0, 8); + ctor.set_u64(0, *idx); + *ctor }, - } + }; + Self::new(obj) } -} -/// Build an Array of Ixon.Univ. 
-pub fn build_ixon_univ_array(univs: &[Arc]) -> *mut c_void { - unsafe { - let arr = lean_alloc_array(univs.len(), univs.len()); + /// Build an Array of Ixon.Univ. + pub fn build_array(univs: &[Arc]) -> LeanArray { + let arr = LeanArray::alloc(univs.len()); for (i, univ) in univs.iter().enumerate() { - let univ_obj = build_ixon_univ(univ); - lean_array_set_core(arr, i, univ_obj.cast()); + arr.set(i, Self::build(univ)); } - arr.cast() + arr } -} - -// ============================================================================= -// Decode Functions -// ============================================================================= -/// Decode Ixon.Univ (recursive enum). -/// | zero -- tag 0 (no fields) -/// | succ (u : Univ) -- tag 1 -/// | max (a b : Univ) -- tag 2 -/// | imax (a b : Univ) -- tag 3 -/// | var (idx : UInt64) -- tag 4 (scalar field) -pub fn decode_ixon_univ(ptr: *const c_void) -> IxonUniv { - unsafe { - // Note: .zero is a nullary constructor with tag 0, represented as lean_box(0) - if lean_is_scalar(ptr) { - return IxonUniv::Zero; + /// Decode Ixon.Univ (recursive enum). 
+ pub fn decode(self) -> Univ { + let obj: LeanObj = *self; + if obj.is_scalar() { + return Univ::Zero; } - let tag = lean_obj_tag((ptr as *mut c_void).cast()); - match tag { - 0 => IxonUniv::Zero, - 1 => { - let inner_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 0); - IxonUniv::Succ(Arc::new(decode_ixon_univ(inner_ptr.cast()))) - }, - 2 => { - let a_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 0); - let b_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 1); - IxonUniv::Max( - Arc::new(decode_ixon_univ(a_ptr.cast())), - Arc::new(decode_ixon_univ(b_ptr.cast())), - ) - }, - 3 => { - let a_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 0); - let b_ptr = lean_ctor_get((ptr as *mut c_void).cast(), 1); - IxonUniv::IMax( - Arc::new(decode_ixon_univ(a_ptr.cast())), - Arc::new(decode_ixon_univ(b_ptr.cast())), - ) - }, - 4 => { - // scalar field: UInt64 at offset 8 (after header) - let base = ptr.cast::(); - let idx = *(base.add(8).cast::()); - IxonUniv::Var(idx) - }, - _ => panic!("Invalid Ixon.Univ tag: {}", tag), + let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + match ctor.tag() { + 0 => Univ::Zero, + 1 => Univ::Succ(Arc::new(Self::new(ctor.get(0)).decode())), + 2 => Univ::Max( + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + ), + 3 => Univ::IMax( + Arc::new(Self::new(ctor.get(0)).decode()), + Arc::new(Self::new(ctor.get(1)).decode()), + ), + 4 => Univ::Var(ctor.scalar_u64(0, 0)), + tag => panic!("Invalid Ixon.Univ tag: {tag}"), } } -} -/// Decode Array Ixon.Univ. -pub fn decode_ixon_univ_array(ptr: *const c_void) -> Vec> { - crate::lean::lean_array_data(ptr) - .iter() - .map(|&u| Arc::new(decode_ixon_univ(u))) - .collect() + /// Decode Array Ixon.Univ. 
+ pub fn decode_array(obj: LeanObj) -> Vec> { + let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + arr.map(|elem| Arc::new(Self::new(elem).decode())) + } } // ============================================================================= @@ -125,7 +81,7 @@ pub fn decode_ixon_univ_array(ptr: *const c_void) -> Vec> { /// Round-trip Ixon.Univ. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_univ(ptr: *const c_void) -> *mut c_void { - let univ = decode_ixon_univ(ptr); - build_ixon_univ(&univ) +pub extern "C" fn rs_roundtrip_ixon_univ(obj: LeanObj) -> LeanObj { + let univ = IxonUniv::new(obj).decode(); + IxonUniv::build(&univ).into() } diff --git a/src/lean/ffi/lean_env.rs b/src/lean/ffi/lean_env.rs index 513e7902..3581a4f3 100644 --- a/src/lean/ffi/lean_env.rs +++ b/src/lean/ffi/lean_env.rs @@ -19,6 +19,8 @@ use std::sync::Arc; use rustc_hash::FxHashMap; +use crate::lean::obj::LeanObj; + use crate::{ ix::compile::compile_env, ix::decompile::{check_decompile, decompile_env}, @@ -689,7 +691,8 @@ pub fn lean_ptr_to_env_sequential(ptr: *const c_void) -> Env { // roundtrip and size analysis. Output is intentionally suppressed; re-enable // individual `eprintln!` lines when debugging locally. 
#[unsafe(no_mangle)] -extern "C" fn rs_tmp_decode_const_map(ptr: *const c_void) -> usize { +extern "C" fn rs_tmp_decode_const_map(obj: LeanObj) -> usize { + let ptr = obj.as_ptr(); // Enable hash-consed size tracking for debugging // TODO: Make this configurable via CLI instead of hardcoded crate::ix::compile::TRACK_HASH_CONSED_SIZE diff --git a/src/lean/obj.rs b/src/lean/obj.rs index ded74547..ffc99292 100644 --- a/src/lean/obj.rs +++ b/src/lean/obj.rs @@ -146,13 +146,19 @@ impl LeanArray { } pub fn get(&self, i: usize) -> LeanObj { - LeanObj(unsafe { lean::lean_array_get_core(self.0.as_ptr() as *mut _, i) }.cast()) + LeanObj( + unsafe { lean::lean_array_get_core(self.0.as_ptr() as *mut _, i) }.cast(), + ) } pub fn set(&self, i: usize, val: impl Into) { let val: LeanObj = val.into(); unsafe { - lean::lean_array_set_core(self.0.as_ptr() as *mut _, i, val.as_ptr() as *mut _); + lean::lean_array_set_core( + self.0.as_ptr() as *mut _, + i, + val.as_ptr() as *mut _, + ); } } @@ -334,11 +340,7 @@ impl LeanCtor { pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { #[allow(clippy::cast_possible_truncation)] let obj = unsafe { - lean::lean_alloc_ctor( - tag as u32, - num_objs as u32, - scalar_size as u32, - ) + lean::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) }; Self(LeanObj(obj.cast())) } @@ -351,7 +353,8 @@ impl LeanCtor { pub fn get(&self, i: usize) -> LeanObj { #[allow(clippy::cast_possible_truncation)] LeanObj( - unsafe { lean::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) }.cast(), + unsafe { lean::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } + .cast(), ) } @@ -376,6 +379,14 @@ impl LeanCtor { } } + /// Set a `u64` scalar field at the given byte offset (past all object fields). 
+ pub fn set_u64(&self, offset: usize, val: u64) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + lean::lean_ctor_set_uint64(self.0.as_ptr() as *mut _, offset as u32, val); + } + } + /// Read `N` object-field pointers using raw pointer math. /// /// This bypasses `lean_ctor_get`'s bounds check, which is necessary when @@ -390,11 +401,7 @@ impl LeanCtor { pub fn scalar_u64(&self, num_objs: usize, offset: usize) -> u64 { unsafe { std::ptr::read_unaligned( - self.0 - .as_ptr() - .cast::() - .add(8 + num_objs * 8 + offset) - .cast(), + self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), ) } } @@ -442,13 +449,16 @@ impl LeanExternal { /// Allocate a new external object holding `data`. pub fn alloc(class: &ExternalClass, data: T) -> Self { let data_ptr = Box::into_raw(Box::new(data)); - let obj = unsafe { lean::lean_alloc_external(class.0 as *mut _, data_ptr.cast()) }; + let obj = + unsafe { lean::lean_alloc_external(class.0 as *mut _, data_ptr.cast()) }; Self(LeanObj(obj.cast()), PhantomData) } /// Get a reference to the wrapped data. pub fn get(&self) -> &T { - unsafe { &*lean::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() } + unsafe { + &*lean::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() + } } } @@ -473,7 +483,9 @@ impl ExternalClass { finalizer: lean::lean_external_finalize_proc, foreach: lean::lean_external_foreach_proc, ) -> Self { - Self(unsafe { lean::lean_register_external_class(finalizer, foreach) }.cast()) + Self( + unsafe { lean::lean_register_external_class(finalizer, foreach) }.cast(), + ) } /// Register a new external class that uses `Drop` to finalize `T` @@ -485,10 +497,7 @@ impl ExternalClass { } } unsafe { - Self::register( - Some(drop_finalizer::), - Some(super::noop_foreach), - ) + Self::register(Some(drop_finalizer::), Some(super::noop_foreach)) } } } @@ -547,7 +556,9 @@ impl LeanList { } /// Build a list from an iterator of values convertible to `LeanObj`. 
- pub fn from_iter(items: impl IntoIterator>) -> Self { + pub fn from_iter( + items: impl IntoIterator>, + ) -> Self { let items: Vec = items.into_iter().map(Into::into).collect(); let mut list = Self::nil(); for item in items.into_iter().rev() { @@ -686,11 +697,7 @@ impl LeanExcept { pub fn into_result(self) -> Result { let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; - if self.is_ok() { - Ok(ctor.get(0)) - } else { - Err(ctor.get(0)) - } + if self.is_ok() { Ok(ctor.get(0)) } else { Err(ctor.get(0)) } } } @@ -700,42 +707,58 @@ impl LeanExcept { impl From for LeanObj { #[inline] - fn from(x: LeanArray) -> Self { x.0 } + fn from(x: LeanArray) -> Self { + x.0 + } } impl From for LeanObj { #[inline] - fn from(x: LeanByteArray) -> Self { x.0 } + fn from(x: LeanByteArray) -> Self { + x.0 + } } impl From for LeanObj { #[inline] - fn from(x: LeanString) -> Self { x.0 } + fn from(x: LeanString) -> Self { + x.0 + } } impl From for LeanObj { #[inline] - fn from(x: LeanCtor) -> Self { x.0 } + fn from(x: LeanCtor) -> Self { + x.0 + } } impl From> for LeanObj { #[inline] - fn from(x: LeanExternal) -> Self { x.0 } + fn from(x: LeanExternal) -> Self { + x.0 + } } impl From for LeanObj { #[inline] - fn from(x: LeanList) -> Self { x.0 } + fn from(x: LeanList) -> Self { + x.0 + } } impl From for LeanObj { #[inline] - fn from(x: LeanOption) -> Self { x.0 } + fn from(x: LeanOption) -> Self { + x.0 + } } impl From for LeanObj { #[inline] - fn from(x: LeanExcept) -> Self { x.0 } + fn from(x: LeanExcept) -> Self { + x.0 + } } // ============================================================================= @@ -869,5 +892,58 @@ lean_domain_type! { IxBlockCompareDetail; } +// ============================================================================= +// LeanProd — Prod α β (pair) +// ============================================================================= + +/// Typed wrapper for a Lean `Prod α β` (ctor tag 0, 2 object fields). 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanProd(LeanObj); + +impl Deref for LeanProd { + type Target = LeanObj; + #[inline] + fn deref(&self) -> &LeanObj { + &self.0 + } +} + +impl From for LeanObj { + #[inline] + fn from(x: LeanProd) -> Self { + x.0 + } +} + +impl LeanProd { + /// Build a pair `(fst, snd)`. + pub fn new(fst: impl Into, snd: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, fst); + ctor.set(1, snd); + Self(*ctor) + } + + /// Get the first element. + pub fn fst(&self) -> LeanObj { + let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + ctor.get(0) + } + + /// Get the second element. + pub fn snd(&self) -> LeanObj { + let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + ctor.get(1) + } +} + /// `Ix.Address = { hash : ByteArray }` — single-field struct, unboxed to `ByteArray`. pub type IxAddress = LeanByteArray; + +impl From for LeanObj { + #[inline] + fn from(x: u32) -> Self { + Self::box_u32(x) + } +} From 19a292731e1a0fc9729986271685bdde4e34c5be Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 10:40:23 -0500 Subject: [PATCH 09/27] Finish porting c_void to typed `LeanObj` API --- Cargo.toml | 2 +- src/iroh.rs | 44 +- src/iroh/client.rs | 95 ++-- src/iroh/server.rs | 1 - src/lean.rs | 246 --------- src/lean/ffi.rs | 6 +- src/lean/ffi/aiur.rs | 14 +- src/lean/ffi/aiur/protocol.rs | 48 +- src/lean/ffi/aiur/toplevel.rs | 204 +++---- src/lean/ffi/compile.rs | 123 +++-- src/lean/ffi/graph.rs | 12 +- src/lean/ffi/ix/constant.rs | 368 ++++++------- src/lean/ffi/ix/data.rs | 341 +++++------- src/lean/ffi/ix/env.rs | 266 ++++----- src/lean/ffi/ix/expr.rs | 283 ++++------ src/lean/ffi/ix/level.rs | 80 ++- src/lean/ffi/ix/name.rs | 70 +-- src/lean/ffi/ixon/compare.rs | 18 +- src/lean/ffi/ixon/constant.rs | 971 ++++++++++++++++----------------- src/lean/ffi/ixon/enums.rs | 51 +- src/lean/ffi/ixon/env.rs | 321 +++++------ 
src/lean/ffi/ixon/expr.rs | 396 ++++++++------ src/lean/ffi/ixon/meta.rs | 878 +++++++++++++++-------------- src/lean/ffi/ixon/serialize.rs | 28 +- src/lean/ffi/ixon/sharing.rs | 17 +- src/lean/ffi/ixon/univ.rs | 14 +- src/lean/ffi/lean_env.rs | 539 +++++++++--------- src/lean/ffi/primitives.rs | 309 +++++------ src/lean/nat.rs | 26 +- src/lean/obj.rs | 57 +- 30 files changed, 2687 insertions(+), 3141 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2d969fe5..6e5a29e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,7 @@ rand = "0.8.5" quickcheck_macros = "1.0.0" [features] -default = ["net"] +default = [] parallel = ["multi-stark/parallel"] net = ["bytes", "tokio", "iroh", "iroh-base", "n0-snafu", "n0-watcher", "rand", "tracing", "tracing-subscriber", "bincode", "serde" ] diff --git a/src/iroh.rs b/src/iroh.rs index 5b957d0b..489a3f2a 100644 --- a/src/iroh.rs +++ b/src/iroh.rs @@ -4,30 +4,30 @@ //! //! These fallback modules contain dummy functions that can still be called via Lean->C->Rust FFI, but will return an error message that Lean then prints before exiting. 
-// #[cfg(any( -// not(feature = "net"), -// all(target_os = "macos", target_arch = "aarch64") -// ))] -// pub mod _client; -// #[cfg(any( -// not(feature = "net"), -// all(target_os = "macos", target_arch = "aarch64") -// ))] -// pub mod _server; -// #[cfg(all( -// feature = "net", -// not(all(target_os = "macos", target_arch = "aarch64")) -// ))] +#[cfg(any( + not(feature = "net"), + all(target_os = "macos", target_arch = "aarch64") +))] +pub mod _client; +#[cfg(any( + not(feature = "net"), + all(target_os = "macos", target_arch = "aarch64") +))] +pub mod _server; +#[cfg(all( + feature = "net", + not(all(target_os = "macos", target_arch = "aarch64")) +))] pub mod client; -// #[cfg(all( -// feature = "net", -// not(all(target_os = "macos", target_arch = "aarch64")) -// ))] +#[cfg(all( + feature = "net", + not(all(target_os = "macos", target_arch = "aarch64")) +))] pub mod server; -// #[cfg(all( -// feature = "net", -// not(all(target_os = "macos", target_arch = "aarch64")) -// ))] +#[cfg(all( + feature = "net", + not(all(target_os = "macos", target_arch = "aarch64")) +))] pub mod common { use bincode::{Decode, Encode}; use serde::{Deserialize, Serialize}; diff --git a/src/iroh/client.rs b/src/iroh/client.rs index 00695d09..a760b73f 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -1,7 +1,6 @@ use iroh::{Endpoint, NodeAddr, NodeId, RelayMode, RelayUrl, SecretKey}; use n0_snafu::{Result, ResultExt}; use n0_watcher::Watcher as _; -use std::ffi::{CString, c_void}; use std::net::SocketAddr; use tracing::info; use tracing_subscriber::layer::SubscriberExt; @@ -9,10 +8,8 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; -use crate::lean::{ - lean::{lean_alloc_ctor, lean_alloc_sarray, lean_ctor_set, lean_mk_string}, - lean_array_to_vec, lean_except_error_string, lean_except_ok, - lean_obj_to_string, lean_sarray_set_data, +use crate::lean::obj::{ + 
LeanByteArray, LeanCtor, LeanExcept, LeanObj, LeanString, }; // An example ALPN that we are using to communicate over the `Endpoint` @@ -26,20 +23,13 @@ const READ_SIZE_LIMIT: usize = 100_000_000; /// message: String /// hash: String /// ``` -fn mk_put_response(message: &str, hash: &str) -> *mut c_void { - let c_message = CString::new(message).unwrap(); - let c_hash = CString::new(hash).unwrap(); - unsafe { - let ctor = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(ctor, 0, lean_mk_string(c_message.as_ptr())); - lean_ctor_set(ctor, 1, lean_mk_string(c_hash.as_ptr())); - ctor.cast() - } +fn mk_put_response(message: &str, hash: &str) -> LeanCtor { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanString::from_str(message)); + ctor.set(1, LeanString::from_str(hash)); + ctor } -#[repr(transparent)] -struct LeanPutResponse {} - /// Build a Lean `GetResponse` structure: /// ``` /// structure GetResponse where @@ -47,33 +37,27 @@ struct LeanPutResponse {} /// hash: String /// bytes: ByteArray /// ``` -fn mk_get_response(message: &str, hash: &str, bytes: &[u8]) -> *mut c_void { - let c_message = safe_cstring(message); - let c_hash = safe_cstring(hash); - unsafe { - let byte_array = lean_alloc_sarray(1, bytes.len(), bytes.len()); - lean_sarray_set_data(byte_array.cast(), bytes); - - let ctor = lean_alloc_ctor(0, 3, 0); - lean_ctor_set(ctor, 0, lean_mk_string(c_message.as_ptr())); - lean_ctor_set(ctor, 1, lean_mk_string(c_hash.as_ptr())); - lean_ctor_set(ctor, 2, byte_array); - ctor.cast() - } +fn mk_get_response(message: &str, hash: &str, bytes: &[u8]) -> LeanCtor { + let byte_array = LeanByteArray::from_bytes(bytes); + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, LeanString::from_str(message)); + ctor.set(1, LeanString::from_str(hash)); + ctor.set(2, byte_array); + ctor } /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_put( - node_id: *const c_void, - addrs: 
*const c_void, - relay_url: *const c_void, - input: *const c_void, -) -> *mut c_void { - let node_id = lean_obj_to_string(node_id); - let addrs: Vec = lean_array_to_vec(addrs, lean_obj_to_string); - let relay_url = lean_obj_to_string(relay_url); - let input_str = lean_obj_to_string(input); + node_id: LeanObj, + addrs: LeanObj, + relay_url: LeanObj, + input: LeanObj, +) -> LeanExcept { + let node_id = node_id.as_string().to_string(); + let addrs: Vec = addrs.as_array().map(|x| x.as_string().to_string()); + let relay_url = relay_url.as_string().to_string(); + let input_str = input.as_string().to_string(); let request = Request::Put(PutRequest { bytes: input_str.as_bytes().to_vec() }); @@ -82,43 +66,44 @@ extern "C" fn rs_iroh_put( match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { Ok(response) => match response { - Response::Put(put_response) => lean_except_ok(mk_put_response( + Response::Put(put_response) => LeanExcept::ok(mk_put_response( &put_response.message, &put_response.hash, )), - _ => lean_except_error_string("error: incorrect server response"), + _ => LeanExcept::error_string("error: incorrect server response"), }, - Err(err) => lean_except_error_string(&err.to_string()), + Err(err) => LeanExcept::error_string(&err.to_string()), } } /// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_get( - node_id: *const c_void, - addrs: *const c_void, - relay_url: *const c_void, - hash: *const c_void, -) -> *mut c_void { - let node_id = lean_obj_to_string(node_id); - let addrs: Vec = lean_array_to_vec(addrs, lean_obj_to_string); - let relay_url = lean_obj_to_string(relay_url); - let hash = lean_obj_to_string(hash); - let request = Request::Get(GetRequest { hash: hash.clone() }); + node_id: LeanObj, + addrs: LeanObj, + relay_url: LeanObj, + hash: LeanObj, +) -> LeanExcept { + let node_id = node_id.as_string().to_string(); + let addrs: Vec = 
addrs.as_array().map(|x| x.as_string().to_string()); + let relay_url = relay_url.as_string().to_string(); + let hash_str = hash.as_string().to_string(); + + let request = Request::Get(GetRequest { hash: hash_str.clone() }); let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { Ok(response) => match response { - Response::Get(get_response) => lean_except_ok(mk_get_response( + Response::Get(get_response) => LeanExcept::ok(mk_get_response( &get_response.message, &get_response.hash, &get_response.bytes, )), - _ => lean_except_error_string("error: incorrect server response"), + _ => LeanExcept::error_string("error: incorrect server response"), }, - Err(err) => lean_except_error_string(&err.to_string()), + Err(err) => LeanExcept::error_string(&err.to_string()), } } diff --git a/src/iroh/server.rs b/src/iroh/server.rs index 94bb401e..69947587 100644 --- a/src/iroh/server.rs +++ b/src/iroh/server.rs @@ -1,5 +1,4 @@ use std::collections::BTreeMap; -use std::ffi::c_void; use std::sync::{Arc, Mutex}; use std::time::Duration; diff --git a/src/lean.rs b/src/lean.rs index d22f2632..59b760df 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -28,28 +28,6 @@ pub mod obj; use std::ffi::{CString, c_void}; -#[inline] -#[allow(clippy::not_unsafe_ptr_arg_deref)] -pub fn as_ref_unsafe<'a, T>(ptr: *const T) -> &'a T { - let t_ref = unsafe { ptr.as_ref() }; - t_ref.expect("Null pointer dereference") -} - -#[inline] -#[allow(clippy::not_unsafe_ptr_arg_deref)] -pub fn as_mut_unsafe<'a, T>(ptr: *mut T) -> &'a mut T { - let t_ref = unsafe { ptr.as_mut() }; - t_ref.expect("Null pointer dereference") -} - -/// ```c -/// bool lean_is_scalar(lean_object * o) { return ((size_t)(o) & 1) == 1; } -/// ``` -#[inline] -pub fn lean_is_scalar(ptr: *const T) -> bool { - ptr as usize & 1 == 1 -} - /// Create a CString from a str, stripping any interior null bytes. 
/// Lean strings are length-prefixed and can contain null bytes, but the /// `lean_mk_string` FFI requires a null-terminated C string. This function @@ -61,230 +39,6 @@ pub fn safe_cstring(s: &str) -> CString { }) } -#[macro_export] -/// ```c -/// lean_object * lean_box(size_t n) { return (lean_object*)(((size_t)(n) << 1) | 1); } -/// ``` -macro_rules! lean_box { - ($e:expr) => { - (($e << 1) | 1) as *const std::ffi::c_void - }; -} - -/// ```c -/// size_t lean_unbox(lean_object * o) { return (size_t)(o) >> 1; } -/// ``` -#[macro_export] -macro_rules! lean_unbox { - ($t:ident, $e:expr) => { - $t::try_from(($e as usize) >> 1).expect("Unintended truncation") - }; -} - -#[inline] -pub fn lean_unbox_u32(ptr: *const c_void) -> u32 { - unsafe { lean::lean_unbox_uint32(ptr as *mut _) } -} - -#[inline] -pub fn lean_unbox_u64(ptr: *const c_void) -> u64 { - unsafe { lean::lean_unbox_uint64(ptr as *mut _) } -} - -#[inline] -pub fn lean_box_u64(v: u64) -> *mut c_void { - unsafe { lean::lean_box_uint64(v).cast() } -} - -pub fn lean_obj_to_string(ptr: *const c_void) -> String { - unsafe { - let obj = ptr.cast_mut().cast::(); - let len = lean::lean_string_size(obj) - 1; // m_size includes NUL - let data = lean::lean_string_cstr(obj); - let bytes = std::slice::from_raw_parts(data.cast::(), len); - String::from_utf8_unchecked(bytes.to_vec()) - } -} - -#[inline] -pub fn lean_tag(ptr: *const c_void) -> u8 { - #[allow(clippy::cast_possible_truncation)] // tags always fit in u8 - unsafe { - lean::lean_obj_tag(ptr as *mut _) as u8 - } -} - -#[inline] -pub fn lean_ctor_objs( - ptr: *const c_void, -) -> [*const c_void; N] { - // Use raw pointer arithmetic instead of lean_ctor_get to avoid its - // bounds-check assertion. Call sites legitimately read past the object - // fields into the scalar area (e.g. Expr.Data hash, Bool/BinderInfo - // stored as UInt8 scalars). This matches the old LeanCtorObject::objs(). 
- let base = unsafe { ptr.cast::<*const c_void>().add(1) }; - std::array::from_fn(|i| unsafe { *base.add(i) }) -} - -#[inline] -pub fn lean_ctor_scalar_u64( - ptr: *const c_void, - num_objs: usize, - offset: usize, -) -> u64 { - unsafe { - std::ptr::read_unaligned( - ptr.cast::().add(8 + num_objs * 8 + offset).cast(), - ) - } -} - -#[inline] -pub fn lean_ctor_scalar_u8( - ptr: *const c_void, - num_objs: usize, - offset: usize, -) -> u8 { - unsafe { *ptr.cast::().add(8 + num_objs * 8 + offset) } -} - -#[inline] -pub fn lean_ctor_scalar_bool( - ptr: *const c_void, - num_objs: usize, - offset: usize, -) -> bool { - lean_ctor_scalar_u8(ptr, num_objs, offset) != 0 -} - -// ============================================================================= -// Array helpers (replace LeanArrayObject) -// ============================================================================= - -/// Return a slice over the elements of a Lean `Array` object. -pub fn lean_array_data(ptr: *const c_void) -> &'static [*const c_void] { - unsafe { - let obj = ptr.cast_mut().cast::(); - let size = lean::lean_array_size(obj); - let cptr = lean::lean_array_cptr(obj); - std::slice::from_raw_parts(cptr.cast(), size) - } -} - -/// Convert a Lean `Array` to a `Vec` by mapping each element. -pub fn lean_array_to_vec( - ptr: *const c_void, - f: fn(*const c_void) -> T, -) -> Vec { - lean_array_data(ptr).iter().map(|&p| f(p)).collect() -} - -/// Like `lean_array_to_vec` but threads a mutable context through each call. -pub fn lean_array_to_vec_with( - ptr: *const c_void, - f: fn(*const c_void, &mut C) -> T, - c: &mut C, -) -> Vec { - lean_array_data(ptr).iter().map(|&p| f(p, c)).collect() -} - -// ============================================================================= -// SArray (ByteArray) helpers (replace LeanSArrayObject) -// ============================================================================= - -/// Return a byte slice over a Lean `ByteArray` (scalar array) object. 
-pub fn lean_sarray_data(ptr: *const c_void) -> &'static [u8] { - unsafe { - let obj = ptr.cast_mut().cast::(); - let size = lean::lean_sarray_size(obj); - let cptr = lean::lean_sarray_cptr(obj); - std::slice::from_raw_parts(cptr, size) - } -} - -/// Write bytes into a Lean `ByteArray` and update its size. -/// -/// # Safety -/// The caller must ensure `ptr` points to a valid `lean_sarray_object` -/// with sufficient capacity for `data`. -pub unsafe fn lean_sarray_set_data(ptr: *mut c_void, data: &[u8]) { - unsafe { - let obj = ptr.cast::(); - let cptr = lean::lean_sarray_cptr(obj); - std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); - // Update m_size: at offset 8 (after lean_object header) - *ptr.cast::().add(8).cast::() = data.len(); - } -} - -pub struct ListIterator(*const c_void); - -impl Iterator for ListIterator { - type Item = *const c_void; - fn next(&mut self) -> Option { - let ptr = self.0; - if lean_is_scalar(ptr) { - return None; - } - let [head_ptr, tail_ptr] = lean_ctor_objs(ptr); - self.0 = tail_ptr; - Some(head_ptr) - } -} - -pub fn collect_list( - mut ptr: *const c_void, - map_fn: fn(*const c_void) -> T, -) -> Vec { - let mut vec = Vec::new(); - while !lean_is_scalar(ptr) { - let [head_ptr, tail_ptr] = lean_ctor_objs(ptr); - vec.push(map_fn(head_ptr)); - ptr = tail_ptr; - } - vec -} - -/// Box a scalar value into a Lean object pointer. -/// ```c -/// lean_object * lean_box(size_t n) { return (lean_object*)(((size_t)(n) << 1) | 1); } -/// ``` -#[inline] -pub fn lean_box_fn(n: usize) -> *mut c_void { - ((n << 1) | 1) as *mut c_void -} - -// ============================================================================= -// Lean Except constructors -// ============================================================================= - -/// Build `Except.ok val` — tag 1, one object field. 
-#[inline] -pub fn lean_except_ok(val: *mut c_void) -> *mut c_void { - unsafe { - let obj = lean::lean_alloc_ctor(1, 1, 0); - lean::lean_ctor_set(obj, 0, val.cast()); - obj.cast() - } -} - -/// Build `Except.error msg` — tag 0, one object field. -#[inline] -pub fn lean_except_error(msg: *mut c_void) -> *mut c_void { - unsafe { - let obj = lean::lean_alloc_ctor(0, 1, 0); - lean::lean_ctor_set(obj, 0, msg.cast()); - obj.cast() - } -} - -/// Build `Except.error (lean_mk_string str)` from a Rust string. -#[inline] -pub fn lean_except_error_string(msg: &str) -> *mut c_void { - let c_msg = safe_cstring(msg); - unsafe { lean_except_error(lean::lean_mk_string(c_msg.as_ptr()).cast()) } -} - /// No-op foreach callback for external classes that hold no Lean references. /// /// # Safety diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs index 60779bc4..9fecad9a 100644 --- a/src/lean/ffi.rs +++ b/src/lean/ffi.rs @@ -15,7 +15,7 @@ pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. use crate::lean::lean::{ lean_io_result_mk_error, lean_io_result_mk_ok, lean_mk_io_user_error, }; -use crate::lean::obj::{LeanArray, LeanByteArray, LeanObj, LeanString}; +use crate::lean::obj::{LeanObj, LeanString}; /// Guard an FFI function that returns a Lean IO result against panics. 
/// On panic, returns a Lean IO error with the panic message instead of @@ -61,12 +61,12 @@ extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( u32s: LeanObj, bytes: LeanObj, ) -> bool { - let arr = unsafe { LeanArray::from_raw(u32s.as_ptr()) }; + let arr = u32s.as_array(); let u32s_flat: Vec = arr .map(|elem| elem.unbox_u32()) .into_iter() .flat_map(u32::to_le_bytes) .collect(); - let ba = unsafe { LeanByteArray::from_raw(bytes.as_ptr()) }; + let ba = bytes.as_byte_array(); u32s_flat == ba.as_bytes() } diff --git a/src/lean/ffi/aiur.rs b/src/lean/ffi/aiur.rs index 24927018..949a5dd6 100644 --- a/src/lean/ffi/aiur.rs +++ b/src/lean/ffi/aiur.rs @@ -1,23 +1,21 @@ use multi_stark::p3_field::integers::QuotientMap; -use std::ffi::c_void; pub mod protocol; pub mod toplevel; use crate::{ aiur::G, - lean::{lean_is_scalar, lean_unbox_u64}, - lean_unbox, + lean::obj::LeanObj, }; #[inline] -pub(super) fn lean_unbox_nat_as_usize(ptr: *const c_void) -> usize { - assert!(lean_is_scalar(ptr)); - lean_unbox!(usize, ptr) +pub(super) fn lean_unbox_nat_as_usize(obj: LeanObj) -> usize { + assert!(obj.is_scalar()); + obj.unbox_usize() } #[inline] -pub(super) fn lean_unbox_g(ptr: *const c_void) -> G { - let u64 = lean_unbox_u64(ptr); +pub(super) fn lean_unbox_g(obj: LeanObj) -> G { + let u64 = obj.unbox_u64(); unsafe { G::from_canonical_unchecked(u64) } } diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs index 09623d40..f4141c90 100644 --- a/src/lean/ffi/aiur/protocol.rs +++ b/src/lean/ffi/aiur/protocol.rs @@ -16,7 +16,6 @@ use crate::{ ffi::aiur::{ lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, }, - lean_array_data, lean_array_to_vec, lean_ctor_objs, obj::{ ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanExternal, LeanObj, @@ -69,7 +68,7 @@ extern "C" fn rs_aiur_system_build( commitment_parameters: LeanObj, ) -> LeanExternal { let system = AiurSystem::build( - lean_ptr_to_toplevel(toplevel.as_ptr()), + 
lean_ptr_to_toplevel(toplevel), lean_ptr_to_commitment_parameters(commitment_parameters), ); LeanExternal::alloc(system_class(), system) @@ -84,7 +83,7 @@ extern "C" fn rs_aiur_system_verify( proof_obj: LeanExternal, ) -> LeanExcept { let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let claim = lean_array_to_vec(claim.as_ptr(), lean_unbox_g); + let claim = claim.as_array().map(lean_unbox_g); match aiur_system_obj.get().verify(fri_parameters, &claim, proof_obj.get()) { Ok(()) => LeanExcept::ok(LeanObj::box_usize(0)), Err(err) => LeanExcept::error_string(&format!("{err:?}")), @@ -103,9 +102,10 @@ extern "C" fn rs_aiur_system_prove( io_map_arr: LeanObj, ) -> LeanObj { let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let fun_idx = lean_unbox_nat_as_usize(fun_idx.as_ptr()); - let args = lean_array_to_vec(args.as_ptr(), lean_unbox_g); - let io_data = lean_array_to_vec(io_data_arr.as_ptr(), lean_unbox_g); + let fun_idx = lean_unbox_nat_as_usize(fun_idx); + let args = args.as_array().map(lean_unbox_g); + let io_data = + io_data_arr.as_array().map(lean_unbox_g); let io_map = lean_array_to_io_buffer_map(io_map_arr); let mut io_buffer = IOBuffer { data: io_data, map: io_map }; @@ -172,38 +172,32 @@ fn build_g_array(values: &[G]) -> LeanObj { fn lean_ptr_to_commitment_parameters(obj: LeanObj) -> CommitmentParameters { CommitmentParameters { - log_blowup: lean_unbox_nat_as_usize(obj.as_ptr()), + log_blowup: lean_unbox_nat_as_usize(obj), } } fn lean_ctor_to_fri_parameters(obj: LeanObj) -> FriParameters { - let [ - log_final_poly_len_ptr, - num_queries_ptr, - commit_proof_of_work_bits, - query_proof_of_work_bits, - ] = lean_ctor_objs(obj.as_ptr()); + let ctor = obj.as_ctor(); FriParameters { - log_final_poly_len: lean_unbox_nat_as_usize(log_final_poly_len_ptr), - num_queries: lean_unbox_nat_as_usize(num_queries_ptr), - commit_proof_of_work_bits: lean_unbox_nat_as_usize( - commit_proof_of_work_bits, - ), - query_proof_of_work_bits: 
lean_unbox_nat_as_usize(query_proof_of_work_bits), + log_final_poly_len: lean_unbox_nat_as_usize(ctor.get(0)), + num_queries: lean_unbox_nat_as_usize(ctor.get(1)), + commit_proof_of_work_bits: lean_unbox_nat_as_usize(ctor.get(2)), + query_proof_of_work_bits: lean_unbox_nat_as_usize(ctor.get(3)), } } fn lean_array_to_io_buffer_map(obj: LeanObj) -> FxHashMap, IOKeyInfo> { - let array_data = lean_array_data(obj.as_ptr()); + let arr = obj.as_array(); let mut map = - FxHashMap::with_capacity_and_hasher(array_data.len(), FxBuildHasher); - for ptr in array_data { - let [key_ptr, info_ptr] = lean_ctor_objs(*ptr); - let key = lean_array_to_vec(key_ptr, lean_unbox_g); - let [idx_ptr, len_ptr] = lean_ctor_objs(info_ptr); + FxHashMap::with_capacity_and_hasher(arr.len(), FxBuildHasher); + for elt in arr.iter() { + let pair = elt.as_ctor(); + let key = + pair.get(0).as_array().map(lean_unbox_g); + let info_ctor = pair.get(1).as_ctor(); let info = IOKeyInfo { - idx: lean_unbox_nat_as_usize(idx_ptr), - len: lean_unbox_nat_as_usize(len_ptr), + idx: lean_unbox_nat_as_usize(info_ctor.get(0)), + len: lean_unbox_nat_as_usize(info_ctor.get(1)), }; map.insert(key, info); } diff --git a/src/lean/ffi/aiur/toplevel.rs b/src/lean/ffi/aiur/toplevel.rs index f365c87a..ddd8ce60 100644 --- a/src/lean/ffi/aiur/toplevel.rs +++ b/src/lean/ffi/aiur/toplevel.rs @@ -1,5 +1,3 @@ -use std::ffi::c_void; - use multi_stark::p3_field::PrimeCharacteristicRing; use crate::{ @@ -8,131 +6,133 @@ use crate::{ G, bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, }, - lean::{ - ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}, - lean_array_to_vec, lean_ctor_objs, lean_is_scalar, lean_obj_to_string, - lean_tag, - }, + lean::obj::LeanObj, }; -fn lean_ptr_to_vec_val_idx(ptr: *const c_void) -> Vec { - lean_array_to_vec(ptr, lean_unbox_nat_as_usize) +use crate::lean::ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}; + +fn lean_ptr_to_vec_val_idx(obj: LeanObj) -> Vec { + 
obj.as_array().map(lean_unbox_nat_as_usize) } -fn lean_ptr_to_op(ptr: *const c_void) -> Op { - match lean_tag(ptr) { +fn lean_ptr_to_op(obj: LeanObj) -> Op { + let ctor = obj.as_ctor(); + match ctor.tag() { 0 => { - let [const_val_ptr] = lean_ctor_objs(ptr); - Op::Const(G::from_u64(const_val_ptr as u64)) + let [const_val] = ctor.objs::<1>(); + Op::Const(G::from_u64(const_val.as_ptr() as u64)) }, 1 => { - let [a_ptr, b_ptr] = lean_ctor_objs(ptr); - Op::Add(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) + let [a, b] = ctor.objs::<2>(); + Op::Add(lean_unbox_nat_as_usize(a), lean_unbox_nat_as_usize(b)) }, 2 => { - let [a_ptr, b_ptr] = lean_ctor_objs(ptr); - Op::Sub(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) + let [a, b] = ctor.objs::<2>(); + Op::Sub(lean_unbox_nat_as_usize(a), lean_unbox_nat_as_usize(b)) }, 3 => { - let [a_ptr, b_ptr] = lean_ctor_objs(ptr); - Op::Mul(lean_unbox_nat_as_usize(a_ptr), lean_unbox_nat_as_usize(b_ptr)) + let [a, b] = ctor.objs::<2>(); + Op::Mul(lean_unbox_nat_as_usize(a), lean_unbox_nat_as_usize(b)) }, 4 => { - let [a_ptr] = lean_ctor_objs(ptr); - Op::EqZero(lean_unbox_nat_as_usize(a_ptr)) + let [a] = ctor.objs::<1>(); + Op::EqZero(lean_unbox_nat_as_usize(a)) }, 5 => { - let [fun_idx_ptr, val_idxs_ptr, output_size_ptr] = lean_ctor_objs(ptr); - let fun_idx = lean_unbox_nat_as_usize(fun_idx_ptr); - let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_ptr); - let output_size = lean_unbox_nat_as_usize(output_size_ptr); + let [fun_idx, val_idxs, output_size] = ctor.objs::<3>(); + let fun_idx = lean_unbox_nat_as_usize(fun_idx); + let val_idxs = lean_ptr_to_vec_val_idx(val_idxs); + let output_size = lean_unbox_nat_as_usize(output_size); Op::Call(fun_idx, val_idxs, output_size) }, 6 => { - let [val_idxs_ptr] = lean_ctor_objs(ptr); - Op::Store(lean_ptr_to_vec_val_idx(val_idxs_ptr)) + let [val_idxs] = ctor.objs::<1>(); + Op::Store(lean_ptr_to_vec_val_idx(val_idxs)) }, 7 => { - let [width_ptr, val_idx_ptr] = 
lean_ctor_objs(ptr); + let [width, val_idx] = ctor.objs::<2>(); Op::Load( - lean_unbox_nat_as_usize(width_ptr), - lean_unbox_nat_as_usize(val_idx_ptr), + lean_unbox_nat_as_usize(width), + lean_unbox_nat_as_usize(val_idx), ) }, 8 => { - let [as_ptr, bs_ptr] = lean_ctor_objs(ptr); + let [a, b] = ctor.objs::<2>(); Op::AssertEq( - lean_ptr_to_vec_val_idx(as_ptr), - lean_ptr_to_vec_val_idx(bs_ptr), + lean_ptr_to_vec_val_idx(a), + lean_ptr_to_vec_val_idx(b), ) }, 9 => { - let [key_ptr] = lean_ctor_objs(ptr); - Op::IOGetInfo(lean_ptr_to_vec_val_idx(key_ptr)) + let [key] = ctor.objs::<1>(); + Op::IOGetInfo(lean_ptr_to_vec_val_idx(key)) }, 10 => { - let [key_ptr, idx_ptr, len_ptr] = lean_ctor_objs(ptr); + let [key, idx, len] = ctor.objs::<3>(); Op::IOSetInfo( - lean_ptr_to_vec_val_idx(key_ptr), - lean_unbox_nat_as_usize(idx_ptr), - lean_unbox_nat_as_usize(len_ptr), + lean_ptr_to_vec_val_idx(key), + lean_unbox_nat_as_usize(idx), + lean_unbox_nat_as_usize(len), ) }, 11 => { - let [idx_ptr, len_ptr] = lean_ctor_objs(ptr); + let [idx, len] = ctor.objs::<2>(); Op::IORead( - lean_unbox_nat_as_usize(idx_ptr), - lean_unbox_nat_as_usize(len_ptr), + lean_unbox_nat_as_usize(idx), + lean_unbox_nat_as_usize(len), ) }, 12 => { - let [data_ptr] = lean_ctor_objs(ptr); - Op::IOWrite(lean_ptr_to_vec_val_idx(data_ptr)) + let [data] = ctor.objs::<1>(); + Op::IOWrite(lean_ptr_to_vec_val_idx(data)) }, 13 => { - let [byte_ptr] = lean_ctor_objs(ptr); - Op::U8BitDecomposition(lean_unbox_nat_as_usize(byte_ptr)) + let [byte] = ctor.objs::<1>(); + Op::U8BitDecomposition(lean_unbox_nat_as_usize(byte)) }, 14 => { - let [byte_ptr] = lean_ctor_objs(ptr); - Op::U8ShiftLeft(lean_unbox_nat_as_usize(byte_ptr)) + let [byte] = ctor.objs::<1>(); + Op::U8ShiftLeft(lean_unbox_nat_as_usize(byte)) }, 15 => { - let [byte_ptr] = lean_ctor_objs(ptr); - Op::U8ShiftRight(lean_unbox_nat_as_usize(byte_ptr)) + let [byte] = ctor.objs::<1>(); + Op::U8ShiftRight(lean_unbox_nat_as_usize(byte)) }, 16 => { - let [i, j] = 
lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); Op::U8Xor(i, j) }, 17 => { - let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); Op::U8Add(i, j) }, 18 => { - let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); Op::U8Sub(i, j) }, 19 => { - let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); Op::U8And(i, j) }, 20 => { - let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); Op::U8Or(i, j) }, 21 => { - let [i, j] = lean_ctor_objs::<2>(ptr).map(lean_unbox_nat_as_usize); + let [i, j] = ctor.objs::<2>().map(lean_unbox_nat_as_usize); Op::U8LessThan(i, j) }, 22 => { - let [label_ptr, idxs_ptr] = lean_ctor_objs(ptr); - let label = lean_obj_to_string(label_ptr); - let idxs = if lean_is_scalar(idxs_ptr) { + let [label_obj, idxs_obj] = ctor.objs::<2>(); + let label = label_obj.as_string().to_string(); + let idxs = if idxs_obj.is_scalar() { None } else { - let [idxs_ptr] = lean_ctor_objs(idxs_ptr); - Some(lean_array_to_vec(idxs_ptr, lean_unbox_nat_as_usize)) + let inner_ctor = idxs_obj.as_ctor(); + Some( + inner_ctor.get(0).as_array() + .map(lean_unbox_nat_as_usize), + ) }; Op::Debug(label, idxs) }, @@ -140,72 +140,78 @@ fn lean_ptr_to_op(ptr: *const c_void) -> Op { } } -fn lean_ptr_to_g_block_pair(ptr: *const c_void) -> (G, Block) { - let [g_ptr, block_ptr] = lean_ctor_objs(ptr); - let g = lean_unbox_g(g_ptr); - let block = lean_ptr_to_block(block_ptr); +fn lean_ptr_to_g_block_pair(obj: LeanObj) -> (G, Block) { + let ctor = obj.as_ctor(); + let [g_obj, block_obj] = ctor.objs::<2>(); + let g = lean_unbox_g(g_obj); + let block = lean_ptr_to_block(block_obj); (g, block) } -fn lean_ptr_to_ctrl(ptr: *const 
c_void) -> Ctrl { - match lean_tag(ptr) { +fn lean_ptr_to_ctrl(obj: LeanObj) -> Ctrl { + let ctor = obj.as_ctor(); + match ctor.tag() { 0 => { - let [val_idx_ptr, cases_ptr, default_ptr] = lean_ctor_objs(ptr); - let val_idx = lean_unbox_nat_as_usize(val_idx_ptr); - let vec_cases = lean_array_to_vec(cases_ptr, lean_ptr_to_g_block_pair); + let [val_idx_obj, cases_obj, default_obj] = ctor.objs::<3>(); + let val_idx = lean_unbox_nat_as_usize(val_idx_obj); + let vec_cases = + cases_obj.as_array().map(lean_ptr_to_g_block_pair); let cases = FxIndexMap::from_iter(vec_cases); - let default = if lean_is_scalar(default_ptr) { + let default = if default_obj.is_scalar() { None } else { - let [block_ptr] = lean_ctor_objs(default_ptr); - let block = lean_ptr_to_block(block_ptr); + let inner_ctor = default_obj.as_ctor(); + let block = lean_ptr_to_block(inner_ctor.get(0)); Some(Box::new(block)) }; Ctrl::Match(val_idx, cases, default) }, 1 => { - let [sel_idx_ptr, val_idxs_ptr] = lean_ctor_objs(ptr); - let sel_idx = lean_unbox_nat_as_usize(sel_idx_ptr); - let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_ptr); + let [sel_idx_obj, val_idxs_obj] = ctor.objs::<2>(); + let sel_idx = lean_unbox_nat_as_usize(sel_idx_obj); + let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_obj); Ctrl::Return(sel_idx, val_idxs) }, _ => unreachable!(), } } -fn lean_ptr_to_block(ptr: *const c_void) -> Block { - let [ops_ptr, ctrl_ptr, min_sel_included_ptr, max_sel_excluded_ptr] = - lean_ctor_objs(ptr); - let ops = lean_array_to_vec(ops_ptr, lean_ptr_to_op); - let ctrl = lean_ptr_to_ctrl(ctrl_ptr); - let min_sel_included = lean_unbox_nat_as_usize(min_sel_included_ptr); - let max_sel_excluded = lean_unbox_nat_as_usize(max_sel_excluded_ptr); +fn lean_ptr_to_block(obj: LeanObj) -> Block { + let ctor = obj.as_ctor(); + let [ops_obj, ctrl_obj, min_sel_obj, max_sel_obj] = ctor.objs::<4>(); + let ops = ops_obj.as_array().map(lean_ptr_to_op); + let ctrl = lean_ptr_to_ctrl(ctrl_obj); + let min_sel_included = 
lean_unbox_nat_as_usize(min_sel_obj); + let max_sel_excluded = lean_unbox_nat_as_usize(max_sel_obj); Block { ops, ctrl, min_sel_included, max_sel_excluded } } -fn lean_ptr_to_function_layout(ptr: *const c_void) -> FunctionLayout { - let [input_size_ptr, selectors_ptr, auxiliaries_ptr, lookups_ptr] = - lean_ctor_objs(ptr); +fn lean_ptr_to_function_layout(obj: LeanObj) -> FunctionLayout { + let ctor = obj.as_ctor(); + let [input_size, selectors, auxiliaries, lookups] = ctor.objs::<4>(); FunctionLayout { - input_size: lean_unbox_nat_as_usize(input_size_ptr), - selectors: lean_unbox_nat_as_usize(selectors_ptr), - auxiliaries: lean_unbox_nat_as_usize(auxiliaries_ptr), - lookups: lean_unbox_nat_as_usize(lookups_ptr), + input_size: lean_unbox_nat_as_usize(input_size), + selectors: lean_unbox_nat_as_usize(selectors), + auxiliaries: lean_unbox_nat_as_usize(auxiliaries), + lookups: lean_unbox_nat_as_usize(lookups), } } -fn lean_ptr_to_function(ptr: *const c_void) -> Function { - let [body_ptr, layout_ptr, unconstrained_ptr] = lean_ctor_objs(ptr); - let body = lean_ptr_to_block(body_ptr); - let layout = lean_ptr_to_function_layout(layout_ptr); - let unconstrained = unconstrained_ptr as usize != 0; +fn lean_ptr_to_function(obj: LeanObj) -> Function { + let ctor = obj.as_ctor(); + let [body_obj, layout_obj, unconstrained_obj] = ctor.objs::<3>(); + let body = lean_ptr_to_block(body_obj); + let layout = lean_ptr_to_function_layout(layout_obj); + let unconstrained = unconstrained_obj.as_ptr() as usize != 0; Function { body, layout, unconstrained } } -pub(crate) fn lean_ptr_to_toplevel(ptr: *const c_void) -> Toplevel { - let [functions_ptr, memory_sizes_ptr] = lean_ctor_objs(ptr); - let functions = lean_array_to_vec(functions_ptr, lean_ptr_to_function); +pub(crate) fn lean_ptr_to_toplevel(obj: LeanObj) -> Toplevel { + let ctor = obj.as_ctor(); + let [functions_obj, memory_sizes_obj] = ctor.objs::<2>(); + let functions = + functions_obj.as_array().map(lean_ptr_to_function); let 
memory_sizes = - lean_array_to_vec(memory_sizes_ptr, lean_unbox_nat_as_usize); + memory_sizes_obj.as_array().map(lean_unbox_nat_as_usize); Toplevel { functions, memory_sizes } } diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs index 5d458849..e5bd26fc 100644 --- a/src/lean/ffi/compile.rs +++ b/src/lean/ffi/compile.rs @@ -10,7 +10,7 @@ use std::collections::HashMap; use std::sync::Arc; -use super::{ffi_io_guard, io_error, io_ok}; +use crate::lean::ffi::{ffi_io_guard, io_error, io_ok}; use crate::ix::address::Address; use crate::ix::compile::{CompileState, compile_env}; use crate::ix::condense::compute_sccs; @@ -26,24 +26,23 @@ use crate::lean::nat::Nat; use crate::lean::obj::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObj, LeanString, }; -use crate::lean::lean_obj_to_string; use dashmap::DashMap; use dashmap::DashSet; -use super::builder::LeanBuildCache; -use super::graph::build_condensed_blocks; -use super::ix::constant::build_constant_info; -use super::ix::env::build_raw_environment; -use super::ix::name::build_name; -use super::ixon::constant::{ +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::graph::build_condensed_blocks; +use crate::lean::ffi::ix::constant::build_constant_info; +use crate::lean::ffi::ix::env::build_raw_environment; +use crate::lean::ffi::ix::name::build_name; +use crate::lean::ffi::ixon::constant::{ build_address_from_ixon, build_ixon_constant, decode_ixon_address, }; -use super::ixon::env::{ +use crate::lean::ffi::ixon::env::{ build_raw_env, build_raw_name_entry, decode_raw_env, decoded_to_ixon_env, }; -use super::ixon::meta::{build_constant_meta, build_ixon_comm}; -use super::lean_env::{GlobalCache, lean_ptr_to_env, lean_ptr_to_name}; +use crate::lean::ffi::ixon::meta::{build_constant_meta, build_ixon_comm}; +use crate::lean::ffi::lean_env::{GlobalCache, lean_ptr_to_env, lean_ptr_to_name}; // ============================================================================= // Helper builders @@ 
-108,7 +107,7 @@ pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanObj { /// Round-trip a RustCondensedBlocks structure. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_rust_condensed_blocks(obj: LeanObj) -> LeanObj { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); let low_links = ctor.get(0); let blocks = ctor.get(1); let block_refs = ctor.get(2); @@ -127,7 +126,7 @@ pub extern "C" fn rs_roundtrip_rust_condensed_blocks(obj: LeanObj) -> LeanObj { /// Round-trip a RustCompilePhases structure. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_rust_compile_phases(obj: LeanObj) -> LeanObj { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); let raw_env = ctor.get(0); let condensed = ctor.get(1); let compile_env = ctor.get(2); @@ -150,9 +149,12 @@ pub extern "C" fn rs_roundtrip_rust_compile_phases(obj: LeanObj) -> LeanObj { /// Round-trip a BlockCompareResult. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_block_compare_result(obj: LeanObj) -> LeanObj { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + // Tags 0 (match) and 2 (notFound) have 0 fields → Lean represents as scalars + if obj.is_scalar() { + return obj; + } + let ctor = obj.as_ctor(); match ctor.tag() { - 0 => *LeanCtor::alloc(0, 0, 0), // match 1 => { // mismatch: 0 obj, 24 scalar bytes (3 × u64) let lean_size = ctor.scalar_u64(0, 0); @@ -165,7 +167,6 @@ pub extern "C" fn rs_roundtrip_block_compare_result(obj: LeanObj) -> LeanObj { out.set_u64(16, first_diff); *out }, - 2 => *LeanCtor::alloc(2, 0, 0), // notFound _ => unreachable!("Invalid BlockCompareResult tag: {}", ctor.tag()), } } @@ -173,7 +174,7 @@ pub extern "C" fn rs_roundtrip_block_compare_result(obj: LeanObj) -> LeanObj { /// Round-trip a BlockCompareDetail. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_block_compare_detail(obj: LeanObj) -> LeanObj { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); let result_ptr = ctor.get(0); let lean_sharing_len = ctor.scalar_u64(1, 0); let rust_sharing_len = ctor.scalar_u64(1, 8); @@ -198,7 +199,7 @@ pub extern "C" fn rs_compile_env_full( ) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { // Phase 1: Decode Lean environment - let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let rust_env = lean_ptr_to_env(env_consts_ptr); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); @@ -220,7 +221,7 @@ pub extern "C" fn rs_compile_env_full( let mut cache = LeanBuildCache::with_capacity(env_len); let raw_env = - unsafe { LeanObj::from_raw(build_raw_environment(&mut cache, &rust_env)) }; + build_raw_environment(&mut cache, &rust_env); let condensed_obj = build_condensed_blocks(&mut cache, &condensed); // Collect blocks @@ -296,7 +297,7 @@ pub extern "C" fn rs_compile_env_full( #[unsafe(no_mangle)] pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { @@ -334,13 +335,13 @@ pub extern "C" fn rs_compile_phases( env_consts_ptr: LeanObj, ) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let rust_env = lean_ptr_to_env(env_consts_ptr); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); let mut cache = LeanBuildCache::with_capacity(env_len); let raw_env = - unsafe { LeanObj::from_raw(build_raw_environment(&mut cache, &rust_env)) }; + build_raw_environment(&mut cache, &rust_env); let ref_graph = build_ref_graph(&rust_env); @@ -435,7 +436,7 @@ pub extern "C" fn rs_compile_env_to_ixon( 
env_consts_ptr: LeanObj, ) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { @@ -521,10 +522,10 @@ pub extern "C" fn rs_canonicalize_env_to_ix( env_consts_ptr: LeanObj, ) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let rust_env = lean_ptr_to_env(env_consts_ptr); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let raw_env = - unsafe { LeanObj::from_raw(build_raw_environment(&mut cache, &rust_env)) }; + build_raw_environment(&mut cache, &rust_env); io_ok(raw_env) })) } @@ -551,7 +552,7 @@ pub struct RustCompiledEnv { #[unsafe(no_mangle)] extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObj) -> u64 { let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr.as_ptr(), &global_cache); + let name = lean_ptr_to_name(name_ptr, &global_cache); // Return a magic number plus the hash of the name to verify it worked let hash = name.get_hash(); @@ -569,7 +570,7 @@ extern "C" fn rs_compile_env_rust_first( env_consts_ptr: LeanObj, ) -> *mut RustCompiledEnv { // Decode Lean environment - let lean_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let lean_env = lean_ptr_to_env(env_consts_ptr); let lean_env = Arc::new(lean_env); // Compile with Rust @@ -619,10 +620,10 @@ extern "C" fn rs_compare_block( return 2u64 << 32; // not found } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); + let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let ba = unsafe { LeanByteArray::from_raw(lean_bytes.as_ptr()) }; + let ba = lean_bytes.as_byte_array(); let lean_data = ba.as_bytes(); // Look up Rust's compiled block @@ -682,7 +683,7 @@ extern "C" fn rs_get_block_bytes_len( 
return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); + let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -703,7 +704,7 @@ extern "C" fn rs_copy_block_bytes( return; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); + let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -713,7 +714,7 @@ extern "C" fn rs_copy_block_bytes( }; // Copy into the Lean ByteArray - let ba = unsafe { LeanByteArray::from_raw(dest.as_ptr()) }; + let ba = dest.as_byte_array(); unsafe { ba.set_data(bytes) }; } @@ -727,7 +728,7 @@ extern "C" fn rs_get_block_sharing_len( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); + let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -847,7 +848,7 @@ extern "C" fn rs_get_pre_sharing_exprs( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); + let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -933,7 +934,7 @@ extern "C" fn rs_get_pre_sharing_exprs( } // Write to output buffer - let ba = unsafe { LeanByteArray::from_raw(out_buf.as_ptr()) }; + let ba = out_buf.as_byte_array(); unsafe { ba.set_data(&output_bytes) }; n_exprs @@ -949,7 +950,7 @@ extern "C" fn rs_get_pre_sharing_exprs_len( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name.as_ptr(), &global_cache); + let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -1010,7 +1011,7 @@ extern "C" fn rs_lookup_const_addr( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr.as_ptr(), &global_cache); + let name = 
lean_ptr_to_name(name_ptr, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -1018,7 +1019,7 @@ extern "C" fn rs_lookup_const_addr( match rust_env.compile_state.name_to_addr.get(&name) { Some(addr_ref) => { // Copy the 32-byte address into the output ByteArray - let ba = unsafe { LeanByteArray::from_raw(out_addr.as_ptr()) }; + let ba = out_addr.as_byte_array(); unsafe { ba.set_data(addr_ref.as_bytes()) }; 1 }, @@ -1084,7 +1085,7 @@ pub fn build_serialize_error(se: &SerializeError) -> LeanObj { ctor.set_u8(0, *value); *ctor }, - SerializeError::AddressError => *LeanCtor::alloc(5, 0, 0), + SerializeError::AddressError => LeanObj::box_usize(5), SerializeError::InvalidShareIndex { idx, max } => { let ctor = LeanCtor::alloc(6, 1, 8); ctor.set(0, build_lean_nat_usize(*max)); @@ -1096,24 +1097,30 @@ pub fn build_serialize_error(se: &SerializeError) -> LeanObj { /// Decode a Lean Ixon.SerializeError to a Rust SerializeError. pub fn decode_serialize_error(obj: LeanObj) -> SerializeError { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + // Tag 5 (addressError) has 0 fields → Lean represents as scalar + if obj.is_scalar() { + let tag = obj.unbox_usize(); + assert_eq!(tag, 5, "Invalid scalar SerializeError tag: {}", tag); + return SerializeError::AddressError; + } + let ctor = obj.as_ctor(); match ctor.tag() { 0 => { - let expected = lean_obj_to_string(ctor.get(0).as_ptr()); + let expected = ctor.get(0).as_string().to_string(); SerializeError::UnexpectedEof { expected } }, 1 => { - let context = lean_obj_to_string(ctor.get(0).as_ptr()); + let context = ctor.get(0).as_string().to_string(); let tag_val = ctor.scalar_u8(1, 0); SerializeError::InvalidTag { tag: tag_val, context } }, 2 => { - let context = lean_obj_to_string(ctor.get(0).as_ptr()); + let context = ctor.get(0).as_string().to_string(); let flag = ctor.scalar_u8(1, 0); SerializeError::InvalidFlag { flag, context } }, 3 => { - let context = lean_obj_to_string(ctor.get(0).as_ptr()); + let context = 
ctor.get(0).as_string().to_string(); let variant = ctor.scalar_u64(1, 0); SerializeError::InvalidVariant { variant, context } }, @@ -1214,14 +1221,14 @@ pub fn build_decompile_error(err: &DecompileError) -> LeanObj { /// Decode a Lean DecompileError to a Rust DecompileError. pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); match ctor.tag() { 0 => { let refs_len = Nat::from_ptr(ctor.get(0).as_ptr()) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let constant = ctor.get(1).as_string().to_string(); let idx = ctor.scalar_u64(2, 0); DecompileError::InvalidRefIndex { idx, refs_len, constant } }, @@ -1230,7 +1237,7 @@ pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let constant = ctor.get(1).as_string().to_string(); let idx = ctor.scalar_u64(2, 0); DecompileError::InvalidUnivIndex { idx, univs_len, constant } }, @@ -1239,7 +1246,7 @@ pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let constant = ctor.get(1).as_string().to_string(); let idx = ctor.scalar_u64(2, 0); DecompileError::InvalidShareIndex { idx, max, constant } }, @@ -1248,7 +1255,7 @@ pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); - let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let constant = ctor.get(1).as_string().to_string(); let idx = ctor.scalar_u64(2, 0); DecompileError::InvalidRecIndex { idx, ctx_size, constant } }, @@ -1257,7 +1264,7 @@ pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { .to_u64() .and_then(|x| usize::try_from(x).ok()) 
.unwrap_or(0); - let constant = lean_obj_to_string(ctor.get(1).as_ptr()); + let constant = ctor.get(1).as_string().to_string(); let idx = ctor.scalar_u64(2, 0); DecompileError::InvalidUnivVarIndex { idx, max, constant } }, @@ -1266,11 +1273,11 @@ pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { 7 => DecompileError::BlobNotFound(decode_ixon_address(ctor.get(0))), 8 => { let addr = decode_ixon_address(ctor.get(0)); - let expected = lean_obj_to_string(ctor.get(1).as_ptr()); + let expected = ctor.get(1).as_string().to_string(); DecompileError::BadBlobFormat { addr, expected } }, 9 => { - let msg = lean_obj_to_string(ctor.get(0).as_ptr()); + let msg = ctor.get(0).as_string().to_string(); DecompileError::BadConstantFormat { msg } }, 10 => { @@ -1327,24 +1334,24 @@ pub fn build_compile_error(err: &CompileError) -> LeanObj { /// Decode a Lean CompileError to a Rust CompileError. pub fn decode_compile_error(obj: LeanObj) -> CompileError { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); match ctor.tag() { 0 => { - let name = lean_obj_to_string(ctor.get(0).as_ptr()); + let name = ctor.get(0).as_string().to_string(); CompileError::MissingConstant { name } }, 1 => CompileError::MissingAddress(decode_ixon_address(ctor.get(0))), 2 => { - let reason = lean_obj_to_string(ctor.get(0).as_ptr()); + let reason = ctor.get(0).as_string().to_string(); CompileError::InvalidMutualBlock { reason } }, 3 => { - let desc = lean_obj_to_string(ctor.get(0).as_ptr()); + let desc = ctor.get(0).as_string().to_string(); CompileError::UnsupportedExpr { desc } }, 4 => { - let curr = lean_obj_to_string(ctor.get(0).as_ptr()); - let param = lean_obj_to_string(ctor.get(1).as_ptr()); + let curr = ctor.get(0).as_string().to_string(); + let param = ctor.get(1).as_string().to_string(); CompileError::UnknownUnivParam { curr, param } }, 5 => { diff --git a/src/lean/ffi/graph.rs b/src/lean/ffi/graph.rs index 50829e7d..b41f3263 100644 --- a/src/lean/ffi/graph.rs 
+++ b/src/lean/ffi/graph.rs @@ -2,14 +2,14 @@ use std::sync::Arc; -use super::{ffi_io_guard, io_ok}; +use crate::lean::ffi::{ffi_io_guard, io_ok}; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; -use super::builder::LeanBuildCache; -use super::ix::name::build_name; -use super::lean_env::lean_ptr_to_env; +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::ix::name::build_name; +use crate::lean::ffi::lean_env::lean_ptr_to_env; /// Build an Array (Ix.Name × Array Ix.Name) from a RefMap. pub fn build_ref_graph_array( @@ -96,7 +96,7 @@ pub fn build_condensed_blocks( #[unsafe(no_mangle)] pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); @@ -109,7 +109,7 @@ pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObj) -> LeanObj { #[unsafe(no_mangle)] pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr.as_ptr()); + let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let condensed = compute_sccs(&ref_graph.out_refs); diff --git a/src/lean/ffi/ix/constant.rs b/src/lean/ffi/ix/constant.rs index 727b1d90..3e0313ef 100644 --- a/src/lean/ffi/ix/constant.rs +++ b/src/lean/ffi/ix/constant.rs @@ -10,24 +10,18 @@ //! - Tag 6: ctorInfo (v : ConstructorVal) //! 
- Tag 7: recInfo (v : RecursorVal) -use std::ffi::c_void; - use crate::ix::env::{ AxiomVal, ConstantInfo, ConstantVal, ConstructorVal, DefinitionSafety, DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; -use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; use crate::lean::nat::Nat; use crate::lean::obj::{IxConstantInfo, LeanArray, LeanCtor, LeanObj}; -use crate::lean::{ - lean_array_data, lean_ctor_scalar_u8, lean_is_scalar, -}; -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::expr::{build_expr, decode_ix_expr}; -use super::name::{ +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::primitives::build_nat; +use crate::lean::ffi::ix::expr::{build_expr, decode_ix_expr}; +use crate::lean::ffi::ix::name::{ build_name, build_name_array, decode_ix_name, decode_name_array, }; @@ -273,221 +267,167 @@ fn build_recursor_rules( // ConstantInfo Decoders // ============================================================================= -/// Decode Ix.ConstantVal from Lean pointer. +/// Decode Ix.ConstantVal from Lean object. 
/// ConstantVal = { name : Name, levelParams : Array Name, type : Expr } -pub fn decode_constant_val(ptr: *const c_void) -> ConstantVal { - unsafe { - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let level_params_ptr = lean_ctor_get(ptr as *mut _, 1); - let type_ptr = lean_ctor_get(ptr as *mut _, 2); - - let name = decode_ix_name(name_ptr.cast()); - - let level_params: Vec = lean_array_data(level_params_ptr.cast()) - .iter() - .map(|&p| decode_ix_name(p)) - .collect(); - - let typ = decode_ix_expr(type_ptr.cast()); - - ConstantVal { name, level_params, typ } - } +pub fn decode_constant_val(obj: LeanObj) -> ConstantVal { + let ctor = obj.as_ctor(); + let name = decode_ix_name(ctor.get(0)); + let level_params: Vec = + ctor.get(1).as_array() + .map(decode_ix_name); + let typ = decode_ix_expr(ctor.get(2)); + + ConstantVal { name, level_params, typ } } -/// Decode Lean.ReducibilityHints from Lean pointer. -pub fn decode_reducibility_hints(ptr: *const c_void) -> ReducibilityHints { - unsafe { - if lean_is_scalar(ptr) { - let tag = (ptr as usize) >> 1; - match tag { - 0 => return ReducibilityHints::Opaque, - 1 => return ReducibilityHints::Abbrev, - _ => panic!("Invalid ReducibilityHints scalar tag: {}", tag), - } - } - - let tag = lean_obj_tag(ptr as *mut _); +/// Decode Lean.ReducibilityHints from Lean object. 
+pub fn decode_reducibility_hints(obj: LeanObj) -> ReducibilityHints { + if obj.is_scalar() { + let tag = obj.as_ptr() as usize >> 1; match tag { - 0 => ReducibilityHints::Opaque, - 1 => ReducibilityHints::Abbrev, - 2 => { - // regular: 0 obj fields, 4 scalar bytes (UInt32) - let ctor_ptr = ptr.cast::(); - let h = *(ctor_ptr.add(8).cast::()); - ReducibilityHints::Regular(h) - }, - _ => panic!("Invalid ReducibilityHints tag: {}", tag), + 0 => return ReducibilityHints::Opaque, + 1 => return ReducibilityHints::Abbrev, + _ => panic!("Invalid ReducibilityHints scalar tag: {}", tag), } } + + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => ReducibilityHints::Opaque, + 1 => ReducibilityHints::Abbrev, + 2 => { + // regular: 0 obj fields, 4 scalar bytes (UInt32) + let h = unsafe { *(obj.as_ptr().cast::().add(8).cast::()) }; + ReducibilityHints::Regular(h) + }, + _ => panic!("Invalid ReducibilityHints tag: {}", ctor.tag()), + } } -/// Decode Ix.RecursorRule from Lean pointer. -fn decode_recursor_rule(ptr: *const c_void) -> RecursorRule { - unsafe { - let ctor_ptr = lean_ctor_get(ptr as *mut _, 0); - let n_fields_ptr = lean_ctor_get(ptr as *mut _, 1); - let rhs_ptr = lean_ctor_get(ptr as *mut _, 2); - - RecursorRule { - ctor: decode_ix_name(ctor_ptr.cast()), - n_fields: Nat::from_ptr(n_fields_ptr.cast()), - rhs: decode_ix_expr(rhs_ptr.cast()), - } +/// Decode Ix.RecursorRule from Lean object. +fn decode_recursor_rule(obj: LeanObj) -> RecursorRule { + let ctor = obj.as_ctor(); + RecursorRule { + ctor: decode_ix_name(ctor.get(0)), + n_fields: Nat::from_obj(ctor.get(1)), + rhs: decode_ix_expr(ctor.get(2)), } } -/// Decode Ix.ConstantInfo from Lean pointer. -pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); +/// Decode Ix.ConstantInfo from Lean object. 
+pub fn decode_constant_info(obj: LeanObj) -> ConstantInfo { + let outer = obj.as_ctor(); + let inner_obj = outer.get(0); + let inner = inner_obj.as_ctor(); - match tag { - 0 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 1, 0) != 0; - - ConstantInfo::AxiomInfo(AxiomVal { - cnst: decode_constant_val(cnst_ptr.cast()), - is_unsafe, - }) - }, - 1 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let value_ptr = lean_ctor_get(inner_ptr, 1); - let hints_ptr = lean_ctor_get(inner_ptr, 2); - let all_ptr = lean_ctor_get(inner_ptr, 3); - - let safety_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 4, 0); - let safety = match safety_byte { - 0 => DefinitionSafety::Unsafe, - 1 => DefinitionSafety::Safe, - 2 => DefinitionSafety::Partial, - _ => panic!("Invalid DefinitionSafety: {}", safety_byte), - }; - - ConstantInfo::DefnInfo(DefinitionVal { - cnst: decode_constant_val(cnst_ptr.cast()), - value: decode_ix_expr(value_ptr.cast()), - hints: decode_reducibility_hints(hints_ptr.cast()), - safety, - all: decode_name_array(all_ptr.cast()), - }) - }, - 2 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let value_ptr = lean_ctor_get(inner_ptr, 1); - let all_ptr = lean_ctor_get(inner_ptr, 2); - - ConstantInfo::ThmInfo(TheoremVal { - cnst: decode_constant_val(cnst_ptr.cast()), - value: decode_ix_expr(value_ptr.cast()), - all: decode_name_array(all_ptr.cast()), - }) - }, - 3 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let value_ptr = lean_ctor_get(inner_ptr, 1); - let all_ptr = lean_ctor_get(inner_ptr, 2); - let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 3, 0) != 0; - - ConstantInfo::OpaqueInfo(OpaqueVal { - cnst: decode_constant_val(cnst_ptr.cast()), - value: decode_ix_expr(value_ptr.cast()), - is_unsafe, - all: decode_name_array(all_ptr.cast()), - }) - }, - 4 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - - let kind_byte = lean_ctor_scalar_u8(inner_ptr.cast(), 1, 0); - let kind = match kind_byte { - 
0 => QuotKind::Type, - 1 => QuotKind::Ctor, - 2 => QuotKind::Lift, - 3 => QuotKind::Ind, - _ => panic!("Invalid QuotKind: {}", kind_byte), - }; - - ConstantInfo::QuotInfo(QuotVal { - cnst: decode_constant_val(cnst_ptr.cast()), - kind, - }) - }, - 5 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let num_params_ptr = lean_ctor_get(inner_ptr, 1); - let num_indices_ptr = lean_ctor_get(inner_ptr, 2); - let all_ptr = lean_ctor_get(inner_ptr, 3); - let ctors_ptr = lean_ctor_get(inner_ptr, 4); - let num_nested_ptr = lean_ctor_get(inner_ptr, 5); - - let is_rec = lean_ctor_scalar_u8(inner_ptr.cast(), 6, 0) != 0; - let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 6, 1) != 0; - let is_reflexive = lean_ctor_scalar_u8(inner_ptr.cast(), 6, 2) != 0; - - ConstantInfo::InductInfo(InductiveVal { - cnst: decode_constant_val(cnst_ptr.cast()), - num_params: Nat::from_ptr(num_params_ptr.cast()), - num_indices: Nat::from_ptr(num_indices_ptr.cast()), - all: decode_name_array(all_ptr.cast()), - ctors: decode_name_array(ctors_ptr.cast()), - num_nested: Nat::from_ptr(num_nested_ptr.cast()), - is_rec, - is_unsafe, - is_reflexive, - }) - }, - 6 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let induct_ptr = lean_ctor_get(inner_ptr, 1); - let cidx_ptr = lean_ctor_get(inner_ptr, 2); - let num_params_ptr = lean_ctor_get(inner_ptr, 3); - let num_fields_ptr = lean_ctor_get(inner_ptr, 4); - - let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 5, 0) != 0; - - ConstantInfo::CtorInfo(ConstructorVal { - cnst: decode_constant_val(cnst_ptr.cast()), - induct: decode_ix_name(induct_ptr.cast()), - cidx: Nat::from_ptr(cidx_ptr.cast()), - num_params: Nat::from_ptr(num_params_ptr.cast()), - num_fields: Nat::from_ptr(num_fields_ptr.cast()), - is_unsafe, - }) - }, - 7 => { - let cnst_ptr = lean_ctor_get(inner_ptr, 0); - let all_ptr = lean_ctor_get(inner_ptr, 1); - let num_params_ptr = lean_ctor_get(inner_ptr, 2); - let num_indices_ptr = lean_ctor_get(inner_ptr, 3); - let num_motives_ptr = 
lean_ctor_get(inner_ptr, 4); - let num_minors_ptr = lean_ctor_get(inner_ptr, 5); - let rules_ptr = lean_ctor_get(inner_ptr, 6); - - let k = lean_ctor_scalar_u8(inner_ptr.cast(), 7, 0) != 0; - let is_unsafe = lean_ctor_scalar_u8(inner_ptr.cast(), 7, 1) != 0; - - let rules: Vec = lean_array_data(rules_ptr.cast()) - .iter() - .map(|&p| decode_recursor_rule(p)) - .collect(); - - ConstantInfo::RecInfo(RecursorVal { - cnst: decode_constant_val(cnst_ptr.cast()), - all: decode_name_array(all_ptr.cast()), - num_params: Nat::from_ptr(num_params_ptr.cast()), - num_indices: Nat::from_ptr(num_indices_ptr.cast()), - num_motives: Nat::from_ptr(num_motives_ptr.cast()), - num_minors: Nat::from_ptr(num_minors_ptr.cast()), - rules, - k, - is_unsafe, - }) - }, - _ => panic!("Invalid ConstantInfo tag: {}", tag), - } + match outer.tag() { + 0 => { + let is_unsafe = inner.scalar_u8(1, 0) != 0; + + ConstantInfo::AxiomInfo(AxiomVal { + cnst: decode_constant_val(inner.get(0)), + is_unsafe, + }) + }, + 1 => { + let safety_byte = inner.scalar_u8(4, 0); + let safety = match safety_byte { + 0 => DefinitionSafety::Unsafe, + 1 => DefinitionSafety::Safe, + 2 => DefinitionSafety::Partial, + _ => panic!("Invalid DefinitionSafety: {}", safety_byte), + }; + + ConstantInfo::DefnInfo(DefinitionVal { + cnst: decode_constant_val(inner.get(0)), + value: decode_ix_expr(inner.get(1)), + hints: decode_reducibility_hints(inner.get(2)), + safety, + all: decode_name_array(inner.get(3)), + }) + }, + 2 => { + ConstantInfo::ThmInfo(TheoremVal { + cnst: decode_constant_val(inner.get(0)), + value: decode_ix_expr(inner.get(1)), + all: decode_name_array(inner.get(2)), + }) + }, + 3 => { + let is_unsafe = inner.scalar_u8(3, 0) != 0; + + ConstantInfo::OpaqueInfo(OpaqueVal { + cnst: decode_constant_val(inner.get(0)), + value: decode_ix_expr(inner.get(1)), + is_unsafe, + all: decode_name_array(inner.get(2)), + }) + }, + 4 => { + let kind_byte = inner.scalar_u8(1, 0); + let kind = match kind_byte { + 0 => QuotKind::Type, + 
1 => QuotKind::Ctor, + 2 => QuotKind::Lift, + 3 => QuotKind::Ind, + _ => panic!("Invalid QuotKind: {}", kind_byte), + }; + + ConstantInfo::QuotInfo(QuotVal { + cnst: decode_constant_val(inner.get(0)), + kind, + }) + }, + 5 => { + let is_rec = inner.scalar_u8(6, 0) != 0; + let is_unsafe = inner.scalar_u8(6, 1) != 0; + let is_reflexive = inner.scalar_u8(6, 2) != 0; + + ConstantInfo::InductInfo(InductiveVal { + cnst: decode_constant_val(inner.get(0)), + num_params: Nat::from_obj(inner.get(1)), + num_indices: Nat::from_obj(inner.get(2)), + all: decode_name_array(inner.get(3)), + ctors: decode_name_array(inner.get(4)), + num_nested: Nat::from_obj(inner.get(5)), + is_rec, + is_unsafe, + is_reflexive, + }) + }, + 6 => { + let is_unsafe = inner.scalar_u8(5, 0) != 0; + + ConstantInfo::CtorInfo(ConstructorVal { + cnst: decode_constant_val(inner.get(0)), + induct: decode_ix_name(inner.get(1)), + cidx: Nat::from_obj(inner.get(2)), + num_params: Nat::from_obj(inner.get(3)), + num_fields: Nat::from_obj(inner.get(4)), + is_unsafe, + }) + }, + 7 => { + let k = inner.scalar_u8(7, 0) != 0; + let is_unsafe = inner.scalar_u8(7, 1) != 0; + + let rules: Vec = + inner.get(6).as_array() + .map(decode_recursor_rule); + + ConstantInfo::RecInfo(RecursorVal { + cnst: decode_constant_val(inner.get(0)), + all: decode_name_array(inner.get(1)), + num_params: Nat::from_obj(inner.get(2)), + num_indices: Nat::from_obj(inner.get(3)), + num_motives: Nat::from_obj(inner.get(4)), + num_minors: Nat::from_obj(inner.get(5)), + rules, + k, + is_unsafe, + }) + }, + _ => panic!("Invalid ConstantInfo tag: {}", outer.tag()), } } @@ -496,7 +436,7 @@ pub fn decode_constant_info(ptr: *const c_void) -> ConstantInfo { pub extern "C" fn rs_roundtrip_ix_constant_info( info_ptr: IxConstantInfo, ) -> IxConstantInfo { - let info = decode_constant_info(info_ptr.as_ptr()); + let info = decode_constant_info(*info_ptr); let mut cache = LeanBuildCache::new(); build_constant_info(&mut cache, &info) } diff --git 
a/src/lean/ffi/ix/data.rs b/src/lean/ffi/ix/data.rs index 5b6d6221..a3ca65c4 100644 --- a/src/lean/ffi/ix/data.rs +++ b/src/lean/ffi/ix/data.rs @@ -1,23 +1,17 @@ //! Ix.DataValue, Ix.Syntax, Ix.SourceInfo build/decode/roundtrip FFI. -use std::ffi::c_void; - use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, }; -use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; use crate::lean::nat::Nat; use crate::lean::obj::{ IxDataValue, IxInt, IxSourceInfo, IxSubstring, IxSyntax, - IxSyntaxPreresolved, LeanArray, LeanCtor, LeanString, -}; -use crate::lean::{ - lean_array_data, lean_ctor_scalar_u8, lean_is_scalar, lean_obj_to_string, + IxSyntaxPreresolved, LeanArray, LeanCtor, LeanObj, LeanString, }; -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::name::{build_name, decode_ix_name}; +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::primitives::build_nat; +use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; /// Build a Ix.Int (ofNat or negSucc). pub fn build_int(int: &Int) -> IxInt { @@ -229,205 +223,160 @@ pub fn build_kvmap( // Decode Functions // ============================================================================= -/// Decode Ix.Int from Lean pointer. +/// Decode Ix.Int from Lean object. /// Ix.Int: ofNat (tag 0, 1 field) | negSucc (tag 1, 1 field) -pub fn decode_ix_int(ptr: *const c_void) -> Int { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr.cast()); - match tag { - 0 => Int::OfNat(nat), - 1 => Int::NegSucc(nat), - _ => panic!("Invalid Ix.Int tag: {}", tag), - } +pub fn decode_ix_int(obj: LeanObj) -> Int { + let ctor = obj.as_ctor(); + let nat = Nat::from_obj(ctor.get(0)); + match ctor.tag() { + 0 => Int::OfNat(nat), + 1 => Int::NegSucc(nat), + _ => panic!("Invalid Ix.Int tag: {}", ctor.tag()), } } -/// Decode Ix.DataValue from a Lean pointer. 
-pub fn decode_data_value(ptr: *const c_void) -> DataValue { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - - match tag { - 0 => { - // ofString: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfString(lean_obj_to_string(inner_ptr as *const _)) - }, - 1 => { - // ofBool: 0 object fields, 1 scalar byte - let b = lean_ctor_scalar_u8(ptr, 0, 0) != 0; - DataValue::OfBool(b) - }, - 2 => { - // ofName: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfName(decode_ix_name(inner_ptr.cast())) - }, - 3 => { - // ofNat: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfNat(Nat::from_ptr(inner_ptr.cast())) - }, - 4 => { - // ofInt: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - let int_tag = lean_obj_tag(inner_ptr.cast()); - let nat_ptr = lean_ctor_get(inner_ptr.cast(), 0); - let nat = Nat::from_ptr(nat_ptr.cast()); - match int_tag { - 0 => DataValue::OfInt(Int::OfNat(nat)), - 1 => DataValue::OfInt(Int::NegSucc(nat)), - _ => panic!("Invalid Int tag: {}", int_tag), - } - }, - 5 => { - // ofSyntax: 1 object field - let inner_ptr = lean_ctor_get(ptr as *mut _, 0); - DataValue::OfSyntax(decode_ix_syntax(inner_ptr.cast()).into()) - }, - _ => panic!("Invalid DataValue tag: {}", tag), - } +/// Decode Ix.DataValue from a Lean object. 
+pub fn decode_data_value(obj: LeanObj) -> DataValue { + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => { + // ofString: 1 object field + DataValue::OfString( + ctor.get(0).as_string().to_string(), + ) + }, + 1 => { + // ofBool: 0 object fields, 1 scalar byte + let b = ctor.scalar_u8(0, 0) != 0; + DataValue::OfBool(b) + }, + 2 => { + // ofName: 1 object field + DataValue::OfName(decode_ix_name(ctor.get(0))) + }, + 3 => { + // ofNat: 1 object field + DataValue::OfNat(Nat::from_obj(ctor.get(0))) + }, + 4 => { + // ofInt: 1 object field + let inner = ctor.get(0); + let inner_ctor = inner.as_ctor(); + let nat = Nat::from_obj(inner_ctor.get(0)); + match inner_ctor.tag() { + 0 => DataValue::OfInt(Int::OfNat(nat)), + 1 => DataValue::OfInt(Int::NegSucc(nat)), + _ => panic!("Invalid Int tag: {}", inner_ctor.tag()), + } + }, + 5 => { + // ofSyntax: 1 object field + DataValue::OfSyntax(decode_ix_syntax(ctor.get(0)).into()) + }, + _ => panic!("Invalid DataValue tag: {}", ctor.tag()), } } -/// Decode Ix.Syntax from a Lean pointer. 
-pub fn decode_ix_syntax(ptr: *const c_void) -> Syntax { - unsafe { - if lean_is_scalar(ptr) { - return Syntax::Missing; - } - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => Syntax::Missing, - 1 => { - // node: info, kind, args - let info_ptr = lean_ctor_get(ptr as *mut _, 0); - let kind_ptr = lean_ctor_get(ptr as *mut _, 1); - let args_ptr = lean_ctor_get(ptr as *mut _, 2); - - let info = decode_ix_source_info(info_ptr.cast()); - let kind = decode_ix_name(kind_ptr.cast()); - let args: Vec = lean_array_data(args_ptr.cast()) - .iter() - .map(|&p| decode_ix_syntax(p)) - .collect(); - - Syntax::Node(info, kind, args) - }, - 2 => { - // atom: info, val - let info_ptr = lean_ctor_get(ptr as *mut _, 0); - let val_ptr = lean_ctor_get(ptr as *mut _, 1); - - let info = decode_ix_source_info(info_ptr.cast()); - Syntax::Atom(info, lean_obj_to_string(val_ptr.cast())) - }, - 3 => { - // ident: info, rawVal, val, preresolved - let info_ptr = lean_ctor_get(ptr as *mut _, 0); - let raw_val_ptr = lean_ctor_get(ptr as *mut _, 1); - let val_ptr = lean_ctor_get(ptr as *mut _, 2); - let preresolved_ptr = lean_ctor_get(ptr as *mut _, 3); - - let info = decode_ix_source_info(info_ptr.cast()); - let raw_val = decode_substring(raw_val_ptr.cast()); - let val = decode_ix_name(val_ptr.cast()); - let preresolved: Vec = - lean_array_data(preresolved_ptr.cast()) - .iter() - .map(|&p| decode_syntax_preresolved(p)) - .collect(); - - Syntax::Ident(info, raw_val, val, preresolved) - }, - _ => panic!("Invalid Syntax tag: {}", tag), - } +/// Decode Ix.Syntax from a Lean object. 
+pub fn decode_ix_syntax(obj: LeanObj) -> Syntax { + if obj.is_scalar() { + return Syntax::Missing; + } + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => Syntax::Missing, + 1 => { + // node: info, kind, args + let info = decode_ix_source_info(ctor.get(0)); + let kind = decode_ix_name(ctor.get(1)); + let args: Vec = + ctor.get(2).as_array() + .map(decode_ix_syntax); + + Syntax::Node(info, kind, args) + }, + 2 => { + // atom: info, val + let info = decode_ix_source_info(ctor.get(0)); + Syntax::Atom( + info, + ctor.get(1).as_string().to_string(), + ) + }, + 3 => { + // ident: info, rawVal, val, preresolved + let info = decode_ix_source_info(ctor.get(0)); + let raw_val = decode_substring(ctor.get(1)); + let val = decode_ix_name(ctor.get(2)); + let preresolved: Vec = + ctor.get(3).as_array() + .map(decode_syntax_preresolved); + + Syntax::Ident(info, raw_val, val, preresolved) + }, + _ => panic!("Invalid Syntax tag: {}", ctor.tag()), } } /// Decode Ix.SourceInfo. -pub fn decode_ix_source_info(ptr: *const c_void) -> SourceInfo { - unsafe { - if lean_is_scalar(ptr) { - return SourceInfo::None; - } - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // original - let leading_ptr = lean_ctor_get(ptr as *mut _, 0); - let pos_ptr = lean_ctor_get(ptr as *mut _, 1); - let trailing_ptr = lean_ctor_get(ptr as *mut _, 2); - let end_pos_ptr = lean_ctor_get(ptr as *mut _, 3); - - SourceInfo::Original( - decode_substring(leading_ptr.cast()), - Nat::from_ptr(pos_ptr.cast()), - decode_substring(trailing_ptr.cast()), - Nat::from_ptr(end_pos_ptr.cast()), - ) - }, - 1 => { - // synthetic: 2 obj fields (pos, end_pos), 1 scalar byte (canonical) - let pos_ptr = lean_ctor_get(ptr as *mut _, 0); - let end_pos_ptr = lean_ctor_get(ptr as *mut _, 1); - - let canonical = lean_ctor_scalar_u8(ptr, 2, 0) != 0; - - SourceInfo::Synthetic( - Nat::from_ptr(pos_ptr.cast()), - Nat::from_ptr(end_pos_ptr.cast()), - canonical, - ) - }, - 2 => SourceInfo::None, - _ => panic!("Invalid 
SourceInfo tag: {}", tag), - } +pub fn decode_ix_source_info(obj: LeanObj) -> SourceInfo { + if obj.is_scalar() { + return SourceInfo::None; + } + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => { + // original + SourceInfo::Original( + decode_substring(ctor.get(0)), + Nat::from_obj(ctor.get(1)), + decode_substring(ctor.get(2)), + Nat::from_obj(ctor.get(3)), + ) + }, + 1 => { + // synthetic: 2 obj fields (pos, end_pos), 1 scalar byte (canonical) + let canonical = ctor.scalar_u8(2, 0) != 0; + + SourceInfo::Synthetic( + Nat::from_obj(ctor.get(0)), + Nat::from_obj(ctor.get(1)), + canonical, + ) + }, + 2 => SourceInfo::None, + _ => panic!("Invalid SourceInfo tag: {}", ctor.tag()), } } /// Decode Ix.Substring. -pub fn decode_substring(ptr: *const c_void) -> Substring { - unsafe { - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - let start_ptr = lean_ctor_get(ptr as *mut _, 1); - let stop_ptr = lean_ctor_get(ptr as *mut _, 2); - - Substring { - str: lean_obj_to_string(str_ptr.cast()), - start_pos: Nat::from_ptr(start_ptr.cast()), - stop_pos: Nat::from_ptr(stop_ptr.cast()), - } +pub fn decode_substring(obj: LeanObj) -> Substring { + let ctor = obj.as_ctor(); + Substring { + str: ctor.get(0).as_string().to_string(), + start_pos: Nat::from_obj(ctor.get(1)), + stop_pos: Nat::from_obj(ctor.get(2)), } } /// Decode Ix.SyntaxPreresolved. 
-pub fn decode_syntax_preresolved(ptr: *const c_void) -> SyntaxPreresolved { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // namespace - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - SyntaxPreresolved::Namespace(decode_ix_name(name_ptr.cast())) - }, - 1 => { - // decl - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let aliases_ptr = lean_ctor_get(ptr as *mut _, 1); - - let name = decode_ix_name(name_ptr.cast()); - let aliases: Vec = lean_array_data(aliases_ptr.cast()) - .iter() - .map(|&p| lean_obj_to_string(p)) - .collect(); - - SyntaxPreresolved::Decl(name, aliases) - }, - _ => panic!("Invalid SyntaxPreresolved tag: {}", tag), - } +pub fn decode_syntax_preresolved(obj: LeanObj) -> SyntaxPreresolved { + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => { + // namespace + SyntaxPreresolved::Namespace(decode_ix_name(ctor.get(0))) + }, + 1 => { + // decl + let name = decode_ix_name(ctor.get(0)); + let aliases: Vec = + ctor.get(1).as_array() + .map(|obj| obj.as_string().to_string()); + + SyntaxPreresolved::Decl(name, aliases) + }, + _ => panic!("Invalid SyntaxPreresolved tag: {}", ctor.tag()), } } @@ -438,7 +387,7 @@ pub fn decode_syntax_preresolved(ptr: *const c_void) -> SyntaxPreresolved { /// Round-trip an Ix.Int: decode from Lean, re-encode. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_int(int_ptr: IxInt) -> IxInt { - let int_val = decode_ix_int(int_ptr.as_ptr()); + let int_val = decode_ix_int(*int_ptr); build_int(&int_val) } @@ -447,7 +396,7 @@ pub extern "C" fn rs_roundtrip_ix_int(int_ptr: IxInt) -> IxInt { pub extern "C" fn rs_roundtrip_ix_substring( sub_ptr: IxSubstring, ) -> IxSubstring { - let sub = decode_substring(sub_ptr.as_ptr()); + let sub = decode_substring(*sub_ptr); build_substring(&sub) } @@ -456,7 +405,7 @@ pub extern "C" fn rs_roundtrip_ix_substring( pub extern "C" fn rs_roundtrip_ix_source_info( si_ptr: IxSourceInfo, ) -> IxSourceInfo { - let si = decode_ix_source_info(si_ptr.as_ptr()); + let si = decode_ix_source_info(*si_ptr); build_source_info(&si) } @@ -465,7 +414,7 @@ pub extern "C" fn rs_roundtrip_ix_source_info( pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( sp_ptr: IxSyntaxPreresolved, ) -> IxSyntaxPreresolved { - let sp = decode_syntax_preresolved(sp_ptr.as_ptr()); + let sp = decode_syntax_preresolved(*sp_ptr); let mut cache = LeanBuildCache::new(); build_syntax_preresolved(&mut cache, &sp) } @@ -473,7 +422,7 @@ pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( /// Round-trip an Ix.Syntax: decode from Lean, re-encode. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_syntax(syn_ptr: IxSyntax) -> IxSyntax { - let syn = decode_ix_syntax(syn_ptr.as_ptr()); + let syn = decode_ix_syntax(*syn_ptr); let mut cache = LeanBuildCache::new(); build_syntax(&mut cache, &syn) } @@ -483,7 +432,7 @@ pub extern "C" fn rs_roundtrip_ix_syntax(syn_ptr: IxSyntax) -> IxSyntax { pub extern "C" fn rs_roundtrip_ix_data_value( dv_ptr: IxDataValue, ) -> IxDataValue { - let dv = decode_data_value(dv_ptr.as_ptr()); + let dv = decode_data_value(*dv_ptr); let mut cache = LeanBuildCache::new(); build_data_value(&mut cache, &dv) } diff --git a/src/lean/ffi/ix/env.rs b/src/lean/ffi/ix/env.rs index 6ebbdb0a..a2a7da51 100644 --- a/src/lean/ffi/ix/env.rs +++ b/src/lean/ffi/ix/env.rs @@ -1,19 +1,13 @@ //! Ix.Environment build/decode/roundtrip FFI. -use std::ffi::c_void; - use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; -use crate::lean::lean::{ - lean_alloc_array, lean_alloc_ctor, lean_array_set_core, lean_ctor_get, - lean_ctor_set, lean_obj_tag, -}; -use crate::lean::{lean_array_data, lean_box_fn, lean_is_scalar}; +use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; -use super::super::builder::LeanBuildCache; -use super::constant::{build_constant_info, decode_constant_info}; -use super::name::{build_name, decode_ix_name}; +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::ix::constant::{build_constant_info, decode_constant_info}; +use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= // HashMap Building @@ -28,49 +22,44 @@ use super::name::{build_name, decode_ix_name}; /// /// AssocList α β = nil | cons (key : α) (value : β) (tail : AssocList α β) pub fn build_hashmap_from_pairs( - pairs: Vec<(*mut c_void, *mut c_void, u64)>, // (key_obj, val_obj, hash) -) -> *mut c_void { + pairs: Vec<(LeanObj, LeanObj, u64)>, // (key_obj, val_obj, hash) +) -> LeanObj { let size = pairs.len(); let 
bucket_count = (size * 4 / 3 + 1).next_power_of_two().max(8); - unsafe { - // Create array of AssocLists (initially all nil = boxed 0) - let buckets = lean_alloc_array(bucket_count, bucket_count); - for i in 0..bucket_count { - lean_array_set_core(buckets, i, lean_box_fn(0).cast()); // nil - } + // Create array of AssocLists (initially all nil = boxed 0) + let buckets = LeanArray::alloc(bucket_count); + let nil = LeanObj::box_usize(0); + for i in 0..bucket_count { + buckets.set(i, nil); // nil + } - // Insert entries - for (key_obj, val_obj, hash) in pairs { - let bucket_idx = - usize::try_from(hash).expect("hash overflows usize") % bucket_count; + // Insert entries + for (key_obj, val_obj, hash) in pairs { + let bucket_idx = + usize::try_from(hash).expect("hash overflows usize") % bucket_count; - // Get current bucket (AssocList) - let current_tail = lean_array_data(buckets.cast())[bucket_idx]; + // Get current bucket (AssocList) + let current_tail = buckets.get(bucket_idx); - // cons (key : α) (value : β) (tail : AssocList α β) -- tag 1 - let cons = lean_alloc_ctor(1, 3, 0); - lean_ctor_set(cons, 0, key_obj.cast()); - lean_ctor_set(cons, 1, val_obj.cast()); - lean_ctor_set(cons, 2, current_tail as *mut _); + // cons (key : α) (value : β) (tail : AssocList α β) -- tag 1 + let cons = LeanCtor::alloc(1, 3, 0); + cons.set(0, key_obj); + cons.set(1, val_obj); + cons.set(2, current_tail); - lean_array_set_core(buckets, bucket_idx, cons); - } - - // Build Raw { size : Nat, buckets : Array } - // Due to unboxing, this IS the HashMap directly - // Field 0 = size, Field 1 = buckets (2 object fields, no scalars) - let size_obj = if size <= (usize::MAX >> 1) { - lean_box_fn(size).cast() - } else { - crate::lean::lean::lean_uint64_to_nat(size as u64) - }; - - let raw = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(raw, 0, size_obj); - lean_ctor_set(raw, 1, buckets); - raw.cast() + buckets.set(bucket_idx, cons); } + + // Build Raw { size : Nat, buckets : Array } + // Due to 
unboxing, this IS the HashMap directly + // Field 0 = size, Field 1 = buckets (2 object fields, no scalars) + let size_obj = LeanObj::box_usize(size); + + let raw = LeanCtor::alloc(0, 2, 0); + raw.set(0, size_obj); + raw.set(1, buckets); + *raw } // ============================================================================= @@ -89,23 +78,20 @@ pub fn build_hashmap_from_pairs( pub fn build_raw_environment( cache: &mut LeanBuildCache, consts: &FxHashMap, -) -> *mut c_void { - unsafe { - // Build consts array: Array (Name × ConstantInfo) - // RawEnvironment is a single-field structure that may be unboxed to just the array - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (name, info)) in consts.iter().enumerate() { - let key_obj = build_name(cache, name); - let val_obj = build_constant_info(cache, info); - // Build pair (Name × ConstantInfo) - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj.as_mut_ptr().cast()); - lean_ctor_set(pair, 1, val_obj.as_mut_ptr().cast()); - lean_array_set_core(consts_arr, i, pair); - } - - consts_arr.cast() +) -> LeanObj { + // Build consts array: Array (Name × ConstantInfo) + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (name, info)) in consts.iter().enumerate() { + let key_obj = build_name(cache, name); + let val_obj = build_constant_info(cache, info); + // Build pair (Name × ConstantInfo) + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, key_obj); + pair.set(1, val_obj); + consts_arr.set(i, pair); } + + *consts_arr } // ============================================================================= @@ -114,37 +100,31 @@ pub fn build_raw_environment( /// Decode a HashMap's AssocList and collect key-value pairs using a custom decoder. 
fn decode_assoc_list( - list_ptr: *const c_void, + obj: LeanObj, decode_key: FK, decode_val: FV, ) -> Vec<(K, V)> where - FK: Fn(*const c_void) -> K, - FV: Fn(*const c_void) -> V, + FK: Fn(LeanObj) -> K, + FV: Fn(LeanObj) -> V, { let mut result = Vec::new(); - let mut current = list_ptr; + let mut current = obj; loop { - unsafe { - if lean_is_scalar(current) { - break; - } - - let tag = lean_obj_tag(current as *mut _); - if tag == 0 { - // AssocList.nil - break; - } - - // AssocList.cons: 3 fields (key, value, tail) - let key_ptr = lean_ctor_get(current as *mut _, 0); - let value_ptr = lean_ctor_get(current as *mut _, 1); - let tail_ptr = lean_ctor_get(current as *mut _, 2); - - result.push((decode_key(key_ptr.cast()), decode_val(value_ptr.cast()))); - current = tail_ptr.cast(); + if current.is_scalar() { + break; } + + let ctor = current.as_ctor(); + if ctor.tag() == 0 { + // AssocList.nil + break; + } + + // AssocList.cons: 3 fields (key, value, tail) + result.push((decode_key(ctor.get(0)), decode_val(ctor.get(1)))); + current = ctor.get(2); } result @@ -158,30 +138,29 @@ where /// - DHashMap { inner : Raw, wf : Prop } unboxes to Raw (Prop is erased) /// - Raw { size : Nat, buckets : Array } - field 0 = size, field 1 = buckets fn decode_hashmap( - map_ptr: *const c_void, + obj: LeanObj, decode_key: FK, decode_val: FV, ) -> Vec<(K, V)> where - FK: Fn(*const c_void) -> K + Copy, - FV: Fn(*const c_void) -> V + Copy, + FK: Fn(LeanObj) -> K + Copy, + FV: Fn(LeanObj) -> V + Copy, { - unsafe { - // Raw layout: field 0 = size (Nat), field 1 = buckets (Array) - let _size_ptr = lean_ctor_get(map_ptr as *mut _, 0); // unused but needed for layout - let buckets_ptr = lean_ctor_get(map_ptr as *mut _, 1); - - let mut pairs = Vec::new(); - for &bucket_ptr in lean_array_data(buckets_ptr.cast()) { - let bucket_pairs = decode_assoc_list(bucket_ptr, decode_key, decode_val); - pairs.extend(bucket_pairs); - } - - pairs + let ctor = obj.as_ctor(); + // Raw layout: field 0 = size 
(Nat), field 1 = buckets (Array) + let _size = ctor.get(0); // unused but needed for layout + let buckets = ctor.get(1).as_array(); + + let mut pairs = Vec::new(); + for bucket in buckets.iter() { + let bucket_pairs = decode_assoc_list(bucket, decode_key, decode_val); + pairs.extend(bucket_pairs); } + + pairs } -/// Decode Ix.Environment from Lean pointer. +/// Decode Ix.Environment from Lean object. /// /// Ix.Environment = { /// consts : HashMap Name ConstantInfo @@ -190,10 +169,10 @@ where /// NOTE: Environment with a single field is UNBOXED by Lean, /// so the pointer IS the HashMap directly, not a structure containing it. pub fn decode_ix_environment( - ptr: *const c_void, + obj: LeanObj, ) -> FxHashMap { - // Environment is unboxed - ptr IS the HashMap directly - let consts_pairs = decode_hashmap(ptr, decode_ix_name, decode_constant_info); + // Environment is unboxed - obj IS the HashMap directly + let consts_pairs = decode_hashmap(obj, decode_ix_name, decode_constant_info); let mut consts: FxHashMap = FxHashMap::default(); for (name, info) in consts_pairs { consts.insert(name, info); @@ -201,67 +180,58 @@ pub fn decode_ix_environment( consts } -/// Decode Ix.RawEnvironment from Lean pointer into HashMap. +/// Decode Ix.RawEnvironment from Lean object into HashMap. /// RawEnvironment = { consts : Array (Name × ConstantInfo) } /// NOTE: Unboxed to just Array. This version deduplicates by name. 
pub fn decode_ix_raw_environment( - ptr: *const c_void, + obj: LeanObj, ) -> FxHashMap { - unsafe { - // RawEnvironment is a single-field structure that may be unboxed - // Try treating ptr as the array directly first - let mut consts: FxHashMap = FxHashMap::default(); - - for &pair_ptr in lean_array_data(ptr) { - let name_ptr = lean_ctor_get(pair_ptr as *mut _, 0); - let info_ptr = lean_ctor_get(pair_ptr as *mut _, 1); - let name = decode_ix_name(name_ptr.cast()); - let info = decode_constant_info(info_ptr.cast()); - consts.insert(name, info); - } + let arr = obj.as_array(); + let mut consts: FxHashMap = FxHashMap::default(); - consts + for pair_obj in arr.iter() { + let pair = pair_obj.as_ctor(); + let name = decode_ix_name(pair.get(0)); + let info = decode_constant_info(pair.get(1)); + consts.insert(name, info); } + + consts } -/// Decode Ix.RawEnvironment from Lean pointer preserving array structure. +/// Decode Ix.RawEnvironment from Lean object preserving array structure. /// This version preserves all entries including duplicates. pub fn decode_ix_raw_environment_vec( - ptr: *const c_void, + obj: LeanObj, ) -> Vec<(Name, ConstantInfo)> { - unsafe { - let data = lean_array_data(ptr); - let mut consts = Vec::with_capacity(data.len()); - - for &pair_ptr in data { - let name_ptr = lean_ctor_get(pair_ptr as *mut _, 0); - let info_ptr = lean_ctor_get(pair_ptr as *mut _, 1); - let name = decode_ix_name(name_ptr.cast()); - let info = decode_constant_info(info_ptr.cast()); - consts.push((name, info)); - } - - consts + let arr = obj.as_array(); + let mut consts = Vec::with_capacity(arr.len()); + + for pair_obj in arr.iter() { + let pair = pair_obj.as_ctor(); + let name = decode_ix_name(pair.get(0)); + let info = decode_constant_info(pair.get(1)); + consts.push((name, info)); } + + consts } /// Build Ix.RawEnvironment from Vec, preserving order and duplicates. 
pub fn build_raw_environment_from_vec( cache: &mut LeanBuildCache, consts: &[(Name, ConstantInfo)], -) -> *mut c_void { - unsafe { - let consts_arr = lean_alloc_array(consts.len(), consts.len()); - for (i, (name, info)) in consts.iter().enumerate() { - let key_obj = build_name(cache, name); - let val_obj = build_constant_info(cache, info); - let pair = lean_alloc_ctor(0, 2, 0); - lean_ctor_set(pair, 0, key_obj.as_mut_ptr().cast()); - lean_ctor_set(pair, 1, val_obj.as_mut_ptr().cast()); - lean_array_set_core(consts_arr, i, pair); - } - consts_arr.cast() +) -> LeanObj { + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (name, info)) in consts.iter().enumerate() { + let key_obj = build_name(cache, name); + let val_obj = build_constant_info(cache, info); + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, key_obj); + pair.set(1, val_obj); + consts_arr.set(i, pair); } + *consts_arr } // ============================================================================= @@ -271,8 +241,8 @@ pub fn build_raw_environment_from_vec( /// Round-trip an Ix.Environment: decode from Lean, re-encode. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_environment( - env_ptr: *const c_void, -) -> *mut c_void { + env_ptr: LeanObj, +) -> LeanObj { let env = decode_ix_environment(env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); build_raw_environment(&mut cache, &env) @@ -282,8 +252,8 @@ pub extern "C" fn rs_roundtrip_ix_environment( /// Uses Vec-preserving functions to maintain array structure and order. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_raw_environment( - env_ptr: *const c_void, -) -> *mut c_void { + env_ptr: LeanObj, +) -> LeanObj { let env = decode_ix_raw_environment_vec(env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); build_raw_environment_from_vec(&mut cache, &env) diff --git a/src/lean/ffi/ix/expr.rs b/src/lean/ffi/ix/expr.rs index d52ba556..795723ad 100644 --- a/src/lean/ffi/ix/expr.rs +++ b/src/lean/ffi/ix/expr.rs @@ -14,22 +14,18 @@ //! - Tag 10: mdata (data : Array (Name × DataValue)) (expr : Expr) (hash : Address) //! - Tag 11: proj (typeName : Name) (idx : Nat) (struct : Expr) (hash : Address) -use std::ffi::c_void; - use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; -use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; use crate::lean::nat::Nat; use crate::lean::obj::{IxExpr, LeanArray, LeanCtor, LeanObj, LeanString}; -use crate::lean::{lean_array_data, lean_ctor_scalar_u8, lean_obj_to_string}; -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::address::build_address; -use super::data::{build_data_value, decode_data_value}; -use super::level::{build_level, build_level_array, decode_ix_level}; -use super::name::{build_name, decode_ix_name}; +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::primitives::build_nat; +use crate::lean::ffi::ix::address::build_address; +use crate::lean::ffi::ix::data::{build_data_value, decode_data_value}; +use crate::lean::ffi::ix::level::{build_level, build_level_array, decode_ix_level}; +use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; /// Build a Lean Ix.Expr with embedded hash. /// Uses caching to avoid rebuilding the same expression. @@ -219,173 +215,132 @@ pub fn binder_info_to_u8(bi: &BinderInfo) -> u8 { } /// Decode a Lean Ix.Expr to Rust Expr. 
-pub fn decode_ix_expr(ptr: *const c_void) -> Expr { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // bvar - let idx_ptr = lean_ctor_get(ptr as *mut _, 0); - let idx = Nat::from_ptr(idx_ptr.cast()); - Expr::bvar(idx) - }, - 1 => { - // fvar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = decode_ix_name(name_ptr.cast()); - Expr::fvar(name) - }, - 2 => { - // mvar - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let name = decode_ix_name(name_ptr.cast()); - Expr::mvar(name) - }, - 3 => { - // sort - let level_ptr = lean_ctor_get(ptr as *mut _, 0); - let level = decode_ix_level(level_ptr.cast()); - Expr::sort(level) - }, - 4 => { - // const - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let levels_ptr = lean_ctor_get(ptr as *mut _, 1); - - let name = decode_ix_name(name_ptr.cast()); - let levels: Vec = lean_array_data(levels_ptr.cast()) - .iter() - .map(|&p| decode_ix_level(p)) - .collect(); - - Expr::cnst(name, levels) - }, - 5 => { - // app - let fn_ptr = lean_ctor_get(ptr as *mut _, 0); - let arg_ptr = lean_ctor_get(ptr as *mut _, 1); - let fn_expr = decode_ix_expr(fn_ptr.cast()); - let arg_expr = decode_ix_expr(arg_ptr.cast()); - Expr::app(fn_expr, arg_expr) - }, - 6 => { - // lam: name, ty, body, hash, bi (scalar) - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_ptr = lean_ctor_get(ptr as *mut _, 1); - let body_ptr = lean_ctor_get(ptr as *mut _, 2); - - let name = decode_ix_name(name_ptr.cast()); - let ty = decode_ix_expr(ty_ptr.cast()); - let body = decode_ix_expr(body_ptr.cast()); - - // Read BinderInfo scalar (4 obj fields: name, ty, body, hash) - let bi_byte = lean_ctor_scalar_u8(ptr, 4, 0); - let bi = decode_binder_info(bi_byte); - - Expr::lam(name, ty, body, bi) - }, - 7 => { - // forallE: same layout as lam - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_ptr = lean_ctor_get(ptr as *mut _, 1); - let body_ptr = lean_ctor_get(ptr as *mut _, 2); - - let name = 
decode_ix_name(name_ptr.cast()); - let ty = decode_ix_expr(ty_ptr.cast()); - let body = decode_ix_expr(body_ptr.cast()); - - // 4 obj fields: name, ty, body, hash - let bi_byte = lean_ctor_scalar_u8(ptr, 4, 0); - let bi = decode_binder_info(bi_byte); - - Expr::all(name, ty, body, bi) - }, - 8 => { - // letE: name, ty, val, body, hash, nonDep (scalar) - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let ty_ptr = lean_ctor_get(ptr as *mut _, 1); - let val_ptr = lean_ctor_get(ptr as *mut _, 2); - let body_ptr = lean_ctor_get(ptr as *mut _, 3); +pub fn decode_ix_expr(obj: LeanObj) -> Expr { + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => { + // bvar + let idx = Nat::from_obj(ctor.get(0)); + Expr::bvar(idx) + }, + 1 => { + // fvar + let name = decode_ix_name(ctor.get(0)); + Expr::fvar(name) + }, + 2 => { + // mvar + let name = decode_ix_name(ctor.get(0)); + Expr::mvar(name) + }, + 3 => { + // sort + let level = decode_ix_level(ctor.get(0)); + Expr::sort(level) + }, + 4 => { + // const + let name = decode_ix_name(ctor.get(0)); + let levels: Vec = + ctor.get(1).as_array() + .map(decode_ix_level); + + Expr::cnst(name, levels) + }, + 5 => { + // app + let fn_expr = decode_ix_expr(ctor.get(0)); + let arg_expr = decode_ix_expr(ctor.get(1)); + Expr::app(fn_expr, arg_expr) + }, + 6 => { + // lam: name, ty, body, hash, bi (scalar) + let name = decode_ix_name(ctor.get(0)); + let ty = decode_ix_expr(ctor.get(1)); + let body = decode_ix_expr(ctor.get(2)); - let name = decode_ix_name(name_ptr.cast()); - let ty = decode_ix_expr(ty_ptr.cast()); - let val = decode_ix_expr(val_ptr.cast()); - let body = decode_ix_expr(body_ptr.cast()); + // Read BinderInfo scalar (4 obj fields: name, ty, body, hash) + let bi_byte = ctor.scalar_u8(4, 0); + let bi = decode_binder_info(bi_byte); - // 5 obj fields: name, ty, val, body, hash - let non_dep = lean_ctor_scalar_u8(ptr, 5, 0) != 0; + Expr::lam(name, ty, body, bi) + }, + 7 => { + // forallE: same layout as lam + let name = 
decode_ix_name(ctor.get(0)); + let ty = decode_ix_expr(ctor.get(1)); + let body = decode_ix_expr(ctor.get(2)); - Expr::letE(name, ty, val, body, non_dep) - }, - 9 => { - // lit - let lit_ptr = lean_ctor_get(ptr as *mut _, 0); - let lit = decode_literal(lit_ptr.cast()); - Expr::lit(lit) - }, - 10 => { - // mdata: data, expr, hash - let data_ptr = lean_ctor_get(ptr as *mut _, 0); - let expr_ptr = lean_ctor_get(ptr as *mut _, 1); + // 4 obj fields: name, ty, body, hash + let bi_byte = ctor.scalar_u8(4, 0); + let bi = decode_binder_info(bi_byte); - let data: Vec<(Name, DataValue)> = lean_array_data(data_ptr.cast()) - .iter() - .map(|&p| decode_name_data_value(p)) - .collect(); + Expr::all(name, ty, body, bi) + }, + 8 => { + // letE: name, ty, val, body, hash, nonDep (scalar) + let name = decode_ix_name(ctor.get(0)); + let ty = decode_ix_expr(ctor.get(1)); + let val = decode_ix_expr(ctor.get(2)); + let body = decode_ix_expr(ctor.get(3)); - let inner = decode_ix_expr(expr_ptr.cast()); - Expr::mdata(data, inner) - }, - 11 => { - // proj: typeName, idx, struct, hash - let type_name_ptr = lean_ctor_get(ptr as *mut _, 0); - let idx_ptr = lean_ctor_get(ptr as *mut _, 1); - let struct_ptr = lean_ctor_get(ptr as *mut _, 2); + // 5 obj fields: name, ty, val, body, hash + let non_dep = ctor.scalar_u8(5, 0) != 0; - let type_name = decode_ix_name(type_name_ptr.cast()); - let idx = Nat::from_ptr(idx_ptr.cast()); - let struct_expr = decode_ix_expr(struct_ptr.cast()); + Expr::letE(name, ty, val, body, non_dep) + }, + 9 => { + // lit + let lit = decode_literal(ctor.get(0)); + Expr::lit(lit) + }, + 10 => { + // mdata: data, expr, hash + let data: Vec<(Name, DataValue)> = + ctor.get(0).as_array() + .map(decode_name_data_value); + + let inner = decode_ix_expr(ctor.get(1)); + Expr::mdata(data, inner) + }, + 11 => { + // proj: typeName, idx, struct, hash + let type_name = decode_ix_name(ctor.get(0)); + let idx = Nat::from_obj(ctor.get(1)); + let struct_expr = decode_ix_expr(ctor.get(2)); - 
Expr::proj(type_name, idx, struct_expr) - }, - _ => panic!("Invalid Ix.Expr tag: {}", tag), - } + Expr::proj(type_name, idx, struct_expr) + }, + _ => panic!("Invalid Ix.Expr tag: {}", ctor.tag()), } } -/// Decode Lean.Literal from a Lean pointer. -pub fn decode_literal(ptr: *const c_void) -> Literal { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // natVal - let nat_ptr = lean_ctor_get(ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr.cast()); - Literal::NatVal(nat) - }, - 1 => { - // strVal - let str_ptr = lean_ctor_get(ptr as *mut _, 0); - Literal::StrVal(lean_obj_to_string(str_ptr as *const _)) - }, - _ => panic!("Invalid Literal tag: {}", tag), - } +/// Decode Lean.Literal from a Lean object. +pub fn decode_literal(obj: LeanObj) -> Literal { + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => { + // natVal + let nat = Nat::from_obj(ctor.get(0)); + Literal::NatVal(nat) + }, + 1 => { + // strVal + Literal::StrVal( + ctor.get(0).as_string().to_string(), + ) + }, + _ => panic!("Invalid Literal tag: {}", ctor.tag()), } } /// Decode a (Name × DataValue) pair for mdata. -fn decode_name_data_value(ptr: *const c_void) -> (Name, DataValue) { - unsafe { - // Prod: ctor 0 with 2 fields - let name_ptr = lean_ctor_get(ptr as *mut _, 0); - let dv_ptr = lean_ctor_get(ptr as *mut _, 1); - - let name = decode_ix_name(name_ptr.cast()); - let dv = decode_data_value(dv_ptr.cast()); - - (name, dv) - } +fn decode_name_data_value(obj: LeanObj) -> (Name, DataValue) { + // Prod: ctor 0 with 2 fields + let ctor = obj.as_ctor(); + let name = decode_ix_name(ctor.get(0)); + let dv = decode_data_value(ctor.get(1)); + (name, dv) } /// Decode BinderInfo from byte. @@ -402,7 +357,7 @@ pub fn decode_binder_info(bi_byte: u8) -> BinderInfo { /// Round-trip an Ix.Expr: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: IxExpr) -> IxExpr { - let expr = decode_ix_expr(expr_ptr.as_ptr()); + let expr = decode_ix_expr(*expr_ptr); let mut cache = LeanBuildCache::new(); build_expr(&mut cache, &expr) } diff --git a/src/lean/ffi/ix/level.rs b/src/lean/ffi/ix/level.rs index 73a66429..2ce50c50 100644 --- a/src/lean/ffi/ix/level.rs +++ b/src/lean/ffi/ix/level.rs @@ -8,15 +8,12 @@ //! - Tag 4: param (n : Name) (hash : Address) //! - Tag 5: mvar (n : Name) (hash : Address) -use std::ffi::c_void; - use crate::ix::env::{Level, LevelData}; -use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; -use crate::lean::obj::{IxLevel, LeanArray, LeanCtor}; +use crate::lean::obj::{IxLevel, LeanArray, LeanCtor, LeanObj}; -use super::super::builder::LeanBuildCache; -use super::address::build_address; -use super::name::{build_name, decode_ix_name}; +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::ix::address::build_address; +use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; /// Build a Lean Ix.Level with embedded hash. /// Uses caching to avoid rebuilding the same level. @@ -91,54 +88,45 @@ pub fn build_level_array( } /// Decode a Lean Ix.Level to Rust Level. 
-pub fn decode_ix_level(ptr: *const c_void) -> Level { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => Level::zero(), - 1 => { - let x_ptr = lean_ctor_get(ptr as *mut _, 0); - let x = decode_ix_level(x_ptr.cast()); - Level::succ(x) - }, - 2 => { - let x_ptr = lean_ctor_get(ptr as *mut _, 0); - let y_ptr = lean_ctor_get(ptr as *mut _, 1); - let x = decode_ix_level(x_ptr.cast()); - let y = decode_ix_level(y_ptr.cast()); - Level::max(x, y) - }, - 3 => { - let x_ptr = lean_ctor_get(ptr as *mut _, 0); - let y_ptr = lean_ctor_get(ptr as *mut _, 1); - let x = decode_ix_level(x_ptr.cast()); - let y = decode_ix_level(y_ptr.cast()); - Level::imax(x, y) - }, - 4 => { - let n_ptr = lean_ctor_get(ptr as *mut _, 0); - let n = decode_ix_name(n_ptr.cast()); - Level::param(n) - }, - 5 => { - let n_ptr = lean_ctor_get(ptr as *mut _, 0); - let n = decode_ix_name(n_ptr.cast()); - Level::mvar(n) - }, - _ => panic!("Invalid Ix.Level tag: {}", tag), - } +pub fn decode_ix_level(obj: LeanObj) -> Level { + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => Level::zero(), + 1 => { + let x = decode_ix_level(ctor.get(0)); + Level::succ(x) + }, + 2 => { + let x = decode_ix_level(ctor.get(0)); + let y = decode_ix_level(ctor.get(1)); + Level::max(x, y) + }, + 3 => { + let x = decode_ix_level(ctor.get(0)); + let y = decode_ix_level(ctor.get(1)); + Level::imax(x, y) + }, + 4 => { + let n = decode_ix_name(ctor.get(0)); + Level::param(n) + }, + 5 => { + let n = decode_ix_name(ctor.get(0)); + Level::mvar(n) + }, + _ => panic!("Invalid Ix.Level tag: {}", ctor.tag()), } } /// Decode Array of Levels from Lean pointer. -pub fn decode_level_array(ptr: *const c_void) -> Vec { - crate::lean::lean_array_to_vec(ptr, decode_ix_level) +pub fn decode_level_array(obj: LeanObj) -> Vec { + obj.as_array().map(decode_ix_level) } /// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_level(level_ptr: IxLevel) -> IxLevel { - let level = decode_ix_level(level_ptr.as_ptr()); + let level = decode_ix_level(*level_ptr); let mut cache = LeanBuildCache::new(); build_level(&mut cache, &level) } diff --git a/src/lean/ffi/ix/name.rs b/src/lean/ffi/ix/name.rs index bbc38ca1..def0fc81 100644 --- a/src/lean/ffi/ix/name.rs +++ b/src/lean/ffi/ix/name.rs @@ -5,17 +5,13 @@ //! - Tag 1: str (parent : Name) (s : String) (hash : Address) //! - Tag 2: num (parent : Name) (i : Nat) (hash : Address) -use std::ffi::c_void; - use crate::ix::env::{Name, NameData}; -use crate::lean::lean::{lean_ctor_get, lean_obj_tag}; -use crate::lean::lean_obj_to_string; use crate::lean::nat::Nat; -use crate::lean::obj::{IxName, LeanArray, LeanCtor, LeanString}; +use crate::lean::obj::{IxName, LeanArray, LeanCtor, LeanObj, LeanString}; -use super::super::builder::LeanBuildCache; -use super::super::primitives::build_nat; -use super::address::build_address; +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::primitives::build_nat; +use crate::lean::ffi::ix::address::build_address; /// Build a Lean Ix.Name with embedded hash. /// Uses caching to avoid rebuilding the same name. @@ -69,50 +65,38 @@ pub fn build_name_array( } /// Decode a Lean Ix.Name to Rust Name. 
-pub fn decode_ix_name(ptr: *const c_void) -> Name { - unsafe { - let tag = lean_obj_tag(ptr as *mut _); - match tag { - 0 => { - // anonymous: just has hash, construct anon Name - Name::anon() - }, - 1 => { - // str: parent, s, hash - let parent_ptr = lean_ctor_get(ptr as *mut _, 0); - let s_ptr = lean_ctor_get(ptr as *mut _, 1); - // hash at field 2 is ignored - Rust recomputes it - - let parent = decode_ix_name(parent_ptr.cast()); - let s = lean_obj_to_string(s_ptr as *const _); - - Name::str(parent, s) - }, - 2 => { - // num: parent, i, hash - let parent_ptr = lean_ctor_get(ptr as *mut _, 0); - let i_ptr = lean_ctor_get(ptr as *mut _, 1); - // hash at field 2 is ignored - - let parent = decode_ix_name(parent_ptr.cast()); - let i = Nat::from_ptr(i_ptr.cast()); - - Name::num(parent, i) - }, - _ => panic!("Invalid Ix.Name tag: {}", tag), - } +pub fn decode_ix_name(obj: LeanObj) -> Name { + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => { + // anonymous: just has hash, construct anon Name + Name::anon() + }, + 1 => { + // str: parent, s, hash + let parent = decode_ix_name(ctor.get(0)); + let s = ctor.get(1).as_string().to_string(); + Name::str(parent, s) + }, + 2 => { + // num: parent, i, hash + let parent = decode_ix_name(ctor.get(0)); + let i = Nat::from_obj(ctor.get(1)); + Name::num(parent, i) + }, + _ => panic!("Invalid Ix.Name tag: {}", ctor.tag()), } } /// Decode Array of Names from Lean pointer. -pub fn decode_name_array(ptr: *const c_void) -> Vec { - crate::lean::lean_array_to_vec(ptr, decode_ix_name) +pub fn decode_name_array(obj: LeanObj) -> Vec { + obj.as_array().map(decode_ix_name) } /// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_name(name_ptr: IxName) -> IxName { - let name = decode_ix_name(name_ptr.as_ptr()); + let name = decode_ix_name(*name_ptr); let mut cache = LeanBuildCache::new(); build_name(&mut cache, &name) } diff --git a/src/lean/ffi/ixon/compare.rs b/src/lean/ffi/ixon/compare.rs index 896377aa..727541c7 100644 --- a/src/lean/ffi/ixon/compare.rs +++ b/src/lean/ffi/ixon/compare.rs @@ -1,15 +1,14 @@ //! Cross-implementation compilation comparison FFI. use std::collections::HashMap; -use std::ffi::c_void; use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use crate::lean::obj::{LeanByteArray, LeanCtor, LeanObj}; +use crate::lean::obj::{LeanCtor, LeanObj}; -use super::super::lean_env::{ +use crate::lean::ffi::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, }; @@ -21,7 +20,7 @@ pub struct RustBlockEnv { /// Compare Lean's compiled expression output with Rust's compilation of the same input. #[unsafe(no_mangle)] pub extern "C" fn rs_compare_expr_compilation( - lean_expr_ptr: *const c_void, + lean_expr_ptr: LeanObj, lean_output: LeanObj, univ_ctx_size: u64, ) -> bool { @@ -57,7 +56,7 @@ pub extern "C" fn rs_compare_expr_compilation( put_expr(&rust_expr, &mut rust_bytes); // Compare byte-for-byte - let lean_ba = unsafe { LeanByteArray::from_raw(lean_output.as_ptr()) }; + let lean_ba = lean_output.as_byte_array(); let lean_bytes = lean_ba.as_bytes(); rust_bytes == lean_bytes } @@ -102,11 +101,10 @@ fn build_block_compare_detail( /// # Safety /// /// `rust_env` must be a valid pointer to a `RustBlockEnv`. -/// `lowlink_name` must be a valid Lean object pointer. 
#[unsafe(no_mangle)] pub unsafe extern "C" fn rs_compare_block_v2( rust_env: *const RustBlockEnv, - lowlink_name: *const c_void, + lowlink_name: LeanObj, lean_bytes: LeanObj, lean_sharing_len: u64, ) -> LeanObj { @@ -114,7 +112,7 @@ pub unsafe extern "C" fn rs_compare_block_v2( let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let lean_ba = unsafe { LeanByteArray::from_raw(lean_bytes.as_ptr()) }; + let lean_ba = lean_bytes.as_byte_array(); let lean_data = lean_ba.as_bytes(); // Look up Rust's compiled block @@ -185,9 +183,9 @@ pub unsafe extern "C" fn rs_free_compiled_env(ptr: *mut RustBlockEnv) { /// Build a RustBlockEnv from a Lean environment. #[unsafe(no_mangle)] pub extern "C" fn rs_build_compiled_env( - env_consts_ptr: *const c_void, + env_consts_ptr: LeanObj, ) -> *mut RustBlockEnv { - use super::super::lean_env::lean_ptr_to_env; + use crate::lean::ffi::lean_env::lean_ptr_to_env; // Decode Lean environment let rust_env = lean_ptr_to_env(env_consts_ptr); diff --git a/src/lean/ffi/ixon/constant.rs b/src/lean/ffi/ixon/constant.rs index c0e676ac..225bfaaf 100644 --- a/src/lean/ffi/ixon/constant.rs +++ b/src/lean/ffi/ixon/constant.rs @@ -8,520 +8,505 @@ use std::sync::Arc; use crate::ix::address::Address; use crate::ix::ixon::constant::{ - Axiom, Constant, ConstantInfo, Constructor, ConstructorProj, DefKind, - Definition, DefinitionProj, Inductive, InductiveProj, MutConst, Quotient, - Recursor, RecursorProj, RecursorRule, -}; -use crate::lean::obj::{ - IxAddress, IxonAxiom, IxonConstant, IxonConstantInfo, IxonConstructor, - IxonConstructorProj, IxonDefinition, IxonDefinitionProj, IxonExpr, - IxonInductive, IxonInductiveProj, IxonMutConst, IxonQuotient, IxonRecursor, - IxonRecursorProj, IxonRecursorRule, LeanArray, LeanByteArray, LeanCtor, - LeanObj, + Axiom as IxonAxiom, Constant as IxonConstant, + ConstantInfo as IxonConstantInfo, Constructor as IxonConstructor, + ConstructorProj, DefKind, Definition as 
IxonDefinition, DefinitionProj, + Inductive as IxonInductive, InductiveProj, MutConst, + Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, + RecursorRule as IxonRecursorRule, }; +use crate::lean::obj::{IxAddress, LeanArray, LeanByteArray, LeanCtor, LeanObj}; -use super::univ::*; +use crate::lean::ffi::ixon::expr::{ + build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, + decode_ixon_expr_array, +}; +use crate::lean::ffi::ixon::univ::{build_ixon_univ_array, decode_ixon_univ_array}; -impl IxAddress { - /// Build Address from Ixon Address type (which is just a [u8; 32]). - pub fn build_from_ixon(addr: &Address) -> Self { - LeanByteArray::from_bytes(addr.as_bytes()) - } +/// Build Address from Ixon Address type (which is just a [u8; 32]). +pub fn build_address_from_ixon(addr: &Address) -> IxAddress { + LeanByteArray::from_bytes(addr.as_bytes()) +} - /// Build an Array of Addresses. - pub fn build_array(addrs: &[Address]) -> LeanArray { - let arr = LeanArray::alloc(addrs.len()); - for (i, addr) in addrs.iter().enumerate() { - arr.set(i, Self::build_from_ixon(addr)); - } - arr +/// Build an Array of Addresses. +pub fn build_address_array(addrs: &[Address]) -> LeanArray { + let arr = LeanArray::alloc(addrs.len()); + for (i, addr) in addrs.iter().enumerate() { + arr.set(i, build_address_from_ixon(addr)); } + arr +} - /// Decode a ByteArray (Address) to Address. - pub fn decode_ixon(self) -> Address { - Address::from_slice(&self.as_bytes()[..32]).expect("Address should be 32 bytes") - } +/// Build Ixon.Definition +/// Lean stores scalar fields ordered by size (largest first). 
+/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) +pub fn build_ixon_definition(def: &IxonDefinition) -> LeanObj { + let typ_obj = build_ixon_expr(&def.typ); + let value_obj = build_ixon_expr(&def.value); + // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) + let ctor = LeanCtor::alloc(0, 2, 16); + ctor.set(0, typ_obj); + ctor.set(1, value_obj); + // Scalar offsets from obj_cptr: 2*8=16 for lvls, 2*8+8=24 for kind, 2*8+9=25 for safety + ctor.set_u64(16, def.lvls); + let kind_val: u8 = match def.kind { + DefKind::Definition => 0, + DefKind::Opaque => 1, + DefKind::Theorem => 2, + }; + ctor.set_u8(24, kind_val); + let safety_val: u8 = match def.safety { + crate::ix::env::DefinitionSafety::Unsafe => 0, + crate::ix::env::DefinitionSafety::Safe => 1, + crate::ix::env::DefinitionSafety::Partial => 2, + }; + ctor.set_u8(25, safety_val); + *ctor +} - /// Decode Array Address. - pub fn decode_array(obj: LeanObj) -> Vec
{ - let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; - arr.map(|elem| { - let ba = unsafe { LeanByteArray::from_raw(elem.as_ptr()) }; - ba.decode_ixon() - }) - } +/// Build Ixon.RecursorRule +pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> LeanObj { + let rhs_obj = build_ixon_expr(&rule.rhs); + // 1 obj field, 8 scalar bytes + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, rhs_obj); + ctor.set_u64(8, rule.fields); + *ctor } -impl IxonDefinition { - /// Build Ixon.Definition - pub fn build(def: &Definition) -> Self { - let typ_obj = IxonExpr::build(&def.typ); - let value_obj = IxonExpr::build(&def.value); - let ctor = LeanCtor::alloc(0, 2, 16); - ctor.set(0, typ_obj); - ctor.set(1, value_obj); - ctor.set_u64(2 * 8, def.lvls); - let kind_val: u8 = match def.kind { - DefKind::Definition => 0, - DefKind::Opaque => 1, - DefKind::Theorem => 2, - }; - ctor.set_u8(2 * 8 + 8, kind_val); - let safety_val: u8 = match def.safety { - crate::ix::env::DefinitionSafety::Unsafe => 0, - crate::ix::env::DefinitionSafety::Safe => 1, - crate::ix::env::DefinitionSafety::Partial => 2, - }; - ctor.set_u8(2 * 8 + 9, safety_val); - Self::new(*ctor) - } +/// Build Ixon.Recursor +/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) +pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanObj { + let typ_obj = build_ixon_expr(&rec.typ); + // Build rules array + let rules_arr = LeanArray::alloc(rec.rules.len()); + for (i, rule) in rec.rules.iter().enumerate() { + rules_arr.set(i, build_ixon_recursor_rule(rule)); + } + // 2 obj fields (typ, rules), 48 scalar bytes (5×8 + 1 + 1 + 6 padding) + let ctor = LeanCtor::alloc(0, 2, 48); + ctor.set(0, typ_obj); + ctor.set(1, rules_arr); + // Scalar offsets from obj_cptr: 2*8=16 base + ctor.set_u64(16, rec.lvls); + ctor.set_u64(24, rec.params); + ctor.set_u64(32, rec.indices); + ctor.set_u64(40, rec.motives); + ctor.set_u64(48, rec.minors); + ctor.set_u8(56, if rec.k 
{ 1 } else { 0 }); + ctor.set_u8(57, if rec.is_unsafe { 1 } else { 0 }); + *ctor +} - /// Decode Ixon.Definition. - pub fn decode(self) -> Definition { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let lvls = ctor.scalar_u64(2, 0); - let kind_val = ctor.scalar_u8(2, 8); - let kind = match kind_val { - 0 => DefKind::Definition, - 1 => DefKind::Opaque, - 2 => DefKind::Theorem, - _ => panic!("Invalid DefKind: {kind_val}"), - }; - let safety_val = ctor.scalar_u8(2, 9); - let safety = match safety_val { - 0 => crate::ix::env::DefinitionSafety::Unsafe, - 1 => crate::ix::env::DefinitionSafety::Safe, - 2 => crate::ix::env::DefinitionSafety::Partial, - _ => panic!("Invalid DefinitionSafety: {safety_val}"), - }; - Definition { - kind, - safety, - lvls, - typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), - value: Arc::new(IxonExpr::new(ctor.get(1)).decode()), - } - } +/// Build Ixon.Axiom +/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) +pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanObj { + let typ_obj = build_ixon_expr(&ax.typ); + // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, ax.lvls); + ctor.set_u8(16, if ax.is_unsafe { 1 } else { 0 }); + *ctor } -impl IxonRecursorRule { - /// Build Ixon.RecursorRule - pub fn build(rule: &RecursorRule) -> Self { - let rhs_obj = IxonExpr::build(&rule.rhs); - let ctor = LeanCtor::alloc(0, 1, 8); - ctor.set(0, rhs_obj); - ctor.set_u64(8, rule.fields); - Self::new(*ctor) - } +/// Build Ixon.Quotient +/// QuotKind is a simple enum stored as scalar u8, not object field. 
+/// Scalars ordered by size: lvls(8) + kind(1) + padding(7) +pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanObj { + let typ_obj = build_ixon_expr(".typ); + // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7)) + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, quot.lvls); + let kind_val: u8 = match quot.kind { + crate::ix::env::QuotKind::Type => 0, + crate::ix::env::QuotKind::Ctor => 1, + crate::ix::env::QuotKind::Lift => 2, + crate::ix::env::QuotKind::Ind => 3, + }; + ctor.set_u8(16, kind_val); + *ctor +} - /// Decode Ixon.RecursorRule. - pub fn decode(self) -> RecursorRule { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let fields = ctor.scalar_u64(1, 0); - RecursorRule { fields, rhs: Arc::new(IxonExpr::new(ctor.get(0)).decode()) } - } +/// Build Ixon.Constructor +/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) +pub fn build_ixon_constructor(c: &IxonConstructor) -> LeanObj { + let typ_obj = build_ixon_expr(&c.typ); + // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) + let ctor = LeanCtor::alloc(0, 1, 40); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, c.lvls); + ctor.set_u64(16, c.cidx); + ctor.set_u64(24, c.params); + ctor.set_u64(32, c.fields); + ctor.set_u8(40, if c.is_unsafe { 1 } else { 0 }); + *ctor } -impl IxonRecursor { - /// Build Ixon.Recursor - pub fn build(rec: &Recursor) -> Self { - let typ_obj = IxonExpr::build(&rec.typ); - let rules_arr = LeanArray::alloc(rec.rules.len()); - for (i, rule) in rec.rules.iter().enumerate() { - rules_arr.set(i, IxonRecursorRule::build(rule)); - } - let ctor = LeanCtor::alloc(0, 2, 48); - ctor.set(0, typ_obj); - ctor.set(1, rules_arr); - ctor.set_u64(2 * 8, rec.lvls); - ctor.set_u64(2 * 8 + 8, rec.params); - ctor.set_u64(2 * 8 + 16, rec.indices); - ctor.set_u64(2 * 8 + 24, rec.motives); - ctor.set_u64(2 * 8 + 
32, rec.minors); - ctor.set_u8(2 * 8 + 40, if rec.k { 1 } else { 0 }); - ctor.set_u8(2 * 8 + 41, if rec.is_unsafe { 1 } else { 0 }); - Self::new(*ctor) - } +/// Build Ixon.Inductive +/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) +pub fn build_ixon_inductive(ind: &IxonInductive) -> LeanObj { + let typ_obj = build_ixon_expr(&ind.typ); + // Build ctors array + let ctors_arr = LeanArray::alloc(ind.ctors.len()); + for (i, c) in ind.ctors.iter().enumerate() { + ctors_arr.set(i, build_ixon_constructor(c)); + } + // 2 obj fields, 40 scalar bytes (4×8 + 3 + 5 padding) + let ctor = LeanCtor::alloc(0, 2, 40); + ctor.set(0, typ_obj); + ctor.set(1, ctors_arr); + // Scalar offsets from obj_cptr: 2*8=16 base + ctor.set_u64(16, ind.lvls); + ctor.set_u64(24, ind.params); + ctor.set_u64(32, ind.indices); + ctor.set_u64(40, ind.nested); + ctor.set_u8(48, if ind.recr { 1 } else { 0 }); + ctor.set_u8(49, if ind.refl { 1 } else { 0 }); + ctor.set_u8(50, if ind.is_unsafe { 1 } else { 0 }); + *ctor +} - /// Decode Ixon.Recursor. 
- pub fn decode(self) -> Recursor { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let lvls = ctor.scalar_u64(2, 0); - let params = ctor.scalar_u64(2, 8); - let indices = ctor.scalar_u64(2, 16); - let motives = ctor.scalar_u64(2, 24); - let minors = ctor.scalar_u64(2, 32); - let k = ctor.scalar_bool(2, 40); - let is_unsafe = ctor.scalar_bool(2, 41); - let rules_arr = unsafe { LeanArray::from_raw(ctor.get(1).as_ptr()) }; - let rules = rules_arr.map(|r| IxonRecursorRule::new(r).decode()); - Recursor { - k, - is_unsafe, - lvls, - params, - indices, - motives, - minors, - typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), - rules, - } - } +/// Build Ixon.InductiveProj +pub fn build_inductive_proj(proj: &InductiveProj) -> LeanObj { + let block_obj = build_address_from_ixon(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + *ctor } -impl IxonAxiom { - /// Build Ixon.Axiom - pub fn build(ax: &Axiom) -> Self { - let typ_obj = IxonExpr::build(&ax.typ); - let ctor = LeanCtor::alloc(0, 1, 16); - ctor.set(0, typ_obj); - ctor.set_u64(8, ax.lvls); - ctor.set_u8(8 + 8, if ax.is_unsafe { 1 } else { 0 }); - Self::new(*ctor) - } +/// Build Ixon.ConstructorProj +pub fn build_constructor_proj(proj: &ConstructorProj) -> LeanObj { + let block_obj = build_address_from_ixon(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + ctor.set_u64(16, proj.cidx); + *ctor +} - /// Decode Ixon.Axiom. 
- pub fn decode(self) -> Axiom { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let lvls = ctor.scalar_u64(1, 0); - let is_unsafe = ctor.scalar_bool(1, 8); - Axiom { - is_unsafe, - lvls, - typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), - } - } +/// Build Ixon.RecursorProj +pub fn build_recursor_proj(proj: &RecursorProj) -> LeanObj { + let block_obj = build_address_from_ixon(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + *ctor } -impl IxonQuotient { - /// Build Ixon.Quotient - pub fn build(quot: &Quotient) -> Self { - let typ_obj = IxonExpr::build(".typ); - let ctor = LeanCtor::alloc(0, 1, 16); - ctor.set(0, typ_obj); - ctor.set_u64(8, quot.lvls); - let kind_val: u8 = match quot.kind { - crate::ix::env::QuotKind::Type => 0, - crate::ix::env::QuotKind::Ctor => 1, - crate::ix::env::QuotKind::Lift => 2, - crate::ix::env::QuotKind::Ind => 3, - }; - ctor.set_u8(8 + 8, kind_val); - Self::new(*ctor) - } +/// Build Ixon.DefinitionProj +pub fn build_definition_proj(proj: &DefinitionProj) -> LeanObj { + let block_obj = build_address_from_ixon(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + *ctor +} - /// Decode Ixon.Quotient. 
- pub fn decode(self) -> Quotient { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let lvls = ctor.scalar_u64(1, 0); - let kind_val = ctor.scalar_u8(1, 8); - let kind = match kind_val { - 0 => crate::ix::env::QuotKind::Type, - 1 => crate::ix::env::QuotKind::Ctor, - 2 => crate::ix::env::QuotKind::Lift, - 3 => crate::ix::env::QuotKind::Ind, - _ => panic!("Invalid QuotKind: {kind_val}"), - }; - Quotient { kind, lvls, typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()) } +/// Build Ixon.MutConst +pub fn build_mut_const(mc: &MutConst) -> LeanObj { + match mc { + MutConst::Defn(def) => { + let def_obj = build_ixon_definition(def); + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, def_obj); + *ctor + }, + MutConst::Indc(ind) => { + let ind_obj = build_ixon_inductive(ind); + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, ind_obj); + *ctor + }, + MutConst::Recr(rec) => { + let rec_obj = build_ixon_recursor(rec); + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, rec_obj); + *ctor + }, } } -impl IxonConstructor { - /// Build Ixon.Constructor - pub fn build(c: &Constructor) -> Self { - let typ_obj = IxonExpr::build(&c.typ); - let ctor = LeanCtor::alloc(0, 1, 40); - ctor.set(0, typ_obj); - ctor.set_u64(8, c.lvls); - ctor.set_u64(8 + 8, c.cidx); - ctor.set_u64(8 + 16, c.params); - ctor.set_u64(8 + 24, c.fields); - ctor.set_u8(8 + 32, if c.is_unsafe { 1 } else { 0 }); - Self::new(*ctor) +/// Build Ixon.ConstantInfo (9 constructors) +pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> LeanObj { + match info { + IxonConstantInfo::Defn(def) => { + let def_obj = build_ixon_definition(def); + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, def_obj); + *ctor + }, + IxonConstantInfo::Recr(rec) => { + let rec_obj = build_ixon_recursor(rec); + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, rec_obj); + *ctor + }, + IxonConstantInfo::Axio(ax) => { + let ax_obj = build_ixon_axiom(ax); + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, ax_obj); + *ctor 
+ }, + IxonConstantInfo::Quot(quot) => { + let quot_obj = build_ixon_quotient(quot); + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, quot_obj); + *ctor + }, + IxonConstantInfo::CPrj(proj) => { + let proj_obj = build_constructor_proj(proj); + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::RPrj(proj) => { + let proj_obj = build_recursor_proj(proj); + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::IPrj(proj) => { + let proj_obj = build_inductive_proj(proj); + let ctor = LeanCtor::alloc(6, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::DPrj(proj) => { + let proj_obj = build_definition_proj(proj); + let ctor = LeanCtor::alloc(7, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::Muts(muts) => { + let arr = LeanArray::alloc(muts.len()); + for (i, mc) in muts.iter().enumerate() { + arr.set(i, build_mut_const(mc)); + } + let ctor = LeanCtor::alloc(8, 1, 0); + ctor.set(0, arr); + *ctor + }, } +} - /// Decode Ixon.Constructor. 
- pub fn decode(self) -> Constructor { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let lvls = ctor.scalar_u64(1, 0); - let cidx = ctor.scalar_u64(1, 8); - let params = ctor.scalar_u64(1, 16); - let fields = ctor.scalar_u64(1, 24); - let is_unsafe = ctor.scalar_bool(1, 32); - Constructor { - is_unsafe, - lvls, - cidx, - params, - fields, - typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), - } - } +/// Build Ixon.Constant +pub fn build_ixon_constant(constant: &IxonConstant) -> LeanObj { + let info_obj = build_ixon_constant_info(&constant.info); + let sharing_obj = build_ixon_expr_array(&constant.sharing); + let refs_obj = build_address_array(&constant.refs); + let univs_obj = build_ixon_univ_array(&constant.univs); + let ctor = LeanCtor::alloc(0, 4, 0); + ctor.set(0, info_obj); + ctor.set(1, sharing_obj); + ctor.set(2, refs_obj); + ctor.set(3, univs_obj); + *ctor } -impl IxonInductive { - /// Build Ixon.Inductive - pub fn build(ind: &Inductive) -> Self { - let typ_obj = IxonExpr::build(&ind.typ); - let ctors_arr = LeanArray::alloc(ind.ctors.len()); - for (i, c) in ind.ctors.iter().enumerate() { - ctors_arr.set(i, IxonConstructor::build(c)); - } - let ctor = LeanCtor::alloc(0, 2, 40); - ctor.set(0, typ_obj); - ctor.set(1, ctors_arr); - ctor.set_u64(2 * 8, ind.lvls); - ctor.set_u64(2 * 8 + 8, ind.params); - ctor.set_u64(2 * 8 + 16, ind.indices); - ctor.set_u64(2 * 8 + 24, ind.nested); - ctor.set_u8(2 * 8 + 32, if ind.recr { 1 } else { 0 }); - ctor.set_u8(2 * 8 + 33, if ind.refl { 1 } else { 0 }); - ctor.set_u8(2 * 8 + 34, if ind.is_unsafe { 1 } else { 0 }); - Self::new(*ctor) - } +// ============================================================================= +// Decode Functions +// ============================================================================= - /// Decode Ixon.Inductive. 
- pub fn decode(self) -> Inductive { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let lvls = ctor.scalar_u64(2, 0); - let params = ctor.scalar_u64(2, 8); - let indices = ctor.scalar_u64(2, 16); - let nested = ctor.scalar_u64(2, 24); - let recr = ctor.scalar_bool(2, 32); - let refl = ctor.scalar_bool(2, 33); - let is_unsafe = ctor.scalar_bool(2, 34); - let ctors_arr = unsafe { LeanArray::from_raw(ctor.get(1).as_ptr()) }; - let ctors = ctors_arr.map(|c| IxonConstructor::new(c).decode()); - Inductive { - recr, - refl, - is_unsafe, - lvls, - params, - indices, - nested, - typ: Arc::new(IxonExpr::new(ctor.get(0)).decode()), - ctors, - } - } +/// Decode a ByteArray (Address) to Address. +pub fn decode_ixon_address(obj: LeanObj) -> Address { + let ba = obj.as_byte_array(); + Address::from_slice(&ba.as_bytes()[..32]).expect("Address should be 32 bytes") } -impl IxonInductiveProj { - /// Build Ixon.InductiveProj - pub fn build(proj: &InductiveProj) -> Self { - let ctor = LeanCtor::alloc(0, 1, 8); - ctor.set(0, IxAddress::build_from_ixon(&proj.block)); - ctor.set_u64(8, proj.idx); - Self::new(*ctor) - } +/// Decode Array Address. +pub fn decode_ixon_address_array(obj: LeanObj) -> Vec
{ + let arr = obj.as_array(); + arr.map(decode_ixon_address) +} - /// Decode Ixon.InductiveProj. - pub fn decode(self) -> InductiveProj { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let idx = ctor.scalar_u64(1, 0); - let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - InductiveProj { idx, block: ba.decode_ixon() } - } +/// Decode Ixon.Definition. +/// Lean stores scalar fields ordered by size (largest first). +/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) +pub fn decode_ixon_definition(obj: LeanObj) -> IxonDefinition { + let ctor = obj.as_ctor(); + let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let value = Arc::new(decode_ixon_expr(ctor.get(1))); + let lvls = ctor.scalar_u64(2, 0); + let kind_val = ctor.scalar_u8(2, 8); + let kind = match kind_val { + 0 => DefKind::Definition, + 1 => DefKind::Opaque, + 2 => DefKind::Theorem, + _ => panic!("Invalid DefKind: {}", kind_val), + }; + let safety_val = ctor.scalar_u8(2, 9); + let safety = match safety_val { + 0 => crate::ix::env::DefinitionSafety::Unsafe, + 1 => crate::ix::env::DefinitionSafety::Safe, + 2 => crate::ix::env::DefinitionSafety::Partial, + _ => panic!("Invalid DefinitionSafety: {}", safety_val), + }; + IxonDefinition { kind, safety, lvls, typ, value } } -impl IxonConstructorProj { - /// Build Ixon.ConstructorProj - pub fn build(proj: &ConstructorProj) -> Self { - let ctor = LeanCtor::alloc(0, 1, 16); - ctor.set(0, IxAddress::build_from_ixon(&proj.block)); - ctor.set_u64(8, proj.idx); - ctor.set_u64(8 + 8, proj.cidx); - Self::new(*ctor) - } +/// Decode Ixon.RecursorRule. +pub fn decode_ixon_recursor_rule(obj: LeanObj) -> IxonRecursorRule { + let ctor = obj.as_ctor(); + let rhs = Arc::new(decode_ixon_expr(ctor.get(0))); + let fields = ctor.scalar_u64(1, 0); + IxonRecursorRule { fields, rhs } +} - /// Decode Ixon.ConstructorProj. 
- pub fn decode(self) -> ConstructorProj { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let idx = ctor.scalar_u64(1, 0); - let cidx = ctor.scalar_u64(1, 8); - let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - ConstructorProj { idx, cidx, block: ba.decode_ixon() } - } +/// Decode Ixon.Recursor. +/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) +pub fn decode_ixon_recursor(obj: LeanObj) -> IxonRecursor { + let ctor = obj.as_ctor(); + let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let rules_arr = ctor.get(1).as_array(); + let rules = rules_arr.map(decode_ixon_recursor_rule); + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let motives = ctor.scalar_u64(2, 24); + let minors = ctor.scalar_u64(2, 32); + let k = ctor.scalar_u8(2, 40) != 0; + let is_unsafe = ctor.scalar_u8(2, 41) != 0; + IxonRecursor { k, is_unsafe, lvls, params, indices, motives, minors, typ, rules } } -impl IxonRecursorProj { - /// Build Ixon.RecursorProj - pub fn build(proj: &RecursorProj) -> Self { - let ctor = LeanCtor::alloc(0, 1, 8); - ctor.set(0, IxAddress::build_from_ixon(&proj.block)); - ctor.set_u64(8, proj.idx); - Self::new(*ctor) - } +/// Decode Ixon.Axiom. +/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) +pub fn decode_ixon_axiom(obj: LeanObj) -> IxonAxiom { + let ctor = obj.as_ctor(); + let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let lvls = ctor.scalar_u64(1, 0); + let is_unsafe = ctor.scalar_u8(1, 8) != 0; + IxonAxiom { is_unsafe, lvls, typ } +} - /// Decode Ixon.RecursorProj. - pub fn decode(self) -> RecursorProj { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let idx = ctor.scalar_u64(1, 0); - let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - RecursorProj { idx, block: ba.decode_ixon() } - } +/// Decode Ixon.Quotient. 
+/// QuotKind is a scalar (not object field). Scalars: lvls(8) + kind(1) + padding(7) +pub fn decode_ixon_quotient(obj: LeanObj) -> IxonQuotient { + let ctor = obj.as_ctor(); + let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let lvls = ctor.scalar_u64(1, 0); + let kind_val = ctor.scalar_u8(1, 8); + let kind = match kind_val { + 0 => crate::ix::env::QuotKind::Type, + 1 => crate::ix::env::QuotKind::Ctor, + 2 => crate::ix::env::QuotKind::Lift, + 3 => crate::ix::env::QuotKind::Ind, + _ => panic!("Invalid QuotKind: {}", kind_val), + }; + IxonQuotient { kind, lvls, typ } } -impl IxonDefinitionProj { - /// Build Ixon.DefinitionProj - pub fn build(proj: &DefinitionProj) -> Self { - let ctor = LeanCtor::alloc(0, 1, 8); - ctor.set(0, IxAddress::build_from_ixon(&proj.block)); - ctor.set_u64(8, proj.idx); - Self::new(*ctor) - } +/// Decode Ixon.Constructor. +/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) +pub fn decode_ixon_constructor(obj: LeanObj) -> IxonConstructor { + let ctor = obj.as_ctor(); + let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let lvls = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + let params = ctor.scalar_u64(1, 16); + let fields = ctor.scalar_u64(1, 24); + let is_unsafe = ctor.scalar_u8(1, 32) != 0; + IxonConstructor { is_unsafe, lvls, cidx, params, fields, typ } +} - /// Decode Ixon.DefinitionProj. - pub fn decode(self) -> DefinitionProj { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let idx = ctor.scalar_u64(1, 0); - let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - DefinitionProj { idx, block: ba.decode_ixon() } - } +/// Decode Ixon.Inductive. 
+/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) +pub fn decode_ixon_inductive(obj: LeanObj) -> IxonInductive { + let ctor = obj.as_ctor(); + let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let ctors_arr = ctor.get(1).as_array(); + let ctors = ctors_arr.map(decode_ixon_constructor); + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let nested = ctor.scalar_u64(2, 24); + let recr = ctor.scalar_u8(2, 32) != 0; + let refl = ctor.scalar_u8(2, 33) != 0; + let is_unsafe = ctor.scalar_u8(2, 34) != 0; + IxonInductive { recr, refl, is_unsafe, lvls, params, indices, nested, typ, ctors } } -impl IxonMutConst { - /// Build Ixon.MutConst - pub fn build(mc: &MutConst) -> Self { - let obj = match mc { - MutConst::Defn(def) => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, IxonDefinition::build(def)); - *ctor - }, - MutConst::Indc(ind) => { - let ctor = LeanCtor::alloc(1, 1, 0); - ctor.set(0, IxonInductive::build(ind)); - *ctor - }, - MutConst::Recr(rec) => { - let ctor = LeanCtor::alloc(2, 1, 0); - ctor.set(0, IxonRecursor::build(rec)); - *ctor - }, - }; - Self::new(obj) - } +/// Decode Ixon.InductiveProj. +pub fn decode_ixon_inductive_proj(obj: LeanObj) -> InductiveProj { + let ctor = obj.as_ctor(); + let block = decode_ixon_address(ctor.get(0)); + let idx = ctor.scalar_u64(1, 0); + InductiveProj { idx, block } +} - /// Decode Ixon.MutConst. - pub fn decode(self) -> MutConst { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - match ctor.tag() { - 0 => MutConst::Defn(IxonDefinition::new(ctor.get(0)).decode()), - 1 => MutConst::Indc(IxonInductive::new(ctor.get(0)).decode()), - 2 => MutConst::Recr(IxonRecursor::new(ctor.get(0)).decode()), - tag => panic!("Invalid Ixon.MutConst tag: {tag}"), - } - } +/// Decode Ixon.ConstructorProj. 
+pub fn decode_ixon_constructor_proj(obj: LeanObj) -> ConstructorProj { + let ctor = obj.as_ctor(); + let block = decode_ixon_address(ctor.get(0)); + let idx = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + ConstructorProj { idx, cidx, block } } -impl IxonConstantInfo { - /// Build Ixon.ConstantInfo (9 constructors) - pub fn build(info: &ConstantInfo) -> Self { - let obj = match info { - ConstantInfo::Defn(def) => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, IxonDefinition::build(def)); - *ctor - }, - ConstantInfo::Recr(rec) => { - let ctor = LeanCtor::alloc(1, 1, 0); - ctor.set(0, IxonRecursor::build(rec)); - *ctor - }, - ConstantInfo::Axio(ax) => { - let ctor = LeanCtor::alloc(2, 1, 0); - ctor.set(0, IxonAxiom::build(ax)); - *ctor - }, - ConstantInfo::Quot(quot) => { - let ctor = LeanCtor::alloc(3, 1, 0); - ctor.set(0, IxonQuotient::build(quot)); - *ctor - }, - ConstantInfo::CPrj(proj) => { - let ctor = LeanCtor::alloc(4, 1, 0); - ctor.set(0, IxonConstructorProj::build(proj)); - *ctor - }, - ConstantInfo::RPrj(proj) => { - let ctor = LeanCtor::alloc(5, 1, 0); - ctor.set(0, IxonRecursorProj::build(proj)); - *ctor - }, - ConstantInfo::IPrj(proj) => { - let ctor = LeanCtor::alloc(6, 1, 0); - ctor.set(0, IxonInductiveProj::build(proj)); - *ctor - }, - ConstantInfo::DPrj(proj) => { - let ctor = LeanCtor::alloc(7, 1, 0); - ctor.set(0, IxonDefinitionProj::build(proj)); - *ctor - }, - ConstantInfo::Muts(muts) => { - let arr = LeanArray::alloc(muts.len()); - for (i, mc) in muts.iter().enumerate() { - arr.set(i, IxonMutConst::build(mc)); - } - let ctor = LeanCtor::alloc(8, 1, 0); - ctor.set(0, arr); - *ctor - }, - }; - Self::new(obj) - } +/// Decode Ixon.RecursorProj. +pub fn decode_ixon_recursor_proj(obj: LeanObj) -> RecursorProj { + let ctor = obj.as_ctor(); + let block = decode_ixon_address(ctor.get(0)); + let idx = ctor.scalar_u64(1, 0); + RecursorProj { idx, block } +} - /// Decode Ixon.ConstantInfo. 
- pub fn decode(self) -> ConstantInfo { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - match ctor.tag() { - 0 => ConstantInfo::Defn(IxonDefinition::new(ctor.get(0)).decode()), - 1 => ConstantInfo::Recr(IxonRecursor::new(ctor.get(0)).decode()), - 2 => ConstantInfo::Axio(IxonAxiom::new(ctor.get(0)).decode()), - 3 => ConstantInfo::Quot(IxonQuotient::new(ctor.get(0)).decode()), - 4 => { - ConstantInfo::CPrj(IxonConstructorProj::new(ctor.get(0)).decode()) - }, - 5 => ConstantInfo::RPrj(IxonRecursorProj::new(ctor.get(0)).decode()), - 6 => ConstantInfo::IPrj(IxonInductiveProj::new(ctor.get(0)).decode()), - 7 => { - ConstantInfo::DPrj(IxonDefinitionProj::new(ctor.get(0)).decode()) - }, - 8 => { - let arr = unsafe { LeanArray::from_raw(ctor.get(0).as_ptr()) }; - let muts = arr.map(|m| IxonMutConst::new(m).decode()); - ConstantInfo::Muts(muts) - }, - tag => panic!("Invalid Ixon.ConstantInfo tag: {tag}"), - } +/// Decode Ixon.DefinitionProj. +pub fn decode_ixon_definition_proj(obj: LeanObj) -> DefinitionProj { + let ctor = obj.as_ctor(); + let block = decode_ixon_address(ctor.get(0)); + let idx = ctor.scalar_u64(1, 0); + DefinitionProj { idx, block } +} + +/// Decode Ixon.MutConst. 
+pub fn decode_ixon_mut_const(obj: LeanObj) -> MutConst { + let ctor = obj.as_ctor(); + let inner = ctor.get(0); + match ctor.tag() { + 0 => MutConst::Defn(decode_ixon_definition(inner)), + 1 => MutConst::Indc(decode_ixon_inductive(inner)), + 2 => MutConst::Recr(decode_ixon_recursor(inner)), + tag => panic!("Invalid Ixon.MutConst tag: {}", tag), } } -impl IxonConstant { - /// Build Ixon.Constant - pub fn build(constant: &Constant) -> Self { - let info_obj = IxonConstantInfo::build(&constant.info); - let sharing_obj = IxonExpr::build_array(&constant.sharing); - let refs_obj = IxAddress::build_array(&constant.refs); - let univs_obj = IxonUniv::build_array(&constant.univs); - let ctor = LeanCtor::alloc(0, 4, 0); - ctor.set(0, info_obj); - ctor.set(1, sharing_obj); - ctor.set(2, refs_obj); - ctor.set(3, univs_obj); - Self::new(*ctor) +/// Decode Ixon.ConstantInfo. +pub fn decode_ixon_constant_info(obj: LeanObj) -> IxonConstantInfo { + let ctor = obj.as_ctor(); + let inner = ctor.get(0); + match ctor.tag() { + 0 => IxonConstantInfo::Defn(decode_ixon_definition(inner)), + 1 => IxonConstantInfo::Recr(decode_ixon_recursor(inner)), + 2 => IxonConstantInfo::Axio(decode_ixon_axiom(inner)), + 3 => IxonConstantInfo::Quot(decode_ixon_quotient(inner)), + 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner)), + 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj(inner)), + 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj(inner)), + 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner)), + 8 => { + let arr = inner.as_array(); + let muts = arr.map(decode_ixon_mut_const); + IxonConstantInfo::Muts(muts) + }, + tag => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), } +} - /// Decode Ixon.Constant. 
- pub fn decode(self) -> Constant { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - Constant { - info: IxonConstantInfo::new(ctor.get(0)).decode(), - sharing: IxonExpr::decode_array(ctor.get(1)), - refs: IxAddress::decode_array(ctor.get(2)), - univs: IxonUniv::decode_array(ctor.get(3)), - } +/// Decode Ixon.Constant. +pub fn decode_ixon_constant(obj: LeanObj) -> IxonConstant { + let ctor = obj.as_ctor(); + IxonConstant { + info: decode_ixon_constant_info(ctor.get(0)), + sharing: decode_ixon_expr_array(ctor.get(1)), + refs: decode_ixon_address_array(ctor.get(2)), + univs: decode_ixon_univ_array(ctor.get(3)), } } @@ -532,101 +517,97 @@ impl IxonConstant { /// Round-trip Ixon.Definition. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_definition(obj: LeanObj) -> LeanObj { - let def = IxonDefinition::new(obj).decode(); - IxonDefinition::build(&def).into() + let def = decode_ixon_definition(obj); + build_ixon_definition(&def) } /// Round-trip Ixon.Recursor. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_recursor(obj: LeanObj) -> LeanObj { - let rec = IxonRecursor::new(obj).decode(); - IxonRecursor::build(&rec).into() + let rec = decode_ixon_recursor(obj); + build_ixon_recursor(&rec) } /// Round-trip Ixon.Axiom. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanObj) -> LeanObj { - let ax = IxonAxiom::new(obj).decode(); - IxonAxiom::build(&ax).into() + let ax = decode_ixon_axiom(obj); + build_ixon_axiom(&ax) } /// Round-trip Ixon.Quotient. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_quotient(obj: LeanObj) -> LeanObj { - let quot = IxonQuotient::new(obj).decode(); - IxonQuotient::build(").into() + let quot = decode_ixon_quotient(obj); + build_ixon_quotient(") } /// Round-trip Ixon.ConstantInfo. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_constant_info(obj: LeanObj) -> LeanObj { - let info = IxonConstantInfo::new(obj).decode(); - IxonConstantInfo::build(&info).into() + let info = decode_ixon_constant_info(obj); + build_ixon_constant_info(&info) } /// Round-trip Ixon.Constant. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_constant(obj: LeanObj) -> LeanObj { - let constant = IxonConstant::new(obj).decode(); - IxonConstant::build(&constant).into() + let constant = decode_ixon_constant(obj); + build_ixon_constant(&constant) } /// Round-trip Ixon.RecursorRule. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_recursor_rule(obj: LeanObj) -> LeanObj { - let rule = IxonRecursorRule::new(obj).decode(); - IxonRecursorRule::build(&rule).into() + let rule = decode_ixon_recursor_rule(obj); + build_ixon_recursor_rule(&rule) } /// Round-trip Ixon.Constructor. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_constructor(obj: LeanObj) -> LeanObj { - let c = IxonConstructor::new(obj).decode(); - IxonConstructor::build(&c).into() + let c = decode_ixon_constructor(obj); + build_ixon_constructor(&c) } /// Round-trip Ixon.Inductive. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_inductive(obj: LeanObj) -> LeanObj { - let ind = IxonInductive::new(obj).decode(); - IxonInductive::build(&ind).into() + let ind = decode_ixon_inductive(obj); + build_ixon_inductive(&ind) } /// Round-trip Ixon.InductiveProj. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_inductive_proj(obj: LeanObj) -> LeanObj { - let proj = IxonInductiveProj::new(obj).decode(); - IxonInductiveProj::build(&proj).into() + let proj = decode_ixon_inductive_proj(obj); + build_inductive_proj(&proj) } /// Round-trip Ixon.ConstructorProj. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constructor_proj( - obj: LeanObj, -) -> LeanObj { - let proj = IxonConstructorProj::new(obj).decode(); - IxonConstructorProj::build(&proj).into() +pub extern "C" fn rs_roundtrip_ixon_constructor_proj(obj: LeanObj) -> LeanObj { + let proj = decode_ixon_constructor_proj(obj); + build_constructor_proj(&proj) } /// Round-trip Ixon.RecursorProj. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_recursor_proj(obj: LeanObj) -> LeanObj { - let proj = IxonRecursorProj::new(obj).decode(); - IxonRecursorProj::build(&proj).into() + let proj = decode_ixon_recursor_proj(obj); + build_recursor_proj(&proj) } /// Round-trip Ixon.DefinitionProj. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition_proj( - obj: LeanObj, -) -> LeanObj { - let proj = IxonDefinitionProj::new(obj).decode(); - IxonDefinitionProj::build(&proj).into() +pub extern "C" fn rs_roundtrip_ixon_definition_proj(obj: LeanObj) -> LeanObj { + let proj = decode_ixon_definition_proj(obj); + build_definition_proj(&proj) } /// Round-trip Ixon.MutConst. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_mut_const(obj: LeanObj) -> LeanObj { - let mc = IxonMutConst::new(obj).decode(); - IxonMutConst::build(&mc).into() + let mc = decode_ixon_mut_const(obj); + build_mut_const(&mc) } diff --git a/src/lean/ffi/ixon/enums.rs b/src/lean/ffi/ixon/enums.rs index e8c4b12c..c565a291 100644 --- a/src/lean/ffi/ixon/enums.rs +++ b/src/lean/ffi/ixon/enums.rs @@ -4,32 +4,33 @@ use std::ffi::c_void; use crate::ix::env::{DefinitionSafety, QuotKind}; use crate::ix::ixon::constant::DefKind; +use crate::lean::obj::LeanObj; /// Build Ixon.DefKind /// | defn -- tag 0 /// | opaq -- tag 1 /// | thm -- tag 2 -/// Simple enums are represented as raw tag values (unboxed scalars). -pub fn build_def_kind(kind: &DefKind) -> *mut c_void { +/// Simple enums are passed as raw (unboxed) tag values across Lean FFI. 
+pub fn build_def_kind(kind: &DefKind) -> LeanObj { let tag = match kind { DefKind::Definition => 0, DefKind::Opaque => 1, DefKind::Theorem => 2, }; - tag as *mut c_void + unsafe { LeanObj::from_raw(tag as *const c_void) } } /// Build Ixon.DefinitionSafety /// | unsaf -- tag 0 /// | safe -- tag 1 /// | part -- tag 2 -pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> *mut c_void { +pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> LeanObj { let tag = match safety { DefinitionSafety::Unsafe => 0, DefinitionSafety::Safe => 1, DefinitionSafety::Partial => 2, }; - tag as *mut c_void + unsafe { LeanObj::from_raw(tag as *const c_void) } } /// Build Ixon.QuotKind @@ -37,23 +38,23 @@ pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> *mut c_void { /// | ctor -- tag 1 /// | lift -- tag 2 /// | ind -- tag 3 -pub fn build_ixon_quot_kind(kind: &QuotKind) -> *mut c_void { +pub fn build_ixon_quot_kind(kind: &QuotKind) -> LeanObj { let tag = match kind { QuotKind::Type => 0, QuotKind::Ctor => 1, QuotKind::Lift => 2, QuotKind::Ind => 3, }; - tag as *mut c_void + unsafe { LeanObj::from_raw(tag as *const c_void) } } // ============================================================================= // Decode Functions // ============================================================================= -/// Decode Ixon.DefKind (simple enum, raw tag value). -pub fn decode_ixon_def_kind(ptr: *const c_void) -> DefKind { - let tag = ptr as usize; +/// Decode Ixon.DefKind (simple enum, raw unboxed tag value). +pub fn decode_ixon_def_kind(obj: LeanObj) -> DefKind { + let tag = obj.as_ptr() as usize; match tag { 0 => DefKind::Definition, 1 => DefKind::Opaque, @@ -62,9 +63,9 @@ pub fn decode_ixon_def_kind(ptr: *const c_void) -> DefKind { } } -/// Decode Ixon.DefinitionSafety (simple enum, raw tag value). 
-pub fn decode_ixon_definition_safety(ptr: *const c_void) -> DefinitionSafety { - let tag = ptr as usize; +/// Decode Ixon.DefinitionSafety (simple enum, raw unboxed tag value). +pub fn decode_ixon_definition_safety(obj: LeanObj) -> DefinitionSafety { + let tag = obj.as_ptr() as usize; match tag { 0 => DefinitionSafety::Unsafe, 1 => DefinitionSafety::Safe, @@ -73,9 +74,9 @@ pub fn decode_ixon_definition_safety(ptr: *const c_void) -> DefinitionSafety { } } -/// Decode Ixon.QuotKind (simple enum, raw tag value). -pub fn decode_ixon_quot_kind(ptr: *const c_void) -> QuotKind { - let tag = ptr as usize; +/// Decode Ixon.QuotKind (simple enum, raw unboxed tag value). +pub fn decode_ixon_quot_kind(obj: LeanObj) -> QuotKind { + let tag = obj.as_ptr() as usize; match tag { 0 => QuotKind::Type, 1 => QuotKind::Ctor, @@ -91,27 +92,21 @@ pub fn decode_ixon_quot_kind(ptr: *const c_void) -> QuotKind { /// Round-trip Ixon.DefKind. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_def_kind( - ptr: *const c_void, -) -> *mut c_void { - let kind = decode_ixon_def_kind(ptr); +pub extern "C" fn rs_roundtrip_ixon_def_kind(obj: LeanObj) -> LeanObj { + let kind = decode_ixon_def_kind(obj); build_def_kind(&kind) } /// Round-trip Ixon.DefinitionSafety. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition_safety( - ptr: *const c_void, -) -> *mut c_void { - let safety = decode_ixon_definition_safety(ptr); +pub extern "C" fn rs_roundtrip_ixon_definition_safety(obj: LeanObj) -> LeanObj { + let safety = decode_ixon_definition_safety(obj); build_ixon_definition_safety(&safety) } /// Round-trip Ixon.QuotKind. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_quot_kind( - ptr: *const c_void, -) -> *mut c_void { - let kind = decode_ixon_quot_kind(ptr); +pub extern "C" fn rs_roundtrip_ixon_quot_kind(obj: LeanObj) -> LeanObj { + let kind = decode_ixon_quot_kind(obj); build_ixon_quot_kind(&kind) } diff --git a/src/lean/ffi/ixon/env.rs b/src/lean/ffi/ixon/env.rs index 4323cd90..dc472731 100644 --- a/src/lean/ffi/ixon/env.rs +++ b/src/lean/ffi/ixon/env.rs @@ -6,19 +6,23 @@ use crate::ix::address::Address; use crate::ix::env::Name; use crate::ix::ixon::comm::Comm; -use crate::ix::ixon::constant::Constant; -use crate::ix::ixon::env::{Env as IxonEnv, Named}; +use crate::ix::ixon::constant::Constant as IxonConstant; +use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; use crate::lean::obj::{ - IxAddress, IxonComm, IxonConstant, IxonConstantMeta, IxonNamed, IxonRawEnv, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObj, }; +use crate::lean::ffi::ixon::constant::{ + build_address_from_ixon, build_ixon_constant, decode_ixon_address, + decode_ixon_constant, +}; +use crate::lean::ffi::ixon::meta::{build_constant_meta, decode_constant_meta}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= -// Decoded types — intermediate Rust representations +// Comm Type (secret: Address, payload: Address) // ============================================================================= /// Decoded Ixon.Comm @@ -27,95 +31,69 @@ pub struct DecodedComm { pub payload: Address, } -/// Decoded Ixon.RawConst -pub struct DecodedRawConst { - pub addr: Address, - pub constant: crate::ix::ixon::constant::Constant, -} - -/// Decoded Ixon.RawNamed -pub struct DecodedRawNamed { - pub name: Name, - pub addr: Address, - pub const_meta: ConstantMeta, -} - -/// Decoded Ixon.RawBlob -pub struct DecodedRawBlob { - 
pub addr: Address,
-  pub bytes: Vec<u8>,
-}
-
-/// Decoded Ixon.RawComm
-pub struct DecodedRawComm {
-  pub addr: Address,
-  pub comm: DecodedComm,
-}
-
-/// Decoded Ixon.RawNameEntry
-pub struct DecodedRawNameEntry {
-  pub addr: Address,
-  pub name: Name,
-}
-
-/// Decoded Ixon.RawEnv
-pub struct DecodedRawEnv {
-  pub consts: Vec<DecodedRawConst>,
-  pub named: Vec<DecodedRawNamed>,
-  pub blobs: Vec<DecodedRawBlob>,
-  pub comms: Vec<DecodedRawComm>,
-  pub names: Vec<DecodedRawNameEntry>,
-}
-
-// =============================================================================
-// Build/Decode functions for sub-types
-// =============================================================================
-
 /// Decode Ixon.Comm from Lean pointer.
+/// Comm = { secret : Address, payload : Address }
 pub fn decode_comm(obj: LeanObj) -> DecodedComm {
-  let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) };
-  let ba0 = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) };
-  let ba1 = unsafe { LeanByteArray::from_raw(ctor.get(1).as_ptr()) };
+  let ctor = obj.as_ctor();
   DecodedComm {
-    secret: ba0.decode_ixon(),
-    payload: ba1.decode_ixon(),
+    secret: decode_ixon_address(ctor.get(0)),
+    payload: decode_ixon_address(ctor.get(1)),
   }
 }
 
 /// Build Ixon.Comm Lean object.
 pub fn build_comm(comm: &DecodedComm) -> LeanObj {
   let ctor = LeanCtor::alloc(0, 2, 0);
-  ctor.set(0, IxAddress::build_from_ixon(&comm.secret));
-  ctor.set(1, IxAddress::build_from_ixon(&comm.payload));
+  ctor.set(0, build_address_from_ixon(&comm.secret));
+  ctor.set(1, build_address_from_ixon(&comm.payload));
   *ctor
 }
 
+// =============================================================================
+// RawConst (addr: Address, const: Constant)
+// =============================================================================
+
+/// Decoded Ixon.RawConst
+pub struct DecodedRawConst {
+  pub addr: Address,
+  pub constant: IxonConstant,
+}
+
 /// Decode Ixon.RawConst from Lean pointer.
pub fn decode_raw_const(obj: LeanObj) -> DecodedRawConst { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; - let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let ctor = obj.as_ctor(); DecodedRawConst { - addr: ba.decode_ixon(), - constant: IxonConstant::new(ctor.get(1)).decode(), + addr: decode_ixon_address(ctor.get(0)), + constant: decode_ixon_constant(ctor.get(1)), } } /// Build Ixon.RawConst Lean object. pub fn build_raw_const(rc: &DecodedRawConst) -> LeanObj { let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, IxAddress::build_from_ixon(&rc.addr)); - ctor.set(1, IxonConstant::build(&rc.constant)); + ctor.set(0, build_address_from_ixon(&rc.addr)); + ctor.set(1, build_ixon_constant(&rc.constant)); *ctor } +// ============================================================================= +// RawNamed (name: Ix.Name, addr: Address, constMeta: ConstantMeta) +// ============================================================================= + +/// Decoded Ixon.RawNamed +pub struct DecodedRawNamed { + pub name: Name, + pub addr: Address, + pub const_meta: ConstantMeta, +} + /// Decode Ixon.RawNamed from Lean pointer. 
pub fn decode_raw_named(obj: LeanObj) -> DecodedRawNamed {
-  let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) };
-  let ba = unsafe { LeanByteArray::from_raw(ctor.get(1).as_ptr()) };
+  let ctor = obj.as_ctor();
   DecodedRawNamed {
-    name: decode_ix_name(ctor.get(0).as_ptr()),
-    addr: ba.decode_ixon(),
-    const_meta: IxonConstantMeta::new(ctor.get(2)).decode(),
+    name: decode_ix_name(ctor.get(0)),
+    addr: decode_ixon_address(ctor.get(1)),
+    const_meta: decode_constant_meta(ctor.get(2)),
   }
 }
 
@@ -126,18 +104,27 @@ pub fn build_raw_named(
 ) -> LeanObj {
   let ctor = LeanCtor::alloc(0, 3, 0);
   ctor.set(0, build_name(cache, &rn.name));
-  ctor.set(1, IxAddress::build_from_ixon(&rn.addr));
-  ctor.set(2, IxonConstantMeta::build(&rn.const_meta));
+  ctor.set(1, build_address_from_ixon(&rn.addr));
+  ctor.set(2, build_constant_meta(&rn.const_meta));
   *ctor
 }
 
+// =============================================================================
+// RawBlob (addr: Address, bytes: ByteArray)
+// =============================================================================
+
+/// Decoded Ixon.RawBlob
+pub struct DecodedRawBlob {
+  pub addr: Address,
+  pub bytes: Vec<u8>,
+}
+
 /// Decode Ixon.RawBlob from Lean pointer.
 pub fn decode_raw_blob(obj: LeanObj) -> DecodedRawBlob {
-  let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) };
-  let ba_addr = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) };
-  let ba = unsafe { LeanByteArray::from_raw(ctor.get(1).as_ptr()) };
+  let ctor = obj.as_ctor();
+  let ba = ctor.get(1).as_byte_array();
   DecodedRawBlob {
-    addr: ba_addr.decode_ixon(),
+    addr: decode_ixon_address(ctor.get(0)),
     bytes: ba.as_bytes().to_vec(),
   }
 }
 
@@ -145,19 +132,26 @@ pub fn decode_raw_blob(obj: LeanObj) -> DecodedRawBlob {
 /// Build Ixon.RawBlob Lean object.
pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanObj { let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, IxAddress::build_from_ixon(&rb.addr)); + ctor.set(0, build_address_from_ixon(&rb.addr)); ctor.set(1, LeanByteArray::from_bytes(&rb.bytes)); *ctor } +// ============================================================================= +// RawComm (addr: Address, comm: Comm) +// ============================================================================= + +/// Decoded Ixon.RawComm +pub struct DecodedRawComm { + pub addr: Address, + pub comm: DecodedComm, +} + /// Decode Ixon.RawComm from Lean pointer. pub fn decode_raw_comm(obj: LeanObj) -> DecodedRawComm { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); DecodedRawComm { - addr: { - let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - ba.decode_ixon() - }, + addr: decode_ixon_address(ctor.get(0)), comm: decode_comm(ctor.get(1)), } } @@ -165,18 +159,27 @@ pub fn decode_raw_comm(obj: LeanObj) -> DecodedRawComm { /// Build Ixon.RawComm Lean object. pub fn build_raw_comm(rc: &DecodedRawComm) -> LeanObj { let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, IxAddress::build_from_ixon(&rc.addr)); + ctor.set(0, build_address_from_ixon(&rc.addr)); ctor.set(1, build_comm(&rc.comm)); *ctor } +// ============================================================================= +// RawNameEntry (addr: Address, name: Ix.Name) +// ============================================================================= + +/// Decoded Ixon.RawNameEntry +pub struct DecodedRawNameEntry { + pub addr: Address, + pub name: Name, +} + /// Decode Ixon.RawNameEntry from Lean pointer. 
pub fn decode_raw_name_entry(obj: LeanObj) -> DecodedRawNameEntry { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; - let ba = unsafe { LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; + let ctor = obj.as_ctor(); DecodedRawNameEntry { - addr: ba.decode_ixon(), - name: decode_ix_name(ctor.get(1).as_ptr()), + addr: decode_ixon_address(ctor.get(0)), + name: decode_ix_name(ctor.get(1)), } } @@ -187,84 +190,88 @@ pub fn build_raw_name_entry( name: &Name, ) -> LeanObj { let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, IxAddress::build_from_ixon(addr)); + ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, build_name(cache, name)); *ctor } // ============================================================================= -// IxonRawEnv methods +// RawEnv (consts, named, blobs, comms, names) // ============================================================================= -impl IxonRawEnv { - /// Decode Ixon.RawEnv from Lean pointer. - pub fn decode_all(obj: LeanObj) -> DecodedRawEnv { - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; - let consts_arr = unsafe { LeanArray::from_raw(ctor.get(0).as_ptr()) }; - let named_arr = unsafe { LeanArray::from_raw(ctor.get(1).as_ptr()) }; - let blobs_arr = unsafe { LeanArray::from_raw(ctor.get(2).as_ptr()) }; - let comms_arr = unsafe { LeanArray::from_raw(ctor.get(3).as_ptr()) }; - let names_arr = unsafe { LeanArray::from_raw(ctor.get(4).as_ptr()) }; - - DecodedRawEnv { - consts: consts_arr.map(decode_raw_const), - named: named_arr.map(decode_raw_named), - blobs: blobs_arr.map(decode_raw_blob), - comms: comms_arr.map(decode_raw_comm), - names: names_arr.map(decode_raw_name_entry), - } - } - - /// Build Ixon.RawEnv Lean object. 
-  pub fn build_all(env: &DecodedRawEnv) -> LeanObj {
-    let mut cache = LeanBuildCache::new();
-
-    let consts_arr = LeanArray::alloc(env.consts.len());
-    for (i, rc) in env.consts.iter().enumerate() {
-      consts_arr.set(i, build_raw_const(rc));
-    }
-
-    let named_arr = LeanArray::alloc(env.named.len());
-    for (i, rn) in env.named.iter().enumerate() {
-      named_arr.set(i, build_raw_named(&mut cache, rn));
-    }
-
-    let blobs_arr = LeanArray::alloc(env.blobs.len());
-    for (i, rb) in env.blobs.iter().enumerate() {
-      blobs_arr.set(i, build_raw_blob(rb));
-    }
-
-    let comms_arr = LeanArray::alloc(env.comms.len());
-    for (i, rc) in env.comms.iter().enumerate() {
-      comms_arr.set(i, build_raw_comm(rc));
-    }
-
-    let names_arr = LeanArray::alloc(env.names.len());
-    for (i, rn) in env.names.iter().enumerate() {
-      names_arr.set(i, build_raw_name_entry(&mut cache, &rn.addr, &rn.name));
-    }
-
-    let ctor = LeanCtor::alloc(0, 5, 0);
-    ctor.set(0, consts_arr);
-    ctor.set(1, named_arr);
-    ctor.set(2, blobs_arr);
-    ctor.set(3, comms_arr);
-    ctor.set(4, names_arr);
-    *ctor
-  }
+/// Decoded Ixon.RawEnv
+pub struct DecodedRawEnv {
+  pub consts: Vec<DecodedRawConst>,
+  pub named: Vec<DecodedRawNamed>,
+  pub blobs: Vec<DecodedRawBlob>,
+  pub comms: Vec<DecodedRawComm>,
+  pub names: Vec<DecodedRawNameEntry>,
 }
 
-// Keep old names as aliases for backward compatibility in consumer code
+/// Decode Ixon.RawEnv from Lean pointer.
 pub fn decode_raw_env(obj: LeanObj) -> DecodedRawEnv {
-  IxonRawEnv::decode_all(obj)
+  let ctor = obj.as_ctor();
+  let consts_arr = ctor.get(0).as_array();
+  let named_arr = ctor.get(1).as_array();
+  let blobs_arr = ctor.get(2).as_array();
+  let comms_arr = ctor.get(3).as_array();
+  let names_arr = ctor.get(4).as_array();
+
+  DecodedRawEnv {
+    consts: consts_arr.map(decode_raw_const),
+    named: named_arr.map(decode_raw_named),
+    blobs: blobs_arr.map(decode_raw_blob),
+    comms: comms_arr.map(decode_raw_comm),
+    names: names_arr.map(decode_raw_name_entry),
+  }
 }
 
+/// Build Ixon.RawEnv Lean object.
pub fn build_raw_env(env: &DecodedRawEnv) -> LeanObj { - IxonRawEnv::build_all(env) + let mut cache = LeanBuildCache::new(); + + // Build consts array + let consts_arr = LeanArray::alloc(env.consts.len()); + for (i, rc) in env.consts.iter().enumerate() { + consts_arr.set(i, build_raw_const(rc)); + } + + // Build named array + let named_arr = LeanArray::alloc(env.named.len()); + for (i, rn) in env.named.iter().enumerate() { + named_arr.set(i, build_raw_named(&mut cache, rn)); + } + + // Build blobs array + let blobs_arr = LeanArray::alloc(env.blobs.len()); + for (i, rb) in env.blobs.iter().enumerate() { + blobs_arr.set(i, build_raw_blob(rb)); + } + + // Build comms array + let comms_arr = LeanArray::alloc(env.comms.len()); + for (i, rc) in env.comms.iter().enumerate() { + comms_arr.set(i, build_raw_comm(rc)); + } + + // Build names array + let names_arr = LeanArray::alloc(env.names.len()); + for (i, rn) in env.names.iter().enumerate() { + names_arr.set(i, build_raw_name_entry(&mut cache, &rn.addr, &rn.name)); + } + + // Build RawEnv structure + let ctor = LeanCtor::alloc(0, 5, 0); + ctor.set(0, consts_arr); + ctor.set(1, named_arr); + ctor.set(2, blobs_arr); + ctor.set(3, comms_arr); + ctor.set(4, names_arr); + *ctor } // ============================================================================= -// DecodedRawEnv ↔ IxonEnv Conversion Helpers +// DecodedRawEnv <-> IxonEnv Conversion Helpers // ============================================================================= /// Reconstruct a Rust IxonEnv from a DecodedRawEnv. 
@@ -277,7 +284,7 @@ pub fn decoded_to_ixon_env(decoded: &DecodedRawEnv) -> IxonEnv { env.store_name(rn.addr.clone(), rn.name.clone()); } for rn in &decoded.named { - let named = crate::ix::ixon::env::Named::new(rn.addr.clone(), rn.const_meta.clone()); + let named = IxonNamed::new(rn.addr.clone(), rn.const_meta.clone()); env.register_name(rn.name.clone(), named); } for rb in &decoded.blobs { @@ -341,35 +348,37 @@ pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { // rs_ser_env: Serialize an Ixon.RawEnv to bytes // ============================================================================= -/// FFI: Serialize an Ixon.RawEnv → ByteArray via Rust's Env.put. Pure. +/// FFI: Serialize an Ixon.RawEnv -> ByteArray via Rust's Env.put. Pure. #[unsafe(no_mangle)] -pub extern "C" fn rs_ser_env(raw_env_obj: LeanObj) -> LeanObj { - let decoded = decode_raw_env(raw_env_obj); +pub extern "C" fn rs_ser_env(obj: LeanObj) -> LeanObj { + let decoded = decode_raw_env(obj); let env = decoded_to_ixon_env(&decoded); let mut buf = Vec::new(); env.put(&mut buf).expect("Env serialization failed"); - LeanByteArray::from_bytes(&buf).into() + + let ba = LeanByteArray::from_bytes(&buf); + *ba } // ============================================================================= // rs_des_env: Deserialize bytes to an Ixon.RawEnv // ============================================================================= -/// FFI: Deserialize ByteArray → Except String Ixon.RawEnv via Rust's Env.get. Pure. +/// FFI: Deserialize ByteArray -> Except String Ixon.RawEnv via Rust's Env.get. Pure. 
#[unsafe(no_mangle)]
-pub extern "C" fn rs_des_env(bytes_obj: LeanObj) -> LeanObj {
-  let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) };
+pub extern "C" fn rs_des_env(obj: LeanObj) -> LeanObj {
+  let ba = obj.as_byte_array();
   let data = ba.as_bytes();
   let mut slice: &[u8] = data;
   match IxonEnv::get(&mut slice) {
     Ok(env) => {
       let decoded = ixon_env_to_decoded(&env);
       let raw_env = build_raw_env(&decoded);
-      LeanExcept::ok(raw_env).into()
+      *LeanExcept::ok(raw_env)
     },
     Err(e) => {
-      let msg = format!("rs_des_env: {e}");
-      LeanExcept::error_string(&msg).into()
+      let msg = format!("rs_des_env: {}", e);
+      *LeanExcept::error_string(&msg)
     },
   }
 }
diff --git a/src/lean/ffi/ixon/expr.rs b/src/lean/ffi/ixon/expr.rs
index 41f7547c..4323bad2 100644
--- a/src/lean/ffi/ixon/expr.rs
+++ b/src/lean/ffi/ixon/expr.rs
@@ -2,195 +2,229 @@
 
 use std::sync::Arc;
 
-use crate::ix::ixon::expr::Expr;
-use crate::lean::obj::{IxonExpr, LeanArray, LeanCtor, LeanObj};
+use crate::ix::ixon::expr::Expr as IxonExpr;
+use crate::lean::obj::{LeanArray, LeanCtor, LeanObj};
 
-/// Decode Array UInt64 from Lean.
-/// UInt64 values in arrays are stored as:
-/// - Scalars (odd pointers) for small values: use unbox_usize
-/// - Heap objects (even pointers) with the u64 value at offset 8
-fn decode_u64_array(obj: LeanObj) -> Vec<u64> {
-  let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) };
-  arr.map(|elem| {
-    if elem.is_scalar() {
-      elem.unbox_usize() as u64
-    } else {
-      let ctor = unsafe { LeanCtor::from_raw(elem.as_ptr()) };
-      ctor.scalar_u64(0, 0)
-    }
-  })
+/// Build Ixon.Expr (12 constructors).
+pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObj { + match expr { + IxonExpr::Sort(idx) => { + let ctor = LeanCtor::alloc(0, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + IxonExpr::Var(idx) => { + let ctor = LeanCtor::alloc(1, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + IxonExpr::Ref(ref_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); + for (i, idx) in univ_idxs.iter().enumerate() { + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); + } + let ctor = LeanCtor::alloc(2, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *ref_idx); + *ctor + }, + IxonExpr::Rec(rec_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); + for (i, idx) in univ_idxs.iter().enumerate() { + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); + } + let ctor = LeanCtor::alloc(3, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *rec_idx); + *ctor + }, + IxonExpr::Prj(type_ref_idx, field_idx, val) => { + let val_obj = build_ixon_expr(val); + let ctor = LeanCtor::alloc(4, 1, 16); + ctor.set(0, val_obj); + ctor.set_u64(8, *type_ref_idx); + ctor.set_u64(16, *field_idx); + *ctor + }, + IxonExpr::Str(ref_idx) => { + let ctor = LeanCtor::alloc(5, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + IxonExpr::Nat(ref_idx) => { + let ctor = LeanCtor::alloc(6, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + IxonExpr::App(fun, arg) => { + let fun_obj = build_ixon_expr(fun); + let arg_obj = build_ixon_expr(arg); + let ctor = LeanCtor::alloc(7, 2, 0); + ctor.set(0, fun_obj); + ctor.set(1, arg_obj); + *ctor + }, + IxonExpr::Lam(ty, body) => { + let ty_obj = build_ixon_expr(ty); + let body_obj = build_ixon_expr(body); + let ctor = LeanCtor::alloc(8, 2, 0); + ctor.set(0, ty_obj); + ctor.set(1, body_obj); + *ctor + }, + IxonExpr::All(ty, body) => { + let ty_obj = build_ixon_expr(ty); + let body_obj = build_ixon_expr(body); + let ctor = LeanCtor::alloc(9, 2, 0); + ctor.set(0, 
ty_obj);
+      ctor.set(1, body_obj);
+      *ctor
+    },
+    IxonExpr::Let(non_dep, ty, val, body) => {
+      let ty_obj = build_ixon_expr(ty);
+      let val_obj = build_ixon_expr(val);
+      let body_obj = build_ixon_expr(body);
+      let ctor = LeanCtor::alloc(10, 3, 1);
+      ctor.set(0, ty_obj);
+      ctor.set(1, val_obj);
+      ctor.set(2, body_obj);
+      ctor.set_u8(24, if *non_dep { 1 } else { 0 });
+      *ctor
+    },
+    IxonExpr::Share(idx) => {
+      let ctor = LeanCtor::alloc(11, 0, 8);
+      ctor.set_u64(0, *idx);
+      *ctor
+    },
+  }
 }
 
-impl IxonExpr {
-  /// Build Ixon.Expr (12 constructors).
-  pub fn build(expr: &Expr) -> Self {
-    let obj = match expr {
-      Expr::Sort(idx) => {
-        let ctor = LeanCtor::alloc(0, 0, 8);
-        ctor.set_u64(0, *idx);
-        *ctor
-      },
-      Expr::Var(idx) => {
-        let ctor = LeanCtor::alloc(1, 0, 8);
-        ctor.set_u64(0, *idx);
-        *ctor
-      },
-      Expr::Ref(ref_idx, univ_idxs) => {
-        let arr = LeanArray::alloc(univ_idxs.len());
-        for (i, idx) in univ_idxs.iter().enumerate() {
-          let uint64_obj = LeanCtor::alloc(0, 0, 8);
-          uint64_obj.set_u64(0, *idx);
-          arr.set(i, uint64_obj);
-        }
-        let ctor = LeanCtor::alloc(2, 1, 8);
-        ctor.set(0, arr);
-        ctor.set_u64(8, *ref_idx);
-        *ctor
-      },
-      Expr::Rec(rec_idx, univ_idxs) => {
-        let arr = LeanArray::alloc(univ_idxs.len());
-        for (i, idx) in univ_idxs.iter().enumerate() {
-          let uint64_obj = LeanCtor::alloc(0, 0, 8);
-          uint64_obj.set_u64(0, *idx);
-          arr.set(i, uint64_obj);
-        }
-        let ctor = LeanCtor::alloc(3, 1, 8);
-        ctor.set(0, arr);
-        ctor.set_u64(8, *rec_idx);
-        *ctor
-      },
-      Expr::Prj(type_ref_idx, field_idx, val) => {
-        let val_obj = Self::build(val);
-        let ctor = LeanCtor::alloc(4, 1, 16);
-        ctor.set(0, val_obj);
-        ctor.set_u64(8, *type_ref_idx);
-        ctor.set_u64(16, *field_idx);
-        *ctor
-      },
-      Expr::Str(ref_idx) => {
-        let ctor = LeanCtor::alloc(5, 0, 8);
-        ctor.set_u64(0, *ref_idx);
-        *ctor
-      },
-      Expr::Nat(ref_idx) => {
-        let ctor = LeanCtor::alloc(6, 0, 8);
-        ctor.set_u64(0, *ref_idx);
-        *ctor
-      },
-      Expr::App(fun, arg) => {
-        let fun_obj = Self::build(fun);
-        let arg_obj = Self::build(arg);
-        let ctor = LeanCtor::alloc(7, 2, 0);
-        ctor.set(0, fun_obj);
-        ctor.set(1, arg_obj);
-        *ctor
-      },
-      Expr::Lam(ty, body) => {
-        let ty_obj = Self::build(ty);
-        let body_obj = Self::build(body);
-        let ctor = LeanCtor::alloc(8, 2, 0);
-        ctor.set(0, ty_obj);
-        ctor.set(1, body_obj);
-        *ctor
-      },
-      Expr::All(ty, body) => {
-        let ty_obj = Self::build(ty);
-        let body_obj = Self::build(body);
-        let ctor = LeanCtor::alloc(9, 2, 0);
-        ctor.set(0, ty_obj);
-        ctor.set(1, body_obj);
-        *ctor
-      },
-      Expr::Let(non_dep, ty, val, body) => {
-        let ty_obj = Self::build(ty);
-        let val_obj = Self::build(val);
-        let body_obj = Self::build(body);
-        let ctor = LeanCtor::alloc(10, 3, 1);
-        ctor.set(0, ty_obj);
-        ctor.set(1, val_obj);
-        ctor.set(2, body_obj);
-        ctor.set_u8(3 * 8, if *non_dep { 1 } else { 0 });
-        *ctor
-      },
-      Expr::Share(idx) => {
-        let ctor = LeanCtor::alloc(11, 0, 8);
-        ctor.set_u64(0, *idx);
-        *ctor
-      },
-    };
-    Self::new(obj)
+/// Build an Array of Ixon.Expr.
+pub fn build_ixon_expr_array(exprs: &[Arc<IxonExpr>]) -> LeanArray {
+  let arr = LeanArray::alloc(exprs.len());
+  for (i, expr) in exprs.iter().enumerate() {
+    arr.set(i, build_ixon_expr(expr));
   }
+  arr
+}
 
-  /// Build an Array of Ixon.Expr.
-  pub fn build_array(exprs: &[Arc<Expr>]) -> LeanArray {
-    let arr = LeanArray::alloc(exprs.len());
-    for (i, expr) in exprs.iter().enumerate() {
-      arr.set(i, Self::build(expr));
-    }
-    arr
-  }
+// =============================================================================
+// Decode Functions
+// =============================================================================
 
-  /// Decode Ixon.Expr (12 constructors).
-  pub fn decode(self) -> Expr {
-    let obj: LeanObj = *self;
-    let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) };
-    match ctor.tag() {
-      0 => Expr::Sort(ctor.scalar_u64(0, 0)),
-      1 => Expr::Var(ctor.scalar_u64(0, 0)),
-      2 => {
-        let ref_idx = ctor.scalar_u64(1, 0);
-        let univ_idxs = decode_u64_array(ctor.get(0));
-        Expr::Ref(ref_idx, univ_idxs)
-      },
-      3 => {
-        let rec_idx = ctor.scalar_u64(1, 0);
-        let univ_idxs = decode_u64_array(ctor.get(0));
-        Expr::Rec(rec_idx, univ_idxs)
-      },
-      4 => {
-        let type_ref_idx = ctor.scalar_u64(1, 0);
-        let field_idx = ctor.scalar_u64(1, 8);
-        Expr::Prj(
-          type_ref_idx,
-          field_idx,
-          Arc::new(Self::new(ctor.get(0)).decode()),
-        )
-      },
-      5 => Expr::Str(ctor.scalar_u64(0, 0)),
-      6 => Expr::Nat(ctor.scalar_u64(0, 0)),
-      7 => Expr::App(
-        Arc::new(Self::new(ctor.get(0)).decode()),
-        Arc::new(Self::new(ctor.get(1)).decode()),
-      ),
-      8 => Expr::Lam(
-        Arc::new(Self::new(ctor.get(0)).decode()),
-        Arc::new(Self::new(ctor.get(1)).decode()),
-      ),
-      9 => Expr::All(
-        Arc::new(Self::new(ctor.get(0)).decode()),
-        Arc::new(Self::new(ctor.get(1)).decode()),
-      ),
-      10 => {
-        let non_dep = ctor.scalar_bool(3, 0);
-        Expr::Let(
-          non_dep,
-          Arc::new(Self::new(ctor.get(0)).decode()),
-          Arc::new(Self::new(ctor.get(1)).decode()),
-          Arc::new(Self::new(ctor.get(2)).decode()),
-        )
-      },
-      11 => Expr::Share(ctor.scalar_u64(0, 0)),
-      tag => panic!("Invalid Ixon.Expr tag: {tag}"),
-    }
-  }
+/// Decode Array UInt64 from Lean.
+fn decode_u64_array(obj: LeanObj) -> Vec<u64> {
+  let arr = obj.as_array();
+  arr
+    .iter()
+    .map(|elem| {
+      if elem.is_scalar() {
+        elem.unbox_usize() as u64
+      } else {
+        let ctor = elem.as_ctor();
+        ctor.scalar_u64(0, 0)
+      }
+    })
+    .collect()
+}
 
-  /// Decode Array Ixon.Expr.
-  pub fn decode_array(obj: LeanObj) -> Vec<Arc<Expr>> {
-    let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) };
-    arr.map(|e| Arc::new(Self::new(e).decode()))
+/// Decode Ixon.Expr (12 constructors).
+pub fn decode_ixon_expr(obj: LeanObj) -> IxonExpr { + let ctor = obj.as_ctor(); + let tag = ctor.tag(); + match tag { + 0 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Sort(idx) + }, + 1 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Var(idx) + }, + 2 => { + let arr_obj = ctor.get(0); + let ref_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(arr_obj); + IxonExpr::Ref(ref_idx, univ_idxs) + }, + 3 => { + let arr_obj = ctor.get(0); + let rec_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(arr_obj); + IxonExpr::Rec(rec_idx, univ_idxs) + }, + 4 => { + let val_obj = ctor.get(0); + let type_ref_idx = ctor.scalar_u64(1, 0); + let field_idx = ctor.scalar_u64(1, 8); + IxonExpr::Prj( + type_ref_idx, + field_idx, + Arc::new(decode_ixon_expr(val_obj)), + ) + }, + 5 => { + let ref_idx = ctor.scalar_u64(0, 0); + IxonExpr::Str(ref_idx) + }, + 6 => { + let ref_idx = ctor.scalar_u64(0, 0); + IxonExpr::Nat(ref_idx) + }, + 7 => { + let f_obj = ctor.get(0); + let a_obj = ctor.get(1); + IxonExpr::App( + Arc::new(decode_ixon_expr(f_obj)), + Arc::new(decode_ixon_expr(a_obj)), + ) + }, + 8 => { + let ty_obj = ctor.get(0); + let body_obj = ctor.get(1); + IxonExpr::Lam( + Arc::new(decode_ixon_expr(ty_obj)), + Arc::new(decode_ixon_expr(body_obj)), + ) + }, + 9 => { + let ty_obj = ctor.get(0); + let body_obj = ctor.get(1); + IxonExpr::All( + Arc::new(decode_ixon_expr(ty_obj)), + Arc::new(decode_ixon_expr(body_obj)), + ) + }, + 10 => { + let ty_obj = ctor.get(0); + let val_obj = ctor.get(1); + let body_obj = ctor.get(2); + let non_dep = ctor.scalar_u8(3, 0) != 0; + IxonExpr::Let( + non_dep, + Arc::new(decode_ixon_expr(ty_obj)), + Arc::new(decode_ixon_expr(val_obj)), + Arc::new(decode_ixon_expr(body_obj)), + ) + }, + 11 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Share(idx) + }, + _ => panic!("Invalid Ixon.Expr tag: {}", tag), } } +/// Decode Array Ixon.Expr. 
+pub fn decode_ixon_expr_array(obj: LeanObj) -> Vec> { + let arr = obj.as_array(); + arr.map(|e| Arc::new(decode_ixon_expr(e))) +} + // ============================================================================= // FFI Exports // ============================================================================= @@ -198,6 +232,6 @@ impl IxonExpr { /// Round-trip Ixon.Expr. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_expr(obj: LeanObj) -> LeanObj { - let expr = IxonExpr::new(obj).decode(); - IxonExpr::build(&expr).into() + let expr = decode_ixon_expr(obj); + build_ixon_expr(&expr) } diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs index 1ef42dac..5a71d4d1 100644 --- a/src/lean/ffi/ixon/meta.rs +++ b/src/lean/ffi/ixon/meta.rs @@ -7,14 +7,14 @@ use crate::ix::env::BinderInfo; use crate::ix::ixon::Comm; use crate::ix::ixon::env::Named; use crate::ix::ixon::metadata::{ - ConstantMeta, DataValue, ExprMeta, ExprMetaData, KVMap, -}; -use crate::lean::obj::{ - IxAddress, IxonComm, IxonConstantMeta, IxonDataValue, IxonExprMetaArena, - IxonExprMetaData, IxonNamed, LeanArray, LeanCtor, LeanObj, + ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, }; +use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; -use super::constant::*; +use crate::lean::ffi::ixon::constant::{ + build_address_array, build_address_from_ixon, decode_ixon_address, + decode_ixon_address_array, +}; use crate::lean::ffi::ix::constant::{ build_reducibility_hints, decode_reducibility_hints, }; @@ -24,74 +24,56 @@ use crate::lean::ffi::ix::expr::binder_info_to_u8; // DataValue Build/Decode // ============================================================================= -impl IxonDataValue { - /// Build Ixon.DataValue (for metadata) - pub fn build(dv: &DataValue) -> Self { - let obj = match dv { - DataValue::OfString(addr) => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, IxAddress::build_from_ixon(addr)); - *ctor - }, - DataValue::OfBool(b) => { - let 
ctor = LeanCtor::alloc(1, 0, 1); - ctor.set_u8(0, if *b { 1 } else { 0 }); - *ctor - }, - DataValue::OfName(addr) => { - let ctor = LeanCtor::alloc(2, 1, 0); - ctor.set(0, IxAddress::build_from_ixon(addr)); - *ctor - }, - DataValue::OfNat(addr) => { - let ctor = LeanCtor::alloc(3, 1, 0); - ctor.set(0, IxAddress::build_from_ixon(addr)); - *ctor - }, - DataValue::OfInt(addr) => { - let ctor = LeanCtor::alloc(4, 1, 0); - ctor.set(0, IxAddress::build_from_ixon(addr)); - *ctor - }, - DataValue::OfSyntax(addr) => { - let ctor = LeanCtor::alloc(5, 1, 0); - ctor.set(0, IxAddress::build_from_ixon(addr)); - *ctor - }, - }; - Self::new(obj) +/// Build Ixon.DataValue (for metadata) +pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanObj { + match dv { + IxonDataValue::OfString(addr) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, + IxonDataValue::OfBool(b) => { + let ctor = LeanCtor::alloc(1, 0, 1); + ctor.set_u8(0, if *b { 1 } else { 0 }); + *ctor + }, + IxonDataValue::OfName(addr) => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, + IxonDataValue::OfNat(addr) => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, + IxonDataValue::OfInt(addr) => { + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, + IxonDataValue::OfSyntax(addr) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, build_address_from_ixon(addr)); + *ctor + }, } +} - /// Decode Ixon.DataValue. 
- pub fn decode(self) -> DataValue { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - match ctor.tag() { - 0 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - DataValue::OfString(ba.decode_ixon()) - }, - 1 => { - let b = ctor.scalar_u8(0, 0) != 0; - DataValue::OfBool(b) - }, - 2 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - DataValue::OfName(ba.decode_ixon()) - }, - 3 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - DataValue::OfNat(ba.decode_ixon()) - }, - 4 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - DataValue::OfInt(ba.decode_ixon()) - }, - 5 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - DataValue::OfSyntax(ba.decode_ixon()) - }, - tag => panic!("Invalid Ixon.DataValue tag: {tag}"), - } +/// Decode Ixon.DataValue. +pub fn decode_ixon_data_value(obj: LeanObj) -> IxonDataValue { + let ctor = obj.as_ctor(); + match ctor.tag() { + 0 => IxonDataValue::OfString(decode_ixon_address(ctor.get(0))), + 1 => { + let b = ctor.scalar_u8(0, 0) != 0; + IxonDataValue::OfBool(b) + }, + 2 => IxonDataValue::OfName(decode_ixon_address(ctor.get(0))), + 3 => IxonDataValue::OfNat(decode_ixon_address(ctor.get(0))), + 4 => IxonDataValue::OfInt(decode_ixon_address(ctor.get(0))), + 5 => IxonDataValue::OfSyntax(decode_ixon_address(ctor.get(0))), + tag => panic!("Invalid Ixon.DataValue tag: {}", tag), } } @@ -104,8 +86,8 @@ pub fn build_ixon_kvmap(kvmap: &KVMap) -> LeanArray { let arr = LeanArray::alloc(kvmap.len()); for (i, (addr, dv)) in kvmap.iter().enumerate() { let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, IxAddress::build_from_ixon(addr)); - pair.set(1, IxonDataValue::build(dv)); + pair.set(0, build_address_from_ixon(addr)); + pair.set(1, build_ixon_data_value(dv)); arr.set(i, pair); } arr @@ -122,20 +104,22 @@ pub fn 
build_kvmap_array(kvmaps: &[KVMap]) -> LeanArray { /// Decode KVMap (Array (Address × DataValue)). pub fn decode_ixon_kvmap(obj: LeanObj) -> KVMap { - let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; - arr.map(|pair| { - let pair_ctor = unsafe { LeanCtor::from_raw(pair.as_ptr()) }; - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(pair_ctor.get(0).as_ptr()) }; - ( - ba.decode_ixon(), - IxonDataValue::new(pair_ctor.get(1)).decode(), - ) - }) + let arr = obj.as_array(); + arr + .iter() + .map(|pair| { + let pair_ctor = pair.as_ctor(); + ( + decode_ixon_address(pair_ctor.get(0)), + decode_ixon_data_value(pair_ctor.get(1)), + ) + }) + .collect() } /// Decode Array KVMap. fn decode_kvmap_array(obj: LeanObj) -> Vec { - let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + let arr = obj.as_array(); arr.map(decode_ixon_kvmap) } @@ -145,7 +129,7 @@ fn decode_kvmap_array(obj: LeanObj) -> Vec { /// Decode Array Address. fn decode_address_array(obj: LeanObj) -> Vec
{ - IxAddress::decode_array(obj) + decode_ixon_address_array(obj) } /// Build Array UInt64. @@ -159,138 +143,151 @@ fn build_u64_array(vals: &[u64]) -> LeanArray { /// Decode Array UInt64. fn decode_u64_array(obj: LeanObj) -> Vec { - let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; - arr.map(|elem| elem.unbox_u64()) + let arr = obj.as_array(); + arr.iter().map(|elem| elem.unbox_u64()).collect() } // ============================================================================= // ExprMetaData Build/Decode // ============================================================================= -impl IxonExprMetaData { - /// Build Ixon.ExprMetaData Lean object. - pub fn build(node: &ExprMetaData) -> Self { - let obj = match node { - ExprMetaData::Leaf => LeanObj::box_usize(0), - - ExprMetaData::App { children } => { - let ctor = LeanCtor::alloc(1, 0, 16); - ctor.set_u64(0, children[0]); - ctor.set_u64(8, children[1]); - *ctor - }, - - ExprMetaData::Binder { name, info, children } => { - let ctor = LeanCtor::alloc(2, 1, 17); - ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set_u64(8, children[0]); - ctor.set_u64(8 + 8, children[1]); - ctor.set_u8(8 + 16, binder_info_to_u8(info)); - *ctor - }, - - ExprMetaData::LetBinder { name, children } => { - let ctor = LeanCtor::alloc(3, 1, 24); - ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set_u64(8, children[0]); - ctor.set_u64(8 + 8, children[1]); - ctor.set_u64(8 + 16, children[2]); - *ctor - }, - - ExprMetaData::Ref { name } => { - let ctor = LeanCtor::alloc(4, 1, 0); - ctor.set(0, IxAddress::build_from_ixon(name)); - *ctor - }, - - ExprMetaData::Prj { struct_name, child } => { - let ctor = LeanCtor::alloc(5, 1, 8); - ctor.set(0, IxAddress::build_from_ixon(struct_name)); - ctor.set_u64(8, *child); - *ctor - }, - - ExprMetaData::Mdata { mdata, child } => { - let ctor = LeanCtor::alloc(6, 1, 8); - ctor.set(0, build_kvmap_array(mdata)); - ctor.set_u64(8, *child); - *ctor - }, - }; - Self::new(obj) +/// Build 
Ixon.ExprMetaData Lean object. +/// +/// | Variant | Tag | Obj fields | Scalar bytes | +/// |------------|-----|------------------------|--------------------------| +/// | leaf | 0 | 0 | 0 | +/// | app | 1 | 0 | 16 (2× u64) | +/// | binder | 2 | 1 (name: Address) | 17 (info: u8, 2× u64) | +/// | letBinder | 3 | 1 (name: Address) | 24 (3× u64) | +/// | ref | 4 | 1 (name: Address) | 0 | +/// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | +/// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | +pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanObj { + match node { + ExprMetaData::Leaf => LeanObj::box_usize(0), + + ExprMetaData::App { children } => { + // Tag 1, 0 obj fields, 16 scalar bytes (2× u64) + let ctor = LeanCtor::alloc(1, 0, 16); + ctor.set_u64(0, children[0]); + ctor.set_u64(8, children[1]); + *ctor + }, + + ExprMetaData::Binder { name, info, children } => { + // Tag 2, 1 obj field (name), scalar: 2× u64 + u8 (info) + // Lean ABI sorts scalars by size descending: [tyChild: u64 @ 8] [bodyChild: u64 @ 16] [info: u8 @ 24] + // Offsets from obj_cptr: 1*8=8 base for scalar area + let ctor = LeanCtor::alloc(2, 1, 17); + ctor.set(0, build_address_from_ixon(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(16, children[1]); + ctor.set_u8(24, binder_info_to_u8(info)); + *ctor + }, + + ExprMetaData::LetBinder { name, children } => { + // Tag 3, 1 obj field (name), 24 scalar bytes (3× u64) + let ctor = LeanCtor::alloc(3, 1, 24); + ctor.set(0, build_address_from_ixon(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(16, children[1]); + ctor.set_u64(24, children[2]); + *ctor + }, + + ExprMetaData::Ref { name } => { + // Tag 4, 1 obj field (name), 0 scalar bytes + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, build_address_from_ixon(name)); + *ctor + }, + + ExprMetaData::Prj { struct_name, child } => { + // Tag 5, 1 obj field (structName), 8 scalar bytes (1× u64) + let ctor = LeanCtor::alloc(5, 1, 8); + ctor.set(0, 
build_address_from_ixon(struct_name)); + ctor.set_u64(8, *child); + *ctor + }, + + ExprMetaData::Mdata { mdata, child } => { + // Tag 6, 1 obj field (mdata: Array KVMap), 8 scalar bytes (1× u64) + let mdata_arr = build_kvmap_array(mdata); + let ctor = LeanCtor::alloc(6, 1, 8); + ctor.set(0, mdata_arr); + ctor.set_u64(8, *child); + *ctor + }, } +} - /// Decode Ixon.ExprMetaData from Lean pointer. - pub fn decode(self) -> ExprMetaData { - let obj: LeanObj = *self; - if obj.is_scalar() { - let tag = obj.unbox_usize(); - assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {tag}"); - return ExprMetaData::Leaf; - } - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; - match ctor.tag() { - 1 => { - let fun_ = ctor.scalar_u64(0, 0); - let arg = ctor.scalar_u64(0, 8); - ExprMetaData::App { children: [fun_, arg] } - }, - - 2 => { - let ty_child = ctor.scalar_u64(1, 0); - let body_child = ctor.scalar_u64(1, 8); - let info_byte = ctor.scalar_u8(1, 16); - let info = match info_byte { - 0 => BinderInfo::Default, - 1 => BinderInfo::Implicit, - 2 => BinderInfo::StrictImplicit, - 3 => BinderInfo::InstImplicit, - _ => panic!("Invalid BinderInfo tag: {info_byte}"), - }; - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - ExprMetaData::Binder { - name: ba.decode_ixon(), - info, - children: [ty_child, body_child], - } - }, - - 3 => { - let ty_child = ctor.scalar_u64(1, 0); - let val_child = ctor.scalar_u64(1, 8); - let body_child = ctor.scalar_u64(1, 16); - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - ExprMetaData::LetBinder { - name: ba.decode_ixon(), - children: [ty_child, val_child, body_child], - } - }, - - 4 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - ExprMetaData::Ref { name: ba.decode_ixon() } - }, - - 5 => { - let child = ctor.scalar_u64(1, 0); - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - 
ExprMetaData::Prj { - struct_name: ba.decode_ixon(), - child, - } - }, - - 6 => { - let child = ctor.scalar_u64(1, 0); - ExprMetaData::Mdata { - mdata: decode_kvmap_array(ctor.get(0)), - child, - } - }, - - tag => panic!("Invalid Ixon.ExprMetaData tag: {tag}"), - } +/// Decode Ixon.ExprMetaData from Lean pointer. +pub fn decode_expr_meta_data(obj: LeanObj) -> ExprMetaData { + // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) + if obj.is_scalar() { + let tag = obj.as_ptr() as usize >> 1; + assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {}", tag); + return ExprMetaData::Leaf; + } + let ctor = obj.as_ctor(); + match ctor.tag() { + 1 => { + // app: 0 obj fields, 2× u64 scalar + let fun_ = ctor.scalar_u64(0, 0); + let arg = ctor.scalar_u64(0, 8); + ExprMetaData::App { children: [fun_, arg] } + }, + + 2 => { + // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): + // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] + let name = decode_ixon_address(ctor.get(0)); + let ty_child = ctor.scalar_u64(1, 0); + let body_child = ctor.scalar_u64(1, 8); + let info_byte = ctor.scalar_u8(1, 16); + let info = match info_byte { + 0 => BinderInfo::Default, + 1 => BinderInfo::Implicit, + 2 => BinderInfo::StrictImplicit, + 3 => BinderInfo::InstImplicit, + _ => panic!("Invalid BinderInfo tag: {}", info_byte), + }; + ExprMetaData::Binder { name, info, children: [ty_child, body_child] } + }, + + 3 => { + // letBinder: 1 obj field (name), 3× u64 scalar + let name = decode_ixon_address(ctor.get(0)); + let ty_child = ctor.scalar_u64(1, 0); + let val_child = ctor.scalar_u64(1, 8); + let body_child = ctor.scalar_u64(1, 16); + ExprMetaData::LetBinder { + name, + children: [ty_child, val_child, body_child], + } + }, + + 4 => { + // ref: 1 obj field (name), 0 scalar + ExprMetaData::Ref { name: decode_ixon_address(ctor.get(0)) } + }, + + 5 => { + // prj: 1 obj field (structName), 1× u64 scalar + let struct_name = decode_ixon_address(ctor.get(0)); + let 
child = ctor.scalar_u64(1, 0); + ExprMetaData::Prj { struct_name, child } + }, + + 6 => { + // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar + let mdata = decode_kvmap_array(ctor.get(0)); + let child = ctor.scalar_u64(1, 0); + ExprMetaData::Mdata { mdata, child } + }, + + tag => panic!("Invalid Ixon.ExprMetaData tag: {}", tag), } } @@ -298,36 +295,149 @@ impl IxonExprMetaData { // ExprMetaArena Build/Decode // ============================================================================= -impl IxonExprMetaArena { - /// Build Ixon.ExprMetaArena Lean object. - /// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), - /// which Lean unboxes — the value IS the Array directly. - pub fn build(arena: &ExprMeta) -> LeanArray { - let arr = LeanArray::alloc(arena.nodes.len()); - for (i, node) in arena.nodes.iter().enumerate() { - arr.set(i, IxonExprMetaData::build(node)); - } - arr +/// Build Ixon.ExprMetaArena Lean object. +/// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), +/// which Lean unboxes — the value IS the Array directly. +pub fn build_expr_meta_arena(arena: &ExprMeta) -> LeanArray { + let arr = LeanArray::alloc(arena.nodes.len()); + for (i, node) in arena.nodes.iter().enumerate() { + arr.set(i, build_expr_meta_data(node)); } + arr +} - /// Decode Ixon.ExprMetaArena from Lean pointer. - /// Single-field struct is unboxed — obj IS the Array directly. - pub fn decode(obj: LeanObj) -> ExprMeta { - let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; - ExprMeta { nodes: arr.map(|n| IxonExprMetaData::new(n).decode()) } - } +/// Decode Ixon.ExprMetaArena from Lean pointer. +/// Single-field struct is unboxed — obj IS the Array directly. 
+pub fn decode_expr_meta_arena(obj: LeanObj) -> ExprMeta { + let arr = obj.as_array(); + ExprMeta { nodes: arr.map(decode_expr_meta_data) } } // ============================================================================= // ConstantMeta Build/Decode // ============================================================================= -impl IxonConstantMeta { - /// Build Ixon.ConstantMeta Lean object. - pub fn build(meta: &ConstantMeta) -> Self { - let obj = match meta { - ConstantMeta::Empty => LeanObj::box_usize(0), +/// Build Ixon.ConstantMeta Lean object. +/// +/// | Variant | Tag | Obj fields | Scalar bytes | +/// |---------|-----|-----------|-------------| +/// | empty | 0 | 0 | 0 | +/// | defn | 1 | 6 (name, lvls, hints, all, ctx, arena) | 16 (2× u64) | +/// | axio | 2 | 3 (name, lvls, arena) | 8 (1× u64) | +/// | quot | 3 | 3 (name, lvls, arena) | 8 (1× u64) | +/// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | +/// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | +/// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | +pub fn build_constant_meta(meta: &ConstantMeta) -> LeanObj { + match meta { + ConstantMeta::Empty => LeanObj::box_usize(0), + + ConstantMeta::Def { + name, + lvls, + hints, + all, + ctx, + arena, + type_root, + value_root, + } => { + let ctor = LeanCtor::alloc(1, 6, 16); + ctor.set(0, build_address_from_ixon(name)); + ctor.set(1, build_address_array(lvls)); + ctor.set(2, build_reducibility_hints(hints)); + ctor.set(3, build_address_array(all)); + ctor.set(4, build_address_array(ctx)); + ctor.set(5, build_expr_meta_arena(arena)); + ctor.set_u64(6 * 8, *type_root); + ctor.set_u64(6 * 8 + 8, *value_root); + *ctor + }, + + ConstantMeta::Axio { name, lvls, arena, type_root } => { + let ctor = LeanCtor::alloc(2, 3, 8); + ctor.set(0, build_address_from_ixon(name)); + ctor.set(1, build_address_array(lvls)); + ctor.set(2, build_expr_meta_arena(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor + }, 
+ + ConstantMeta::Quot { name, lvls, arena, type_root } => { + let ctor = LeanCtor::alloc(3, 3, 8); + ctor.set(0, build_address_from_ixon(name)); + ctor.set(1, build_address_array(lvls)); + ctor.set(2, build_expr_meta_arena(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor + }, + + ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { + let ctor = LeanCtor::alloc(4, 6, 8); + ctor.set(0, build_address_from_ixon(name)); + ctor.set(1, build_address_array(lvls)); + ctor.set(2, build_address_array(ctors)); + ctor.set(3, build_address_array(all)); + ctor.set(4, build_address_array(ctx)); + ctor.set(5, build_expr_meta_arena(arena)); + ctor.set_u64(6 * 8, *type_root); + *ctor + }, + + ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { + let ctor = LeanCtor::alloc(5, 4, 8); + ctor.set(0, build_address_from_ixon(name)); + ctor.set(1, build_address_array(lvls)); + ctor.set(2, build_address_from_ixon(induct)); + ctor.set(3, build_expr_meta_arena(arena)); + ctor.set_u64(4 * 8, *type_root); + *ctor + }, + + ConstantMeta::Rec { + name, + lvls, + rules, + all, + ctx, + arena, + type_root, + rule_roots, + } => { + let ctor = LeanCtor::alloc(6, 7, 8); + ctor.set(0, build_address_from_ixon(name)); + ctor.set(1, build_address_array(lvls)); + ctor.set(2, build_address_array(rules)); + ctor.set(3, build_address_array(all)); + ctor.set(4, build_address_array(ctx)); + ctor.set(5, build_expr_meta_arena(arena)); + ctor.set(6, build_u64_array(rule_roots)); + ctor.set_u64(7 * 8, *type_root); + *ctor + }, + } +} +/// Decode Ixon.ConstantMeta from Lean pointer. 
+pub fn decode_constant_meta(obj: LeanObj) -> ConstantMeta { + // Empty (tag 0, no fields) is represented as a scalar lean_box(0) + if obj.is_scalar() { + let tag = obj.as_ptr() as usize >> 1; + assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {}", tag); + return ConstantMeta::Empty; + } + let ctor = obj.as_ctor(); + match ctor.tag() { + 1 => { + // defn: 6 obj fields, 2× u64 scalar + let name = decode_ixon_address(ctor.get(0)); + let lvls = decode_address_array(ctor.get(1)); + let hints = decode_reducibility_hints(ctor.get(2)); + let all = decode_address_array(ctor.get(3)); + let ctx = decode_address_array(ctor.get(4)); + let arena = decode_expr_meta_arena(ctor.get(5)); + let type_root = ctor.scalar_u64(6, 0); + let value_root = ctor.scalar_u64(6, 8); ConstantMeta::Def { name, lvls, @@ -337,59 +447,59 @@ impl IxonConstantMeta { arena, type_root, value_root, - } => { - let ctor = LeanCtor::alloc(1, 6, 16); - ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set(1, IxAddress::build_array(lvls)); - ctor.set(2, build_reducibility_hints(hints)); - ctor.set(3, IxAddress::build_array(all)); - ctor.set(4, IxAddress::build_array(ctx)); - ctor.set(5, IxonExprMetaArena::build(arena)); - ctor.set_u64(6 * 8, *type_root); - ctor.set_u64(6 * 8 + 8, *value_root); - *ctor - }, - - ConstantMeta::Axio { name, lvls, arena, type_root } => { - let ctor = LeanCtor::alloc(2, 3, 8); - ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set(1, IxAddress::build_array(lvls)); - ctor.set(2, IxonExprMetaArena::build(arena)); - ctor.set_u64(3 * 8, *type_root); - *ctor - }, - - ConstantMeta::Quot { name, lvls, arena, type_root } => { - let ctor = LeanCtor::alloc(3, 3, 8); - ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set(1, IxAddress::build_array(lvls)); - ctor.set(2, IxonExprMetaArena::build(arena)); - ctor.set_u64(3 * 8, *type_root); - *ctor - }, - - ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { - let ctor = LeanCtor::alloc(4, 6, 8); - 
ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set(1, IxAddress::build_array(lvls)); - ctor.set(2, IxAddress::build_array(ctors)); - ctor.set(3, IxAddress::build_array(all)); - ctor.set(4, IxAddress::build_array(ctx)); - ctor.set(5, IxonExprMetaArena::build(arena)); - ctor.set_u64(6 * 8, *type_root); - *ctor - }, - - ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { - let ctor = LeanCtor::alloc(5, 4, 8); - ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set(1, IxAddress::build_array(lvls)); - ctor.set(2, IxAddress::build_from_ixon(induct)); - ctor.set(3, IxonExprMetaArena::build(arena)); - ctor.set_u64(4 * 8, *type_root); - *ctor - }, - + } + }, + + 2 => { + // axio: 3 obj fields, 1× u64 scalar + let name = decode_ixon_address(ctor.get(0)); + let lvls = decode_address_array(ctor.get(1)); + let arena = decode_expr_meta_arena(ctor.get(2)); + let type_root = ctor.scalar_u64(3, 0); + ConstantMeta::Axio { name, lvls, arena, type_root } + }, + + 3 => { + // quot: 3 obj fields, 1× u64 scalar + let name = decode_ixon_address(ctor.get(0)); + let lvls = decode_address_array(ctor.get(1)); + let arena = decode_expr_meta_arena(ctor.get(2)); + let type_root = ctor.scalar_u64(3, 0); + ConstantMeta::Quot { name, lvls, arena, type_root } + }, + + 4 => { + // indc: 6 obj fields, 1× u64 scalar + let name = decode_ixon_address(ctor.get(0)); + let lvls = decode_address_array(ctor.get(1)); + let ctors = decode_address_array(ctor.get(2)); + let all = decode_address_array(ctor.get(3)); + let ctx = decode_address_array(ctor.get(4)); + let arena = decode_expr_meta_arena(ctor.get(5)); + let type_root = ctor.scalar_u64(6, 0); + ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } + }, + + 5 => { + // ctor: 4 obj fields, 1× u64 scalar + let name = decode_ixon_address(ctor.get(0)); + let lvls = decode_address_array(ctor.get(1)); + let induct = decode_ixon_address(ctor.get(2)); + let arena = decode_expr_meta_arena(ctor.get(3)); + let type_root = 
ctor.scalar_u64(4, 0); + ConstantMeta::Ctor { name, lvls, induct, arena, type_root } + }, + + 6 => { + // recr: 7 obj fields, 1× u64 scalar + let name = decode_ixon_address(ctor.get(0)); + let lvls = decode_address_array(ctor.get(1)); + let rules = decode_address_array(ctor.get(2)); + let all = decode_address_array(ctor.get(3)); + let ctx = decode_address_array(ctor.get(4)); + let arena = decode_expr_meta_arena(ctor.get(5)); + let rule_roots = decode_u64_array(ctor.get(6)); + let type_root = ctor.scalar_u64(7, 0); ConstantMeta::Rec { name, lvls, @@ -399,119 +509,10 @@ impl IxonConstantMeta { arena, type_root, rule_roots, - } => { - let ctor = LeanCtor::alloc(6, 7, 8); - ctor.set(0, IxAddress::build_from_ixon(name)); - ctor.set(1, IxAddress::build_array(lvls)); - ctor.set(2, IxAddress::build_array(rules)); - ctor.set(3, IxAddress::build_array(all)); - ctor.set(4, IxAddress::build_array(ctx)); - ctor.set(5, IxonExprMetaArena::build(arena)); - ctor.set(6, build_u64_array(rule_roots)); - ctor.set_u64(7 * 8, *type_root); - *ctor - }, - }; - Self::new(obj) - } + } + }, - /// Decode Ixon.ConstantMeta from Lean pointer. 
- pub fn decode(self) -> ConstantMeta { - let obj: LeanObj = *self; - if obj.is_scalar() { - let tag = obj.unbox_usize(); - assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {tag}"); - return ConstantMeta::Empty; - } - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; - match ctor.tag() { - 1 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - let name = ba.decode_ixon(); - let lvls = decode_address_array(ctor.get(1)); - let hints = decode_reducibility_hints(ctor.get(2).as_ptr()); - let all = decode_address_array(ctor.get(3)); - let ctx = decode_address_array(ctor.get(4)); - let arena = IxonExprMetaArena::decode(ctor.get(5)); - let type_root = ctor.scalar_u64(6, 0); - let value_root = ctor.scalar_u64(6, 8); - ConstantMeta::Def { - name, - lvls, - hints, - all, - ctx, - arena, - type_root, - value_root, - } - }, - - 2 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - let name = ba.decode_ixon(); - let lvls = decode_address_array(ctor.get(1)); - let arena = IxonExprMetaArena::decode(ctor.get(2)); - let type_root = ctor.scalar_u64(3, 0); - ConstantMeta::Axio { name, lvls, arena, type_root } - }, - - 3 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - let name = ba.decode_ixon(); - let lvls = decode_address_array(ctor.get(1)); - let arena = IxonExprMetaArena::decode(ctor.get(2)); - let type_root = ctor.scalar_u64(3, 0); - ConstantMeta::Quot { name, lvls, arena, type_root } - }, - - 4 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - let name = ba.decode_ixon(); - let lvls = decode_address_array(ctor.get(1)); - let ctors = decode_address_array(ctor.get(2)); - let all = decode_address_array(ctor.get(3)); - let ctx = decode_address_array(ctor.get(4)); - let arena = IxonExprMetaArena::decode(ctor.get(5)); - let type_root = ctor.scalar_u64(6, 0); - ConstantMeta::Indc { name, lvls, ctors, all, 
ctx, arena, type_root } - }, - - 5 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - let name = ba.decode_ixon(); - let lvls = decode_address_array(ctor.get(1)); - let ba2 = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(2).as_ptr()) }; - let induct = ba2.decode_ixon(); - let arena = IxonExprMetaArena::decode(ctor.get(3)); - let type_root = ctor.scalar_u64(4, 0); - ConstantMeta::Ctor { name, lvls, induct, arena, type_root } - }, - - 6 => { - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - let name = ba.decode_ixon(); - let lvls = decode_address_array(ctor.get(1)); - let rules = decode_address_array(ctor.get(2)); - let all = decode_address_array(ctor.get(3)); - let ctx = decode_address_array(ctor.get(4)); - let arena = IxonExprMetaArena::decode(ctor.get(5)); - let rule_roots = decode_u64_array(ctor.get(6)); - let type_root = ctor.scalar_u64(7, 0); - ConstantMeta::Rec { - name, - lvls, - rules, - all, - ctx, - arena, - type_root, - rule_roots, - } - }, - - tag => panic!("Invalid Ixon.ConstantMeta tag: {tag}"), - } + tag => panic!("Invalid Ixon.ConstantMeta tag: {}", tag), } } @@ -519,44 +520,41 @@ impl IxonConstantMeta { // Named and Comm Build/Decode // ============================================================================= -impl IxonNamed { - /// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } - pub fn build(addr: &Address, meta: &ConstantMeta) -> Self { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, IxAddress::build_from_ixon(addr)); - ctor.set(1, IxonConstantMeta::build(meta)); - Self::new(*ctor) - } +/// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } +pub fn build_named(addr: &Address, meta: &ConstantMeta) -> LeanObj { + let addr_obj = build_address_from_ixon(addr); + let meta_obj = build_constant_meta(meta); + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, addr_obj); + ctor.set(1, meta_obj); + *ctor +} - /// Decode 
Ixon.Named. - pub fn decode(self) -> Named { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let ba = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - Named { - addr: ba.decode_ixon(), - meta: IxonConstantMeta::new(ctor.get(1)).decode(), - } +/// Decode Ixon.Named. +pub fn decode_named(obj: LeanObj) -> Named { + let ctor = obj.as_ctor(); + Named { + addr: decode_ixon_address(ctor.get(0)), + meta: decode_constant_meta(ctor.get(1)), } } -impl IxonComm { - /// Build Ixon.Comm { secret : Address, payload : Address } - pub fn build(comm: &Comm) -> Self { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, IxAddress::build_from_ixon(&comm.secret)); - ctor.set(1, IxAddress::build_from_ixon(&comm.payload)); - Self::new(*ctor) - } +/// Build Ixon.Comm { secret : Address, payload : Address } +pub fn build_ixon_comm(comm: &Comm) -> LeanObj { + let secret_obj = build_address_from_ixon(&comm.secret); + let payload_obj = build_address_from_ixon(&comm.payload); + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, secret_obj); + ctor.set(1, payload_obj); + *ctor +} - /// Decode Ixon.Comm. - pub fn decode(self) -> Comm { - let ctor = unsafe { LeanCtor::from_raw(self.as_ptr()) }; - let ba0 = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(0).as_ptr()) }; - let ba1 = unsafe { crate::lean::obj::LeanByteArray::from_raw(ctor.get(1).as_ptr()) }; - Comm { - secret: ba0.decode_ixon(), - payload: ba1.decode_ixon(), - } +/// Decode Ixon.Comm. +pub fn decode_ixon_comm(obj: LeanObj) -> Comm { + let ctor = obj.as_ctor(); + Comm { + secret: decode_ixon_address(ctor.get(0)), + payload: decode_ixon_address(ctor.get(1)), } } @@ -567,41 +565,41 @@ impl IxonComm { /// Round-trip Ixon.DataValue. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_data_value(obj: LeanObj) -> LeanObj { - let dv = IxonDataValue::new(obj).decode(); - IxonDataValue::build(&dv).into() + let dv = decode_ixon_data_value(obj); + build_ixon_data_value(&dv) } /// Round-trip Ixon.Comm. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanObj) -> LeanObj { - let comm = IxonComm::new(obj).decode(); - IxonComm::build(&comm).into() + let comm = decode_ixon_comm(obj); + build_ixon_comm(&comm) } /// Round-trip Ixon.ExprMetaData. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_expr_meta_data(obj: LeanObj) -> LeanObj { - let node = IxonExprMetaData::new(obj).decode(); - IxonExprMetaData::build(&node).into() + let node = decode_expr_meta_data(obj); + build_expr_meta_data(&node) } /// Round-trip Ixon.ExprMetaArena. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena(obj: LeanObj) -> LeanObj { - let arena = IxonExprMetaArena::decode(obj); - IxonExprMetaArena::build(&arena).into() + let arena = decode_expr_meta_arena(obj); + *build_expr_meta_arena(&arena) } /// Round-trip Ixon.ConstantMeta (full arena-based). #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_constant_meta(obj: LeanObj) -> LeanObj { - let meta = IxonConstantMeta::new(obj).decode(); - IxonConstantMeta::build(&meta).into() + let meta = decode_constant_meta(obj); + build_constant_meta(&meta) } /// Round-trip Ixon.Named (with real metadata). 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_named(obj: LeanObj) -> LeanObj { - let named = IxonNamed::new(obj).decode(); - IxonNamed::build(&named.addr, &named.meta).into() + let named = decode_named(obj); + build_named(&named.addr, &named.meta) } diff --git a/src/lean/ffi/ixon/serialize.rs b/src/lean/ffi/ixon/serialize.rs index 230d0243..46e08962 100644 --- a/src/lean/ffi/ixon/serialize.rs +++ b/src/lean/ffi/ixon/serialize.rs @@ -10,9 +10,9 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; -use crate::lean::obj::{LeanByteArray, LeanCtor, LeanObj}; +use crate::lean::obj::LeanObj; -use super::constant::{decode_ixon_address, decode_ixon_constant}; +use crate::lean::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; /// Unbox a Lean UInt64, handling both scalar and boxed representations. fn lean_ptr_to_u64(obj: LeanObj) -> u64 { @@ -26,7 +26,7 @@ fn lean_ptr_to_u64(obj: LeanObj) -> u64 { /// Decode a Lean `Ixon.Expr` to a Rust `IxonExpr`. 
pub fn lean_ptr_to_ixon_expr(obj: LeanObj) -> Arc { assert!(!obj.is_scalar(), "Ixon.Expr should not be scalar"); - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); match ctor.tag() { 0 => { let idx = ctor.scalar_u64(0, 0); @@ -38,15 +38,13 @@ pub fn lean_ptr_to_ixon_expr(obj: LeanObj) -> Arc { }, 2 => { let ref_idx = ctor.scalar_u64(1, 0); - let univs_arr = - unsafe { crate::lean::obj::LeanArray::from_raw(ctor.get(0).as_ptr()) }; + let univs_arr = ctor.get(0).as_array(); let univs = univs_arr.map(lean_ptr_to_u64); Arc::new(IxonExpr::Ref(ref_idx, univs)) }, 3 => { let rec_idx = ctor.scalar_u64(1, 0); - let univs_arr = - unsafe { crate::lean::obj::LeanArray::from_raw(ctor.get(0).as_ptr()) }; + let univs_arr = ctor.get(0).as_array(); let univs = univs_arr.map(lean_ptr_to_u64); Arc::new(IxonExpr::Rec(rec_idx, univs)) }, @@ -115,7 +113,7 @@ fn lean_ptr_to_ixon_univ(obj: LeanObj) -> Arc { if obj.is_scalar() { return IxonUniv::zero(); } - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); match ctor.tag() { 1 => { let [inner] = ctor.objs::<1>(); @@ -141,7 +139,7 @@ pub extern "C" fn rs_eq_univ_serialization( bytes_obj: LeanObj, ) -> bool { let univ = lean_ptr_to_ixon_univ(univ_obj); - let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let ba = bytes_obj.as_byte_array(); let bytes_data = ba.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_univ(&univ, &mut buf); @@ -155,7 +153,7 @@ pub extern "C" fn rs_eq_expr_serialization( bytes_obj: LeanObj, ) -> bool { let expr = lean_ptr_to_ixon_expr(expr_obj); - let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let ba = bytes_obj.as_byte_array(); let bytes_data = ba.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_expr(&expr, &mut buf); @@ -169,7 +167,7 @@ pub extern "C" fn rs_eq_constant_serialization( bytes_obj: LeanObj, ) -> bool { let constant = decode_ixon_constant(constant_obj); - let ba = 
unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let ba = bytes_obj.as_byte_array(); let bytes_data = ba.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); constant.put(&mut buf); @@ -183,11 +181,11 @@ pub extern "C" fn rs_eq_env_serialization( raw_env_obj: LeanObj, bytes_obj: LeanObj, ) -> bool { - use super::env::decode_raw_env; + use crate::lean::ffi::ixon::env::decode_raw_env; use crate::ix::ixon::env::Env; let decoded = decode_raw_env(raw_env_obj); - let ba = unsafe { LeanByteArray::from_raw(bytes_obj.as_ptr()) }; + let ba = bytes_obj.as_byte_array(); let bytes_data = ba.as_bytes(); // Deserialize Lean's bytes using Rust's deserializer @@ -258,7 +256,7 @@ extern "C" fn rs_env_serde_roundtrip(lean_bytes_obj: LeanObj) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let ba = unsafe { LeanByteArray::from_raw(lean_bytes_obj.as_ptr()) }; + let ba = lean_bytes_obj.as_byte_array(); let lean_bytes = ba.as_bytes().to_vec(); // Try to deserialize with Rust @@ -305,7 +303,7 @@ extern "C" fn rs_env_serde_check(lean_bytes_obj: LeanObj) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let ba = unsafe { LeanByteArray::from_raw(lean_bytes_obj.as_ptr()) }; + let ba = lean_bytes_obj.as_byte_array(); let lean_bytes = ba.as_bytes().to_vec(); // Try to deserialize with Rust diff --git a/src/lean/ffi/ixon/sharing.rs b/src/lean/ffi/ixon/sharing.rs index 3d2ad32f..b7b1faf4 100644 --- a/src/lean/ffi/ixon/sharing.rs +++ b/src/lean/ffi/ixon/sharing.rs @@ -7,18 +7,17 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; -use crate::lean::obj::{LeanArray, LeanByteArray, LeanObj}; +use crate::lean::obj::LeanObj; -use super::expr::decode_ixon_expr_array; -use super::serialize::lean_ptr_to_ixon_expr; +use crate::lean::ffi::ixon::expr::decode_ixon_expr_array; +use crate::lean::ffi::ixon::serialize::lean_ptr_to_ixon_expr; /// FFI: Debug sharing 
analysis - print usage counts for subterms with usage >= 2. /// This helps diagnose why Lean and Rust make different sharing decisions. #[unsafe(no_mangle)] pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanObj) { - let arr = unsafe { LeanArray::from_raw(exprs_obj.as_ptr()) }; - let exprs: Vec> = - arr.map(|elem| lean_ptr_to_ixon_expr(elem)); + let arr = exprs_obj.as_array(); + let exprs: Vec> = arr.map(|elem| lean_ptr_to_ixon_expr(elem)); println!("[Rust] Analyzing {} input expressions", exprs.len()); @@ -96,11 +95,9 @@ extern "C" fn rs_run_sharing_analysis( } // Write to output arrays - let sharing_ba = - unsafe { LeanByteArray::from_raw(out_sharing_vec.as_ptr()) }; + let sharing_ba = out_sharing_vec.as_byte_array(); unsafe { sharing_ba.set_data(&sharing_bytes) }; - let rewritten_ba = - unsafe { LeanByteArray::from_raw(out_rewritten.as_ptr()) }; + let rewritten_ba = out_rewritten.as_byte_array(); unsafe { rewritten_ba.set_data(&rewritten_bytes) }; shared_hashes.len() as u64 diff --git a/src/lean/ffi/ixon/univ.rs b/src/lean/ffi/ixon/univ.rs index 1735e94c..c4e8affc 100644 --- a/src/lean/ffi/ixon/univ.rs +++ b/src/lean/ffi/ixon/univ.rs @@ -51,7 +51,7 @@ impl IxonUniv { if obj.is_scalar() { return Univ::Zero; } - let ctor = unsafe { LeanCtor::from_raw(obj.as_ptr()) }; + let ctor = obj.as_ctor(); match ctor.tag() { 0 => Univ::Zero, 1 => Univ::Succ(Arc::new(Self::new(ctor.get(0)).decode())), @@ -70,11 +70,21 @@ impl IxonUniv { /// Decode Array Ixon.Univ. pub fn decode_array(obj: LeanObj) -> Vec> { - let arr = unsafe { LeanArray::from_raw(obj.as_ptr()) }; + let arr = obj.as_array(); arr.map(|elem| Arc::new(Self::new(elem).decode())) } } +/// Build an Array of Ixon.Univ (standalone wrapper). +pub fn build_ixon_univ_array(univs: &[Arc]) -> LeanArray { + IxonUniv::build_array(univs) +} + +/// Decode Array Ixon.Univ (standalone wrapper). 
+pub fn decode_ixon_univ_array(obj: LeanObj) -> Vec> { + IxonUniv::decode_array(obj) +} + // ============================================================================= // FFI Exports // ============================================================================= diff --git a/src/lean/ffi/lean_env.rs b/src/lean/ffi/lean_env.rs index 3581a4f3..52509d92 100644 --- a/src/lean/ffi/lean_env.rs +++ b/src/lean/ffi/lean_env.rs @@ -19,6 +19,7 @@ use std::sync::Arc; use rustc_hash::FxHashMap; +use crate::lean::nat::Nat; use crate::lean::obj::LeanObj; use crate::{ @@ -31,26 +32,21 @@ use crate::{ ReducibilityHints, SourceInfo, Substring, Syntax, SyntaxPreresolved, TheoremVal, }, - lean::{ - collect_list, lean_array_to_vec_with, lean_ctor_objs, lean_is_scalar, - lean_obj_to_string, lean_tag, nat::Nat, - }, - lean_unbox, }; const PARALLEL_THRESHOLD: usize = 100; -/// Wrapper to allow sending raw pointers across threads. The underlying Lean -/// objects must remain valid for the entire duration of parallel decoding +/// Wrapper to allow sending `LeanObj` across threads. The underlying Lean +/// objects must remain valid for the entire duration of parallel decoding. 
#[derive(Clone, Copy)] -struct SendPtr(*const c_void); +struct SendObj(LeanObj); -unsafe impl Send for SendPtr {} -unsafe impl Sync for SendPtr {} +unsafe impl Send for SendObj {} +unsafe impl Sync for SendObj {} -impl SendPtr { +impl SendObj { #[inline] - fn get(self) -> *const c_void { + fn get(self) -> LeanObj { self.0 } } @@ -98,33 +94,31 @@ impl<'g> Cache<'g> { } } -fn collect_list_ptrs(mut ptr: *const c_void) -> Vec<*const c_void> { - let mut ptrs = Vec::new(); - while !lean_is_scalar(ptr) { - let [head_ptr, tail_ptr] = lean_ctor_objs(ptr); - ptrs.push(head_ptr); - ptr = tail_ptr; - } - ptrs +fn collect_list_objs(obj: LeanObj) -> Vec { + obj.as_list().iter().collect() } // Name decoding with global cache -pub fn lean_ptr_to_name(ptr: *const c_void, global: &GlobalCache) -> Name { +pub fn lean_ptr_to_name(obj: LeanObj, global: &GlobalCache) -> Name { + let ptr = obj.as_ptr(); // Fast path: check if already cached if let Some(name) = global.names.get(&ptr) { return name.clone(); } // Compute the name - let name = if lean_is_scalar(ptr) { + let name = if obj.is_scalar() { Name::anon() } else { - let [pre_ptr, pos_ptr] = lean_ctor_objs(ptr); + let ctor = obj.as_ctor(); + let [pre, pos] = ctor.objs(); // Recursive call - will also use global cache - let pre = lean_ptr_to_name(pre_ptr, global); - match lean_tag(ptr) { - 1 => Name::str(pre, lean_obj_to_string(pos_ptr)), - 2 => Name::num(pre, Nat::from_ptr(pos_ptr)), + let pre = lean_ptr_to_name(pre, global); + match ctor.tag() { + 1 => { + Name::str(pre, pos.as_string().to_string()) + }, + 2 => Name::num(pre, Nat::from_obj(pos)), _ => unreachable!(), } }; @@ -133,36 +127,36 @@ pub fn lean_ptr_to_name(ptr: *const c_void, global: &GlobalCache) -> Name { global.names.entry(ptr).or_insert(name).clone() } -fn lean_ptr_to_level(ptr: *const c_void, cache: &mut Cache<'_>) -> Level { +fn lean_ptr_to_level(obj: LeanObj, cache: &mut Cache<'_>) -> Level { + let ptr = obj.as_ptr(); if let Some(cached) = 
cache.local.univs.get(&ptr) { return cached.clone(); } - let level = if lean_is_scalar(ptr) { + let level = if obj.is_scalar() { Level::zero() } else { - match lean_tag(ptr) { + let ctor = obj.as_ctor(); + match ctor.tag() { 1 => { - let [u] = lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_level(p, cache)); + let [u] = ctor.objs::<1>().map(|o| lean_ptr_to_level(o, cache)); Level::succ(u) }, 2 => { - let [u, v] = - lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = ctor.objs::<2>().map(|o| lean_ptr_to_level(o, cache)); Level::max(u, v) }, 3 => { - let [u, v] = - lean_ctor_objs::<2>(ptr).map(|p| lean_ptr_to_level(p, cache)); + let [u, v] = ctor.objs::<2>().map(|o| lean_ptr_to_level(o, cache)); Level::imax(u, v) }, 4 => { let [name] = - lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); + ctor.objs::<1>().map(|o| lean_ptr_to_name(o, cache.global)); Level::param(name) }, 5 => { let [name] = - lean_ctor_objs::<1>(ptr).map(|p| lean_ptr_to_name(p, cache.global)); + ctor.objs::<1>().map(|o| lean_ptr_to_name(o, cache.global)); Level::mvar(name) }, _ => unreachable!(), @@ -172,33 +166,34 @@ fn lean_ptr_to_level(ptr: *const c_void, cache: &mut Cache<'_>) -> Level { level } -fn lean_ptr_to_substring(ptr: *const c_void) -> Substring { - let [str_ptr, start_pos_ptr, stop_pos_ptr] = lean_ctor_objs(ptr); - let str = lean_obj_to_string(str_ptr); - let start_pos = Nat::from_ptr(start_pos_ptr); - let stop_pos = Nat::from_ptr(stop_pos_ptr); +fn lean_ptr_to_substring(obj: LeanObj) -> Substring { + let ctor = obj.as_ctor(); + let [str_obj, start_pos, stop_pos] = ctor.objs(); + let str = str_obj.as_string().to_string(); + let start_pos = Nat::from_obj(start_pos); + let stop_pos = Nat::from_obj(stop_pos); Substring { str, start_pos, stop_pos } } -fn lean_ptr_to_source_info(ptr: *const c_void) -> SourceInfo { - if lean_is_scalar(ptr) { +fn lean_ptr_to_source_info(obj: LeanObj) -> SourceInfo { + if obj.is_scalar() { return SourceInfo::None; } - 
match lean_tag(ptr) { + let ctor = obj.as_ctor(); + match ctor.tag() { 0 => { - let [leading_ptr, pos_ptr, trailing_ptr, end_pos_ptr] = - lean_ctor_objs(ptr); - let leading = lean_ptr_to_substring(leading_ptr); - let pos = Nat::from_ptr(pos_ptr); - let trailing = lean_ptr_to_substring(trailing_ptr); - let end_pos = Nat::from_ptr(end_pos_ptr); + let [leading, pos, trailing, end_pos] = ctor.objs(); + let leading = lean_ptr_to_substring(leading); + let pos = Nat::from_obj(pos); + let trailing = lean_ptr_to_substring(trailing); + let end_pos = Nat::from_obj(end_pos); SourceInfo::Original(leading, pos, trailing, end_pos) }, 1 => { - let [pos_ptr, end_pos_ptr, canonical_ptr] = lean_ctor_objs(ptr); - let pos = Nat::from_ptr(pos_ptr); - let end_pos = Nat::from_ptr(end_pos_ptr); - let canonical = canonical_ptr as usize == 1; + let [pos, end_pos, canonical] = ctor.objs(); + let pos = Nat::from_obj(pos); + let end_pos = Nat::from_obj(end_pos); + let canonical = canonical.as_ptr() as usize == 1; SourceInfo::Synthetic(pos, end_pos, canonical) }, _ => unreachable!(), @@ -206,52 +201,62 @@ fn lean_ptr_to_source_info(ptr: *const c_void) -> SourceInfo { } fn lean_ptr_to_syntax_preresolved( - ptr: *const c_void, + obj: LeanObj, cache: &mut Cache<'_>, ) -> SyntaxPreresolved { - match lean_tag(ptr) { + let ctor = obj.as_ctor(); + match ctor.tag() { 0 => { - let [name_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, cache.global); + let [name_obj] = ctor.objs::<1>(); + let name = lean_ptr_to_name(name_obj, cache.global); SyntaxPreresolved::Namespace(name) }, 1 => { - let [name_ptr, fields_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, cache.global); - let fields = collect_list(fields_ptr, lean_obj_to_string); + let [name_obj, fields_obj] = ctor.objs(); + let name = lean_ptr_to_name(name_obj, cache.global); + let fields: Vec = + fields_obj.as_list() + .iter() + .map(|o| o.as_string().to_string()) + .collect(); SyntaxPreresolved::Decl(name, fields) 
}, _ => unreachable!(), } } -fn lean_ptr_to_syntax(ptr: *const c_void, cache: &mut Cache<'_>) -> Syntax { - if lean_is_scalar(ptr) { +fn lean_ptr_to_syntax(obj: LeanObj, cache: &mut Cache<'_>) -> Syntax { + if obj.is_scalar() { return Syntax::Missing; } - match lean_tag(ptr) { + let ctor = obj.as_ctor(); + match ctor.tag() { 1 => { - let [info_ptr, kind_ptr, args_ptr] = lean_ctor_objs(ptr); - let info = lean_ptr_to_source_info(info_ptr); - let kind = lean_ptr_to_name(kind_ptr, cache.global); - let args: Vec<_> = - lean_array_to_vec_with(args_ptr, lean_ptr_to_syntax, cache); + let [info, kind, args] = ctor.objs(); + let info = lean_ptr_to_source_info(info); + let kind = lean_ptr_to_name(kind, cache.global); + let args: Vec<_> = args.as_array() + .iter() + .map(|o| lean_ptr_to_syntax(o, cache)) + .collect(); Syntax::Node(info, kind, args) }, 2 => { - let [info_ptr, val_ptr] = lean_ctor_objs(ptr); - let info = lean_ptr_to_source_info(info_ptr); - Syntax::Atom(info, lean_obj_to_string(val_ptr)) + let [info, val] = ctor.objs(); + let info = lean_ptr_to_source_info(info); + Syntax::Atom( + info, + val.as_string().to_string(), + ) }, 3 => { - let [info_ptr, raw_val_ptr, val_ptr, preresolved_ptr] = - lean_ctor_objs(ptr); - let info = lean_ptr_to_source_info(info_ptr); - let raw_val = lean_ptr_to_substring(raw_val_ptr); - let val = lean_ptr_to_name(val_ptr, cache.global); - let preresolved = collect_list_ptrs(preresolved_ptr) + let [info, raw_val, val, preresolved] = ctor.objs(); + let info = lean_ptr_to_source_info(info); + let raw_val = lean_ptr_to_substring(raw_val); + let val = lean_ptr_to_name(val, cache.global); + let preresolved = collect_list_objs(preresolved) .into_iter() - .map(|p| lean_ptr_to_syntax_preresolved(p, cache)) + .map(|o| lean_ptr_to_syntax_preresolved(o, cache)) .collect(); Syntax::Ident(info, raw_val, val, preresolved) }, @@ -260,85 +265,85 @@ fn lean_ptr_to_syntax(ptr: *const c_void, cache: &mut Cache<'_>) -> Syntax { } fn 
lean_ptr_to_name_data_value( - ptr: *const c_void, + obj: LeanObj, cache: &mut Cache<'_>, ) -> (Name, DataValue) { - let [name_ptr, data_value_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, cache.global); - let [inner_ptr] = lean_ctor_objs(data_value_ptr); - let data_value = match lean_tag(data_value_ptr) { - 0 => DataValue::OfString(lean_obj_to_string(inner_ptr)), - 1 => DataValue::OfBool(inner_ptr as usize == 1), - 2 => DataValue::OfName(lean_ptr_to_name(inner_ptr, cache.global)), - 3 => DataValue::OfNat(Nat::from_ptr(inner_ptr)), + let ctor = obj.as_ctor(); + let [name_obj, data_value_obj] = ctor.objs(); + let name = lean_ptr_to_name(name_obj, cache.global); + let dv_ctor = data_value_obj.as_ctor(); + let [inner] = dv_ctor.objs::<1>(); + let data_value = match dv_ctor.tag() { + 0 => DataValue::OfString( + inner.as_string().to_string(), + ), + 1 => DataValue::OfBool(inner.as_ptr() as usize == 1), + 2 => DataValue::OfName(lean_ptr_to_name(inner, cache.global)), + 3 => DataValue::OfNat(Nat::from_obj(inner)), 4 => { - let [nat_ptr] = lean_ctor_objs(inner_ptr); - let nat = Nat::from_ptr(nat_ptr); - let int = match lean_tag(inner_ptr) { + let inner_ctor = inner.as_ctor(); + let [nat_obj] = inner_ctor.objs::<1>(); + let nat = Nat::from_obj(nat_obj); + let int = match inner_ctor.tag() { 0 => Int::OfNat(nat), 1 => Int::NegSucc(nat), _ => unreachable!(), }; DataValue::OfInt(int) }, - 5 => DataValue::OfSyntax(lean_ptr_to_syntax(inner_ptr, cache).into()), + 5 => DataValue::OfSyntax(lean_ptr_to_syntax(inner, cache).into()), _ => unreachable!(), }; (name, data_value) } -pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { +pub fn lean_ptr_to_expr(obj: LeanObj, cache: &mut Cache<'_>) -> Expr { + let ptr = obj.as_ptr(); if let Some(cached) = cache.local.exprs.get(&ptr) { return cached.clone(); } - let expr = match lean_tag(ptr) { + let ctor = obj.as_ctor(); + let expr = match ctor.tag() { 0 => { - let [nat_ptr, _hash_ptr] = 
lean_ctor_objs(ptr); - let nat = Nat::from_ptr(nat_ptr.cast()); - Expr::bvar(nat) + let [nat, _hash] = ctor.objs(); + Expr::bvar(Nat::from_obj(nat)) }, 1 => { - let [name_ptr, _hash_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, cache.global); + let [name_obj, _hash] = ctor.objs(); + let name = lean_ptr_to_name(name_obj, cache.global); Expr::fvar(name) }, 2 => { - let [name_ptr, _hash_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, cache.global); + let [name_obj, _hash] = ctor.objs(); + let name = lean_ptr_to_name(name_obj, cache.global); Expr::mvar(name) }, 3 => { - let [u_ptr, _hash_ptr] = lean_ctor_objs(ptr); - let u = lean_ptr_to_level(u_ptr, cache); + let [u, _hash] = ctor.objs(); + let u = lean_ptr_to_level(u, cache); Expr::sort(u) }, 4 => { - let [name_ptr, levels_ptr, _hash_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, cache.global); - let levels = collect_list_ptrs(levels_ptr) + let [name_obj, levels, _hash] = ctor.objs(); + let name = lean_ptr_to_name(name_obj, cache.global); + let levels = collect_list_objs(levels) .into_iter() - .map(|p| lean_ptr_to_level(p, cache)) + .map(|o| lean_ptr_to_level(o, cache)) .collect(); Expr::cnst(name, levels) }, 5 => { - let [f_ptr, a_ptr, _hash_ptr] = lean_ctor_objs(ptr); - let f = lean_ptr_to_expr(f_ptr, cache); - let a = lean_ptr_to_expr(a_ptr, cache); + let [f, a, _hash] = ctor.objs(); + let f = lean_ptr_to_expr(f, cache); + let a = lean_ptr_to_expr(a, cache); Expr::app(f, a) }, 6 => { - let [ - binder_name_ptr, - binder_typ_ptr, - body_ptr, - _hash_ptr, - binder_info_ptr, - ] = lean_ctor_objs(ptr); - let binder_name = lean_ptr_to_name(binder_name_ptr, cache.global); - let binder_typ = lean_ptr_to_expr(binder_typ_ptr, cache); - let body = lean_ptr_to_expr(body_ptr, cache); - let binder_info = match binder_info_ptr as usize { + let [binder_name, binder_typ, body, _hash, binder_info] = ctor.objs(); + let binder_name = lean_ptr_to_name(binder_name, 
cache.global); + let binder_typ = lean_ptr_to_expr(binder_typ, cache); + let body = lean_ptr_to_expr(body, cache); + let binder_info = match binder_info.as_ptr() as usize { 0 => BinderInfo::Default, 1 => BinderInfo::Implicit, 2 => BinderInfo::StrictImplicit, @@ -348,17 +353,11 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { Expr::lam(binder_name, binder_typ, body, binder_info) }, 7 => { - let [ - binder_name_ptr, - binder_typ_ptr, - body_ptr, - _hash_ptr, - binder_info_ptr, - ] = lean_ctor_objs(ptr); - let binder_name = lean_ptr_to_name(binder_name_ptr, cache.global); - let binder_typ = lean_ptr_to_expr(binder_typ_ptr, cache); - let body = lean_ptr_to_expr(body_ptr, cache); - let binder_info = match binder_info_ptr as usize { + let [binder_name, binder_typ, body, _hash, binder_info] = ctor.objs(); + let binder_name = lean_ptr_to_name(binder_name, cache.global); + let binder_typ = lean_ptr_to_expr(binder_typ, cache); + let body = lean_ptr_to_expr(body, cache); + let binder_info = match binder_info.as_ptr() as usize { 0 => BinderInfo::Default, 1 => BinderInfo::Implicit, 2 => BinderInfo::StrictImplicit, @@ -368,41 +367,40 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { Expr::all(binder_name, binder_typ, body, binder_info) }, 8 => { - let [decl_name_ptr, typ_ptr, value_ptr, body_ptr, _hash_ptr, nondep_ptr] = - lean_ctor_objs(ptr); - let decl_name = lean_ptr_to_name(decl_name_ptr, cache.global); - let typ = lean_ptr_to_expr(typ_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, cache); - let body = lean_ptr_to_expr(body_ptr, cache); - let nondep = nondep_ptr as usize == 1; + let [decl_name, typ, value, body, _hash, nondep] = ctor.objs(); + let decl_name = lean_ptr_to_name(decl_name, cache.global); + let typ = lean_ptr_to_expr(typ, cache); + let value = lean_ptr_to_expr(value, cache); + let body = lean_ptr_to_expr(body, cache); + let nondep = nondep.as_ptr() as usize == 1; Expr::letE(decl_name, typ, 
value, body, nondep) }, 9 => { - let [literal_ptr, _hash_ptr] = lean_ctor_objs(ptr); - let [inner_ptr] = lean_ctor_objs(literal_ptr); - match lean_tag(literal_ptr) { - 0 => { - let nat = Nat::from_ptr(inner_ptr); - Expr::lit(Literal::NatVal(nat)) - }, - 1 => Expr::lit(Literal::StrVal(lean_obj_to_string(inner_ptr))), + let [literal, _hash] = ctor.objs(); + let lit_ctor = literal.as_ctor(); + let [inner] = lit_ctor.objs::<1>(); + match lit_ctor.tag() { + 0 => Expr::lit(Literal::NatVal(Nat::from_obj(inner))), + 1 => Expr::lit(Literal::StrVal( + inner.as_string().to_string(), + )), _ => unreachable!(), } }, 10 => { - let [data_ptr, expr_ptr] = lean_ctor_objs(ptr); - let kv_map: Vec<_> = collect_list_ptrs(data_ptr) + let [data, expr_obj] = ctor.objs(); + let kv_map: Vec<_> = collect_list_objs(data) .into_iter() - .map(|p| lean_ptr_to_name_data_value(p, cache)) + .map(|o| lean_ptr_to_name_data_value(o, cache)) .collect(); - let expr = lean_ptr_to_expr(expr_ptr, cache); + let expr = lean_ptr_to_expr(expr_obj, cache); Expr::mdata(kv_map, expr) }, 11 => { - let [typ_name_ptr, idx_ptr, struct_ptr] = lean_ctor_objs(ptr); - let typ_name = lean_ptr_to_name(typ_name_ptr, cache.global); - let idx = Nat::from_ptr(idx_ptr); - let struct_expr = lean_ptr_to_expr(struct_ptr, cache); + let [typ_name, idx, struct_expr] = ctor.objs(); + let typ_name = lean_ptr_to_name(typ_name, cache.global); + let idx = Nat::from_obj(idx); + let struct_expr = lean_ptr_to_expr(struct_expr, cache); Expr::proj(typ_name, idx, struct_expr) }, _ => unreachable!(), @@ -412,63 +410,67 @@ pub fn lean_ptr_to_expr(ptr: *const c_void, cache: &mut Cache<'_>) -> Expr { } fn lean_ptr_to_recursor_rule( - ptr: *const c_void, + obj: LeanObj, cache: &mut Cache<'_>, ) -> RecursorRule { - let [ctor_ptr, n_fields_ptr, rhs_ptr] = lean_ctor_objs(ptr); - let ctor = lean_ptr_to_name(ctor_ptr, cache.global); - let n_fields = Nat::from_ptr(n_fields_ptr); - let rhs = lean_ptr_to_expr(rhs_ptr, cache); - RecursorRule { ctor, 
n_fields, rhs } + let ctor = obj.as_ctor(); + let [ctor_name, n_fields, rhs] = ctor.objs(); + let ctor_name = lean_ptr_to_name(ctor_name, cache.global); + let n_fields = Nat::from_obj(n_fields); + let rhs = lean_ptr_to_expr(rhs, cache); + RecursorRule { ctor: ctor_name, n_fields, rhs } } fn lean_ptr_to_constant_val( - ptr: *const c_void, + obj: LeanObj, cache: &mut Cache<'_>, ) -> ConstantVal { - let [name_ptr, level_params_ptr, typ_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, cache.global); - let level_params: Vec<_> = collect_list_ptrs(level_params_ptr) + let ctor = obj.as_ctor(); + let [name_obj, level_params, typ] = ctor.objs(); + let name = lean_ptr_to_name(name_obj, cache.global); + let level_params: Vec<_> = collect_list_objs(level_params) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| lean_ptr_to_name(o, cache.global)) .collect(); - let typ = lean_ptr_to_expr(typ_ptr, cache); + let typ = lean_ptr_to_expr(typ, cache); ConstantVal { name, level_params, typ } } pub fn lean_ptr_to_constant_info( - ptr: *const c_void, + obj: LeanObj, cache: &mut Cache<'_>, ) -> ConstantInfo { - let [inner_val_ptr] = lean_ctor_objs(ptr); + let ctor = obj.as_ctor(); + let [inner_val] = ctor.objs::<1>(); + let inner = inner_val.as_ctor(); - match lean_tag(ptr) { + match ctor.tag() { 0 => { - let [constant_val_ptr, is_unsafe_ptr] = lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let is_unsafe = is_unsafe_ptr as usize == 1; + let [constant_val, is_unsafe] = inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::AxiomInfo(AxiomVal { cnst: constant_val, is_unsafe }) }, 1 => { - let [constant_val_ptr, value_ptr, hints_ptr, all_ptr, safety_ptr] = - lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, 
cache); - let hints = if lean_is_scalar(hints_ptr) { - match lean_unbox!(usize, hints_ptr) { + let [constant_val, value, hints, all, safety] = inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let value = lean_ptr_to_expr(value, cache); + let hints = if hints.is_scalar() { + match hints.unbox_usize() { 0 => ReducibilityHints::Opaque, 1 => ReducibilityHints::Abbrev, _ => unreachable!(), } } else { - let [height_ptr] = lean_ctor_objs(hints_ptr); - ReducibilityHints::Regular(height_ptr as u32) + let hints_ctor = hints.as_ctor(); + let [height] = hints_ctor.objs::<1>(); + ReducibilityHints::Regular(height.as_ptr() as u32) }; - let all: Vec<_> = collect_list_ptrs(all_ptr) + let all: Vec<_> = collect_list_objs(all) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| lean_ptr_to_name(o, cache.global)) .collect(); - let safety = match safety_ptr as usize { + let safety = match safety.as_ptr() as usize { 0 => DefinitionSafety::Unsafe, 1 => DefinitionSafety::Safe, 2 => DefinitionSafety::Partial, @@ -483,26 +485,24 @@ pub fn lean_ptr_to_constant_info( }) }, 2 => { - let [constant_val_ptr, value_ptr, all_ptr] = - lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, cache); - let all: Vec<_> = collect_list_ptrs(all_ptr) + let [constant_val, value, all] = inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let value = lean_ptr_to_expr(value, cache); + let all: Vec<_> = collect_list_objs(all) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| lean_ptr_to_name(o, cache.global)) .collect(); ConstantInfo::ThmInfo(TheoremVal { cnst: constant_val, value, all }) }, 3 => { - let [constant_val_ptr, value_ptr, all_ptr, is_unsafe_ptr] = - lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let value = lean_ptr_to_expr(value_ptr, cache); - let 
all: Vec<_> = collect_list_ptrs(all_ptr) + let [constant_val, value, all, is_unsafe] = inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let value = lean_ptr_to_expr(value, cache); + let all: Vec<_> = collect_list_objs(all) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| lean_ptr_to_name(o, cache.global)) .collect(); - let is_unsafe = is_unsafe_ptr as usize == 1; + let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::OpaqueInfo(OpaqueVal { cnst: constant_val, value, @@ -511,9 +511,9 @@ pub fn lean_ptr_to_constant_info( }) }, 4 => { - let [constant_val_ptr, kind_ptr] = lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let kind = match kind_ptr as usize { + let [constant_val, kind] = inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let kind = match kind.as_ptr() as usize { 0 => QuotKind::Type, 1 => QuotKind::Ctor, 2 => QuotKind::Lift, @@ -523,29 +523,22 @@ pub fn lean_ptr_to_constant_info( ConstantInfo::QuotInfo(QuotVal { cnst: constant_val, kind }) }, 5 => { - let [ - constant_val_ptr, - num_params_ptr, - num_indices_ptr, - all_ptr, - ctors_ptr, - num_nested_ptr, - bools_ptr, - ] = lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let num_params = Nat::from_ptr(num_params_ptr); - let num_indices = Nat::from_ptr(num_indices_ptr); - let all: Vec<_> = collect_list_ptrs(all_ptr) + let [constant_val, num_params, num_indices, all, ctors, num_nested, bools] = + inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let num_params = Nat::from_obj(num_params); + let num_indices = Nat::from_obj(num_indices); + let all: Vec<_> = collect_list_objs(all) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| lean_ptr_to_name(o, cache.global)) .collect(); - let ctors: Vec<_> = collect_list_ptrs(ctors_ptr) + let ctors: Vec<_> = 
collect_list_objs(ctors) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| lean_ptr_to_name(o, cache.global)) .collect(); - let num_nested = Nat::from_ptr(num_nested_ptr); + let num_nested = Nat::from_obj(num_nested); let [is_rec, is_unsafe, is_reflexive, ..] = - (bools_ptr as usize).to_le_bytes().map(|b| b == 1); + (bools.as_ptr() as usize).to_le_bytes().map(|b| b == 1); ConstantInfo::InductInfo(InductiveVal { cnst: constant_val, num_params, @@ -559,20 +552,14 @@ pub fn lean_ptr_to_constant_info( }) }, 6 => { - let [ - constant_val_ptr, - induct_ptr, - cidx_ptr, - num_params_ptr, - num_fields_ptr, - is_unsafe_ptr, - ] = lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let induct = lean_ptr_to_name(induct_ptr, cache.global); - let cidx = Nat::from_ptr(cidx_ptr); - let num_params = Nat::from_ptr(num_params_ptr); - let num_fields = Nat::from_ptr(num_fields_ptr); - let is_unsafe = is_unsafe_ptr as usize == 1; + let [constant_val, induct, cidx, num_params, num_fields, is_unsafe] = + inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let induct = lean_ptr_to_name(induct, cache.global); + let cidx = Nat::from_obj(cidx); + let num_params = Nat::from_obj(num_params); + let num_fields = Nat::from_obj(num_fields); + let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::CtorInfo(ConstructorVal { cnst: constant_val, induct, @@ -584,30 +571,30 @@ pub fn lean_ptr_to_constant_info( }, 7 => { let [ - constant_val_ptr, - all_ptr, - num_params_ptr, - num_indices_ptr, - num_motives_ptr, - num_minors_ptr, - rules_ptr, - bools_ptr, - ] = lean_ctor_objs(inner_val_ptr); - let constant_val = lean_ptr_to_constant_val(constant_val_ptr, cache); - let all: Vec<_> = collect_list_ptrs(all_ptr) + constant_val, + all, + num_params, + num_indices, + num_motives, + num_minors, + rules, + bools, + ] = inner.objs(); + let constant_val = lean_ptr_to_constant_val(constant_val, cache); 
+ let all: Vec<_> = collect_list_objs(all) .into_iter() - .map(|p| lean_ptr_to_name(p, cache.global)) + .map(|o| lean_ptr_to_name(o, cache.global)) .collect(); - let num_params = Nat::from_ptr(num_params_ptr); - let num_indices = Nat::from_ptr(num_indices_ptr); - let num_motives = Nat::from_ptr(num_motives_ptr); - let num_minors = Nat::from_ptr(num_minors_ptr); - let rules: Vec<_> = collect_list_ptrs(rules_ptr) + let num_params = Nat::from_obj(num_params); + let num_indices = Nat::from_obj(num_indices); + let num_motives = Nat::from_obj(num_motives); + let num_minors = Nat::from_obj(num_minors); + let rules: Vec<_> = collect_list_objs(rules) .into_iter() - .map(|p| lean_ptr_to_recursor_rule(p, cache)) + .map(|o| lean_ptr_to_recursor_rule(o, cache)) .collect(); let [k, is_unsafe, ..] = - (bools_ptr as usize).to_le_bytes().map(|b| b == 1); + (bools.as_ptr() as usize).to_le_bytes().map(|b| b == 1); ConstantInfo::RecInfo(RecursorVal { cnst: constant_val, all, @@ -626,35 +613,36 @@ pub fn lean_ptr_to_constant_info( /// Decode a single (Name, ConstantInfo) pair. fn decode_name_constant_info( - ptr: *const c_void, + obj: LeanObj, global: &GlobalCache, ) -> (Name, ConstantInfo) { let mut cache = Cache::new(global); - let [name_ptr, constant_info_ptr] = lean_ctor_objs(ptr); - let name = lean_ptr_to_name(name_ptr, global); - let constant_info = lean_ptr_to_constant_info(constant_info_ptr, &mut cache); + let ctor = obj.as_ctor(); + let [name_obj, constant_info] = ctor.objs(); + let name = lean_ptr_to_name(name_obj, global); + let constant_info = lean_ptr_to_constant_info(constant_info, &mut cache); (name, constant_info) } // Decode a Lean environment in parallel with hybrid caching. 
-pub fn lean_ptr_to_env(ptr: *const c_void) -> Env { +pub fn lean_ptr_to_env(obj: LeanObj) -> Env { // Phase 1: Collect pointers (sequential) - let ptrs = collect_list_ptrs(ptr); + let objs = collect_list_objs(obj); - if ptrs.len() < PARALLEL_THRESHOLD { - return lean_ptr_to_env_sequential(ptr); + if objs.len() < PARALLEL_THRESHOLD { + return lean_ptr_to_env_sequential(obj); } // Estimate: ~3 unique names per constant on average - let global = GlobalCache::with_capacity(ptrs.len() * 3); + let global = GlobalCache::with_capacity(objs.len() * 3); // Phase 2: Decode in parallel with shared global name cache - let pairs: Vec<(Name, ConstantInfo)> = ptrs + let pairs: Vec<(Name, ConstantInfo)> = objs .into_iter() - .map(SendPtr) // Wrap each *const c_void in SendPtr - .collect::>() // Collect into Vec - .into_par_iter() // Now Rayon can use it (SendPtr is Send+Sync) - .map(|p| decode_name_constant_info(p.get(), &global)) // Unwrap with .get() + .map(SendObj) + .collect::>() + .into_par_iter() + .map(|o| decode_name_constant_info(o.get(), &global)) .collect(); // Phase 3: Build final map @@ -667,32 +655,25 @@ pub fn lean_ptr_to_env(ptr: *const c_void) -> Env { } /// Sequential fallback for small environments. -pub fn lean_ptr_to_env_sequential(ptr: *const c_void) -> Env { - let ptrs = collect_list_ptrs(ptr); +pub fn lean_ptr_to_env_sequential(obj: LeanObj) -> Env { + let objs = collect_list_objs(obj); let global = GlobalCache::new(); let mut env = Env::default(); - env.reserve(ptrs.len()); + env.reserve(objs.len()); - for p in ptrs { - let (name, constant_info) = decode_name_constant_info(p, &global); + for o in objs { + let (name, constant_info) = decode_name_constant_info(o, &global); env.insert(name, constant_info); } env } -//#[unsafe(no_mangle)] -//pub extern "C" fn rs_decode_env(ptr: *const c_void) -> usize { -// let env = lean_ptr_to_env(ptr); -// env.len() -//} - // Debug/analysis entry point invoked via the `rust-compile` test flag in // `Tests/FFI/Basic.lean`. 
Exercises the full compile→decompile→check→serialize // roundtrip and size analysis. Output is intentionally suppressed; re-enable // individual `eprintln!` lines when debugging locally. #[unsafe(no_mangle)] extern "C" fn rs_tmp_decode_const_map(obj: LeanObj) -> usize { - let ptr = obj.as_ptr(); // Enable hash-consed size tracking for debugging // TODO: Make this configurable via CLI instead of hardcoded crate::ix::compile::TRACK_HASH_CONSED_SIZE @@ -703,7 +684,7 @@ extern "C" fn rs_tmp_decode_const_map(obj: LeanObj) -> usize { crate::ix::compile::ANALYZE_SHARING .store(false, std::sync::atomic::Ordering::Relaxed); - let env = lean_ptr_to_env(ptr); + let env = lean_ptr_to_env(obj); let env = Arc::new(env); if let Ok(stt) = compile_env(&env) { if let Ok(dstt) = decompile_env(&stt) { diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs index 50f6ea0c..bdeaa67e 100644 --- a/src/lean/ffi/primitives.rs +++ b/src/lean/ffi/primitives.rs @@ -6,17 +6,11 @@ //! - List, Array, ByteArray //! - AssocList, HashMap -use std::ffi::c_void; - -use crate::lean::lean::{ - lean_ctor_get, lean_obj_tag, - lean_uint64_to_nat, -}; +use crate::lean::lean::lean_uint64_to_nat; use crate::lean::nat::Nat; use crate::lean::obj::{ LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObj, LeanString, }; -use crate::lean::{lean_array_data, lean_is_scalar}; // ============================================================================= // Nat Building @@ -56,14 +50,14 @@ pub fn build_nat(n: &Nat) -> LeanObj { /// Round-trip a Nat: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanObj) -> LeanObj { - let nat = Nat::from_ptr(nat_ptr.as_ptr()); + let nat = Nat::from_obj(nat_ptr); build_nat(&nat) } /// Round-trip a String: decode from Lean, re-encode to Lean. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_string(s_ptr: LeanObj) -> LeanObj { - let s = unsafe { LeanString::from_raw(s_ptr.as_ptr()) }; + let s = s_ptr.as_string(); *LeanString::from_str(&s.to_string()) } @@ -71,7 +65,8 @@ pub extern "C" fn rs_roundtrip_string(s_ptr: LeanObj) -> LeanObj { #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanObj) -> LeanObj { // Decode list to Vec - let nats: Vec = crate::lean::collect_list(list_ptr.as_ptr(), Nat::from_ptr); + let list = list_ptr.as_list(); + let nats: Vec = list.collect(Nat::from_obj); // Re-encode as Lean List build_list_nat(&nats) } @@ -80,8 +75,8 @@ pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanObj) -> LeanObj { #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_array_nat(arr_ptr: LeanObj) -> LeanObj { // Decode array - let nats: Vec = - lean_array_data(arr_ptr.as_ptr()).iter().map(|&p| Nat::from_ptr(p)).collect(); + let arr = arr_ptr.as_array(); + let nats: Vec = arr.map(Nat::from_obj); // Re-encode as Lean Array build_array_nat(&nats) } @@ -127,54 +122,44 @@ fn build_array_nat(nats: &[Nat]) -> LeanObj { /// Point is a structure, which in Lean is represented as a constructor with tag 0. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_point(point_ptr: LeanObj) -> LeanObj { - unsafe { - // Point is a structure (single constructor, tag 0) with 2 Nat fields - let x_ptr = lean_ctor_get(point_ptr.as_ptr() as *mut _, 0); - let y_ptr = lean_ctor_get(point_ptr.as_ptr() as *mut _, 1); - - // Decode the Nats - let x = Nat::from_ptr(x_ptr.cast()); - let y = Nat::from_ptr(y_ptr.cast()); - - // Re-encode as Point - let point = LeanCtor::alloc(0, 2, 0); - point.set(0, build_nat(&x)); - point.set(1, build_nat(&y)); - *point - } + let ctor = point_ptr.as_ctor(); + // Point is a structure (single constructor, tag 0) with 2 Nat fields + let x = Nat::from_obj(ctor.get(0)); + let y = Nat::from_obj(ctor.get(1)); + + // Re-encode as Point + let point = LeanCtor::alloc(0, 2, 0); + point.set(0, build_nat(&x)); + point.set(1, build_nat(&y)); + *point } /// Round-trip a NatTree (inductive with leaf : Nat → NatTree | node : NatTree → NatTree → NatTree). #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_nat_tree(tree_ptr: LeanObj) -> LeanObj { - roundtrip_nat_tree_recursive(tree_ptr.as_ptr()) + roundtrip_nat_tree_recursive(tree_ptr) } -fn roundtrip_nat_tree_recursive(tree_ptr: *const c_void) -> LeanObj { - unsafe { - let tag = lean_obj_tag(tree_ptr as *mut _); - match tag { - 0 => { - // leaf : Nat → NatTree - let nat_ptr = lean_ctor_get(tree_ptr as *mut _, 0); - let nat = Nat::from_ptr(nat_ptr.cast()); - let leaf = LeanCtor::alloc(0, 1, 0); - leaf.set(0, build_nat(&nat)); - *leaf - }, - 1 => { - // node : NatTree → NatTree → NatTree - let left_ptr = lean_ctor_get(tree_ptr as *mut _, 0); - let right_ptr = lean_ctor_get(tree_ptr as *mut _, 1); - let left = roundtrip_nat_tree_recursive(left_ptr.cast()); - let right = roundtrip_nat_tree_recursive(right_ptr.cast()); - let node = LeanCtor::alloc(1, 2, 0); - node.set(0, left); - node.set(1, right); - *node - }, - _ => panic!("Invalid NatTree tag: {}", tag), - } +fn roundtrip_nat_tree_recursive(obj: LeanObj) -> LeanObj { + let 
ctor = obj.as_ctor(); + match ctor.tag() { + 0 => { + // leaf : Nat → NatTree + let nat = Nat::from_obj(ctor.get(0)); + let leaf = LeanCtor::alloc(0, 1, 0); + leaf.set(0, build_nat(&nat)); + *leaf + }, + 1 => { + // node : NatTree → NatTree → NatTree + let left = roundtrip_nat_tree_recursive(ctor.get(0)); + let right = roundtrip_nat_tree_recursive(ctor.get(1)); + let node = LeanCtor::alloc(1, 2, 0); + node.set(0, left); + node.set(1, right); + *node + }, + _ => panic!("Invalid NatTree tag: {}", ctor.tag()), } } @@ -186,7 +171,7 @@ pub extern "C" fn rs_roundtrip_assoclist_nat_nat(list_ptr: LeanObj) -> LeanObj { if list_ptr.is_scalar() { return LeanObj::box_usize(0); } - let pairs = decode_assoc_list_nat_nat(list_ptr.as_ptr()); + let pairs = decode_assoc_list_nat_nat(list_ptr); build_assoc_list_nat_nat(&pairs) } @@ -213,54 +198,50 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( return raw_ptr; } - unsafe { - let size_ptr = lean_ctor_get(raw_ptr.as_ptr() as *mut _, 0); - let buckets_ptr = lean_ctor_get(raw_ptr.as_ptr() as *mut _, 1); - - let size = Nat::from_ptr(size_ptr.cast()); + let raw_ctor = raw_ptr.as_ctor(); + let size = Nat::from_obj(raw_ctor.get(0)); + let buckets = raw_ctor.get(1).as_array(); - // Decode and rebuild buckets - let buckets_data = lean_array_data(buckets_ptr.cast()); - let num_buckets = buckets_data.len(); + // Decode and rebuild buckets + let num_buckets = buckets.len(); - let mut all_pairs: Vec<(Nat, Nat)> = Vec::new(); - for &bucket_ptr in buckets_data { - let pairs = decode_assoc_list_nat_nat(bucket_ptr); - all_pairs.extend(pairs); - } - - // Rebuild buckets - let new_buckets = LeanArray::alloc(num_buckets); - let nil = LeanObj::box_usize(0); - for i in 0..num_buckets { - new_buckets.set(i, nil); - } + let mut all_pairs: Vec<(Nat, Nat)> = Vec::new(); + for bucket in buckets.iter() { + let pairs = decode_assoc_list_nat_nat(bucket); + all_pairs.extend(pairs); + } - for (k, v) in &all_pairs { - let k_u64 = k.to_u64().unwrap_or_else(|| 
{ - let bytes = k.to_le_bytes(); - let mut arr = [0u8; 8]; - let len = bytes.len().min(8); - arr[..len].copy_from_slice(&bytes[..len]); - u64::from_le_bytes(arr) - }); - #[allow(clippy::cast_possible_truncation)] - let bucket_idx = (k_u64 as usize) & (num_buckets - 1); - - let old_bucket = new_buckets.get(bucket_idx); - let new_bucket = LeanCtor::alloc(1, 3, 0); - new_bucket.set(0, build_nat(k)); - new_bucket.set(1, build_nat(v)); - new_bucket.set(2, old_bucket); - new_buckets.set(bucket_idx, *new_bucket); - } + // Rebuild buckets + let new_buckets = LeanArray::alloc(num_buckets); + let nil = LeanObj::box_usize(0); + for i in 0..num_buckets { + new_buckets.set(i, nil); + } - // Build Raw - let raw = LeanCtor::alloc(0, 2, 0); - raw.set(0, build_nat(&size)); - raw.set(1, *new_buckets); - *raw + for (k, v) in &all_pairs { + let k_u64 = k.to_u64().unwrap_or_else(|| { + let bytes = k.to_le_bytes(); + let mut arr = [0u8; 8]; + let len = bytes.len().min(8); + arr[..len].copy_from_slice(&bytes[..len]); + u64::from_le_bytes(arr) + }); + #[allow(clippy::cast_possible_truncation)] + let bucket_idx = (k_u64 as usize) & (num_buckets - 1); + + let old_bucket = new_buckets.get(bucket_idx); + let new_bucket = LeanCtor::alloc(1, 3, 0); + new_bucket.set(0, build_nat(k)); + new_bucket.set(1, build_nat(v)); + new_bucket.set(2, old_bucket); + new_buckets.set(bucket_idx, *new_bucket); } + + // Build Raw + let raw = LeanCtor::alloc(0, 2, 0); + raw.set(0, build_nat(&size)); + raw.set(1, *new_buckets); + *raw } /// Round-trip a Std.HashMap Nat Nat. 
@@ -280,94 +261,84 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( /// - cons key value tail: ctor 1, 3 fields #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_hashmap_nat_nat(map_ptr: LeanObj) -> LeanObj { - unsafe { - // Due to unboxing, map_ptr points directly to Raw - let size_ptr = lean_ctor_get(map_ptr.as_ptr() as *mut _, 0); - let buckets_ptr = lean_ctor_get(map_ptr.as_ptr() as *mut _, 1); - - let size = Nat::from_ptr(size_ptr.cast()); - - // Decode buckets (Array of AssocLists) - let buckets_data = lean_array_data(buckets_ptr.cast()); - let mut pairs: Vec<(Nat, Nat)> = Vec::new(); - - for &bucket_ptr in buckets_data { - let bucket_pairs = decode_assoc_list_nat_nat(bucket_ptr); - pairs.extend(bucket_pairs); - } - - // Rebuild the HashMap with the same bucket count - let num_buckets = buckets_data.len(); - let new_buckets = LeanArray::alloc(num_buckets); + let raw_ctor = map_ptr.as_ctor(); + // Due to unboxing, map_ptr points directly to Raw + let size = Nat::from_obj(raw_ctor.get(0)); + let buckets = raw_ctor.get(1).as_array(); - // Initialize all buckets to AssocList.nil (lean_box(0)) - let nil = LeanObj::box_usize(0); - for i in 0..num_buckets { - new_buckets.set(i, nil); - } + // Decode buckets (Array of AssocLists) + let mut pairs: Vec<(Nat, Nat)> = Vec::new(); - // Insert each pair into the appropriate bucket using Lean's hash function - for (k, v) in &pairs { - // Hash the key - for Nat, Lean uses the value itself as hash - let k_u64 = k.to_u64().unwrap_or_else(|| { - // For large nats, use low 64 bits - let bytes = k.to_le_bytes(); - let mut arr = [0u8; 8]; - let len = bytes.len().min(8); - arr[..len].copy_from_slice(&bytes[..len]); - u64::from_le_bytes(arr) - }); - // Lean uses (hash & (buckets.size - 1)) for bucket index (power of 2) - #[allow(clippy::cast_possible_truncation)] - let bucket_idx = (k_u64 as usize) & (num_buckets - 1); + for bucket in buckets.iter() { + let bucket_pairs = decode_assoc_list_nat_nat(bucket); + 
pairs.extend(bucket_pairs); + } - // Get current bucket AssocList - let old_bucket = new_buckets.get(bucket_idx); + // Rebuild the HashMap with the same bucket count + let num_buckets = buckets.len(); + let new_buckets = LeanArray::alloc(num_buckets); - // Build AssocList.cons key value tail (tag 1, 3 fields) - let new_bucket = LeanCtor::alloc(1, 3, 0); - new_bucket.set(0, build_nat(k)); - new_bucket.set(1, build_nat(v)); - new_bucket.set(2, old_bucket); - new_buckets.set(bucket_idx, *new_bucket); - } + // Initialize all buckets to AssocList.nil (lean_box(0)) + let nil = LeanObj::box_usize(0); + for i in 0..num_buckets { + new_buckets.set(i, nil); + } - // Build Raw (ctor 0, 2 fields: size, buckets) - // Due to unboxing, this IS the HashMap - let raw = LeanCtor::alloc(0, 2, 0); - raw.set(0, build_nat(&size)); - raw.set(1, *new_buckets); - *raw + // Insert each pair into the appropriate bucket using Lean's hash function + for (k, v) in &pairs { + // Hash the key - for Nat, Lean uses the value itself as hash + let k_u64 = k.to_u64().unwrap_or_else(|| { + // For large nats, use low 64 bits + let bytes = k.to_le_bytes(); + let mut arr = [0u8; 8]; + let len = bytes.len().min(8); + arr[..len].copy_from_slice(&bytes[..len]); + u64::from_le_bytes(arr) + }); + // Lean uses (hash & (buckets.size - 1)) for bucket index (power of 2) + #[allow(clippy::cast_possible_truncation)] + let bucket_idx = (k_u64 as usize) & (num_buckets - 1); + + // Get current bucket AssocList + let old_bucket = new_buckets.get(bucket_idx); + + // Build AssocList.cons key value tail (tag 1, 3 fields) + let new_bucket = LeanCtor::alloc(1, 3, 0); + new_bucket.set(0, build_nat(k)); + new_bucket.set(1, build_nat(v)); + new_bucket.set(2, old_bucket); + new_buckets.set(bucket_idx, *new_bucket); } + + // Build Raw (ctor 0, 2 fields: size, buckets) + // Due to unboxing, this IS the HashMap + let raw = LeanCtor::alloc(0, 2, 0); + raw.set(0, build_nat(&size)); + raw.set(1, *new_buckets); + *raw } /// Decode a 
Lean AssocList Nat Nat to Vec of pairs /// AssocList: nil (tag 0) | cons key value tail (tag 1, 3 fields) -pub fn decode_assoc_list_nat_nat(list_ptr: *const c_void) -> Vec<(Nat, Nat)> { +pub fn decode_assoc_list_nat_nat(obj: LeanObj) -> Vec<(Nat, Nat)> { let mut result = Vec::new(); - let mut current = list_ptr; + let mut current = obj; loop { - unsafe { - if lean_is_scalar(current) { - break; - } - - let tag = lean_obj_tag(current as *mut _); - if tag == 0 { - break; - } + if current.is_scalar() { + break; + } - let key_ptr = lean_ctor_get(current as *mut _, 0); - let value_ptr = lean_ctor_get(current as *mut _, 1); - let tail_ptr = lean_ctor_get(current as *mut _, 2); + let ctor = current.as_ctor(); + if ctor.tag() == 0 { + break; + } - let k = Nat::from_ptr(key_ptr.cast()); - let v = Nat::from_ptr(value_ptr.cast()); + let k = Nat::from_obj(ctor.get(0)); + let v = Nat::from_obj(ctor.get(1)); - result.push((k, v)); - current = tail_ptr.cast(); - } + result.push((k, v)); + current = ctor.get(2); } result diff --git a/src/lean/nat.rs b/src/lean/nat.rs index 55a00c76..fae7db05 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -9,10 +9,7 @@ use std::mem::MaybeUninit; use num_bigint::BigUint; -use crate::{ - lean::{as_ref_unsafe, lean_is_scalar}, - lean_unbox, -}; +use crate::lean::obj::LeanObj; /// Arbitrary-precision natural number, wrapping `BigUint`. #[derive(Hash, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)] @@ -42,16 +39,22 @@ impl Nat { /// Decode a `Nat` from a Lean object pointer. Handles both scalar (unboxed) /// and heap-allocated (GMP `mpz_object`) representations. 
pub fn from_ptr(ptr: *const c_void) -> Nat { - if lean_is_scalar(ptr) { - let u = lean_unbox!(usize, ptr); + let obj = unsafe { LeanObj::from_raw(ptr) }; + if obj.is_scalar() { + let u = obj.unbox_usize(); Nat(BigUint::from_bytes_le(&u.to_le_bytes())) } else { // Heap-allocated big integer (mpz_object) - let obj: &MpzObject = as_ref_unsafe(ptr.cast()); - Nat(obj.m_value.to_biguint()) + let mpz: &MpzObject = unsafe { &*ptr.cast() }; + Nat(mpz.m_value.to_biguint()) } } + /// Decode a `Nat` from a `LeanObj`. Convenience wrapper over `from_ptr`. + pub fn from_obj(obj: crate::lean::obj::LeanObj) -> Nat { + Self::from_ptr(obj.as_ptr()) + } + #[inline] pub fn from_le_bytes(bytes: &[u8]) -> Nat { Nat(BigUint::from_bytes_le(bytes)) @@ -102,8 +105,7 @@ impl Mpz { // GMP interop for building Lean Nat objects from limbs // ============================================================================= -use super::lean::lean_uint64_to_nat; -use super::lean_box_fn; +use crate::lean::lean::lean_uint64_to_nat; /// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1 const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64; @@ -140,12 +142,12 @@ pub unsafe fn lean_nat_from_limbs( limbs: *const u64, ) -> *mut c_void { if num_limbs == 0 { - return lean_box_fn(0); + return LeanObj::box_usize(0).as_mut_ptr(); } let first = unsafe { *limbs }; if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT { #[allow(clippy::cast_possible_truncation)] // only targets 64-bit - return lean_box_fn(first as usize); + return LeanObj::box_usize(first as usize).as_mut_ptr(); } if num_limbs == 1 { return unsafe { lean_uint64_to_nat(first).cast() }; diff --git a/src/lean/obj.rs b/src/lean/obj.rs index ffc99292..0ecee682 100644 --- a/src/lean/obj.rs +++ b/src/lean/obj.rs @@ -8,8 +8,8 @@ use std::ffi::c_void; use std::marker::PhantomData; use std::ops::Deref; -use super::lean; -use super::safe_cstring; +use crate::lean::lean; +use crate::lean::safe_cstring; // 
============================================================================= // LeanObj — Untyped base wrapper @@ -92,6 +92,49 @@ impl LeanObj { unsafe { lean::lean_unbox_uint64(self.0 as *mut _) } } + /// Interpret as a constructor object (tag 0–243). + /// + /// Debug-asserts the tag is in range. + #[inline] + pub fn as_ctor(self) -> LeanCtor { + debug_assert!(!self.is_scalar() && self.tag() <= 243); + LeanCtor(self) + } + + /// Interpret as a `String` object (tag 249). + /// + /// Debug-asserts the tag is correct. + #[inline] + pub fn as_string(self) -> LeanString { + debug_assert!(!self.is_scalar() && self.tag() == 249); + LeanString(self) + } + + /// Interpret as an `Array` object (tag 246). + /// + /// Debug-asserts the tag is correct. + #[inline] + pub fn as_array(self) -> LeanArray { + debug_assert!(!self.is_scalar() && self.tag() == 246); + LeanArray(self) + } + + /// Interpret as a `List` (nil = scalar, cons = tag 1). + /// + /// Debug-asserts the tag is valid for a list. + #[inline] + pub fn as_list(self) -> LeanList { + debug_assert!(self.is_scalar() || self.tag() == 1); + LeanList(self) + } + + /// Interpret as a `ByteArray` object (tag 248). 
+ #[inline] + pub fn as_byte_array(self) -> LeanByteArray { + debug_assert!(!self.is_scalar() && self.tag() == 248); + LeanByteArray(self) + } + #[inline] pub fn box_u32(n: u32) -> Self { Self(unsafe { lean::lean_box_uint32(n) }.cast()) @@ -577,7 +620,7 @@ impl Iterator for LeanListIter { if self.0.is_scalar() { return None; } - let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + let ctor = self.0.as_ctor(); let [head, tail] = ctor.objs::<2>(); self.0 = tail; Some(head) @@ -634,7 +677,7 @@ impl LeanOption { if self.is_none() { None } else { - let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + let ctor = self.0.as_ctor(); Some(ctor.get(0)) } } @@ -696,7 +739,7 @@ impl LeanExcept { } pub fn into_result(self) -> Result { - let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + let ctor = self.0.as_ctor(); if self.is_ok() { Ok(ctor.get(0)) } else { Err(ctor.get(0)) } } } @@ -927,13 +970,13 @@ impl LeanProd { /// Get the first element. pub fn fst(&self) -> LeanObj { - let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + let ctor = self.0.as_ctor(); ctor.get(0) } /// Get the second element. 
pub fn snd(&self) -> LeanObj { - let ctor = unsafe { LeanCtor::from_raw(self.0.as_ptr()) }; + let ctor = self.0.as_ctor(); ctor.get(1) } } From 24ad79af100e30b816f8025c412f09fa1ca4fd09 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 10:45:27 -0500 Subject: [PATCH 10/27] Fmt --- src/lean/ffi/aiur.rs | 5 +--- src/lean/ffi/aiur/protocol.rs | 17 ++++--------- src/lean/ffi/aiur/toplevel.rs | 29 ++++++---------------- src/lean/ffi/compile.rs | 39 ++++++++++-------------------- src/lean/ffi/graph.rs | 2 +- src/lean/ffi/ix/constant.rs | 21 +++++++--------- src/lean/ffi/ix/data.rs | 28 +++++++--------------- src/lean/ffi/ix/env.rs | 16 +++++-------- src/lean/ffi/ix/expr.rs | 17 ++++++------- src/lean/ffi/ix/name.rs | 2 +- src/lean/ffi/ixon/constant.rs | 32 +++++++++++++++++++++---- src/lean/ffi/ixon/env.rs | 4 ++-- src/lean/ffi/ixon/meta.rs | 8 +++---- src/lean/ffi/ixon/serialize.rs | 12 ++++------ src/lean/ffi/lean_env.rs | 44 ++++++++++++++++------------------ src/lean/ffi/primitives.rs | 7 +++--- 16 files changed, 121 insertions(+), 162 deletions(-) diff --git a/src/lean/ffi/aiur.rs b/src/lean/ffi/aiur.rs index 949a5dd6..27ab8e52 100644 --- a/src/lean/ffi/aiur.rs +++ b/src/lean/ffi/aiur.rs @@ -3,10 +3,7 @@ use multi_stark::p3_field::integers::QuotientMap; pub mod protocol; pub mod toplevel; -use crate::{ - aiur::G, - lean::obj::LeanObj, -}; +use crate::{aiur::G, lean::obj::LeanObj}; #[inline] pub(super) fn lean_unbox_nat_as_usize(obj: LeanObj) -> usize { diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs index f4141c90..c8dbe4d6 100644 --- a/src/lean/ffi/aiur/protocol.rs +++ b/src/lean/ffi/aiur/protocol.rs @@ -104,15 +104,12 @@ extern "C" fn rs_aiur_system_prove( let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); let fun_idx = lean_unbox_nat_as_usize(fun_idx); let args = args.as_array().map(lean_unbox_g); - let io_data = - io_data_arr.as_array().map(lean_unbox_g); 
+ let io_data = io_data_arr.as_array().map(lean_unbox_g); let io_map = lean_array_to_io_buffer_map(io_map_arr); let mut io_buffer = IOBuffer { data: io_data, map: io_map }; let (claim, proof) = - aiur_system_obj - .get() - .prove(fri_parameters, fun_idx, &args, &mut io_buffer); + aiur_system_obj.get().prove(fri_parameters, fun_idx, &args, &mut io_buffer); // claim: Array G let lean_claim = build_g_array(&claim); @@ -171,9 +168,7 @@ fn build_g_array(values: &[G]) -> LeanObj { } fn lean_ptr_to_commitment_parameters(obj: LeanObj) -> CommitmentParameters { - CommitmentParameters { - log_blowup: lean_unbox_nat_as_usize(obj), - } + CommitmentParameters { log_blowup: lean_unbox_nat_as_usize(obj) } } fn lean_ctor_to_fri_parameters(obj: LeanObj) -> FriParameters { @@ -188,12 +183,10 @@ fn lean_ctor_to_fri_parameters(obj: LeanObj) -> FriParameters { fn lean_array_to_io_buffer_map(obj: LeanObj) -> FxHashMap, IOKeyInfo> { let arr = obj.as_array(); - let mut map = - FxHashMap::with_capacity_and_hasher(arr.len(), FxBuildHasher); + let mut map = FxHashMap::with_capacity_and_hasher(arr.len(), FxBuildHasher); for elt in arr.iter() { let pair = elt.as_ctor(); - let key = - pair.get(0).as_array().map(lean_unbox_g); + let key = pair.get(0).as_array().map(lean_unbox_g); let info_ctor = pair.get(1).as_ctor(); let info = IOKeyInfo { idx: lean_unbox_nat_as_usize(info_ctor.get(0)), diff --git a/src/lean/ffi/aiur/toplevel.rs b/src/lean/ffi/aiur/toplevel.rs index ddd8ce60..ace58895 100644 --- a/src/lean/ffi/aiur/toplevel.rs +++ b/src/lean/ffi/aiur/toplevel.rs @@ -51,17 +51,11 @@ fn lean_ptr_to_op(obj: LeanObj) -> Op { }, 7 => { let [width, val_idx] = ctor.objs::<2>(); - Op::Load( - lean_unbox_nat_as_usize(width), - lean_unbox_nat_as_usize(val_idx), - ) + Op::Load(lean_unbox_nat_as_usize(width), lean_unbox_nat_as_usize(val_idx)) }, 8 => { let [a, b] = ctor.objs::<2>(); - Op::AssertEq( - lean_ptr_to_vec_val_idx(a), - lean_ptr_to_vec_val_idx(b), - ) + Op::AssertEq(lean_ptr_to_vec_val_idx(a), 
lean_ptr_to_vec_val_idx(b)) }, 9 => { let [key] = ctor.objs::<1>(); @@ -77,10 +71,7 @@ fn lean_ptr_to_op(obj: LeanObj) -> Op { }, 11 => { let [idx, len] = ctor.objs::<2>(); - Op::IORead( - lean_unbox_nat_as_usize(idx), - lean_unbox_nat_as_usize(len), - ) + Op::IORead(lean_unbox_nat_as_usize(idx), lean_unbox_nat_as_usize(len)) }, 12 => { let [data] = ctor.objs::<1>(); @@ -129,10 +120,7 @@ fn lean_ptr_to_op(obj: LeanObj) -> Op { None } else { let inner_ctor = idxs_obj.as_ctor(); - Some( - inner_ctor.get(0).as_array() - .map(lean_unbox_nat_as_usize), - ) + Some(inner_ctor.get(0).as_array().map(lean_unbox_nat_as_usize)) }; Op::Debug(label, idxs) }, @@ -154,8 +142,7 @@ fn lean_ptr_to_ctrl(obj: LeanObj) -> Ctrl { 0 => { let [val_idx_obj, cases_obj, default_obj] = ctor.objs::<3>(); let val_idx = lean_unbox_nat_as_usize(val_idx_obj); - let vec_cases = - cases_obj.as_array().map(lean_ptr_to_g_block_pair); + let vec_cases = cases_obj.as_array().map(lean_ptr_to_g_block_pair); let cases = FxIndexMap::from_iter(vec_cases); let default = if default_obj.is_scalar() { None @@ -209,9 +196,7 @@ fn lean_ptr_to_function(obj: LeanObj) -> Function { pub(crate) fn lean_ptr_to_toplevel(obj: LeanObj) -> Toplevel { let ctor = obj.as_ctor(); let [functions_obj, memory_sizes_obj] = ctor.objs::<2>(); - let functions = - functions_obj.as_array().map(lean_ptr_to_function); - let memory_sizes = - memory_sizes_obj.as_array().map(lean_unbox_nat_as_usize); + let functions = functions_obj.as_array().map(lean_ptr_to_function); + let memory_sizes = memory_sizes_obj.as_array().map(lean_unbox_nat_as_usize); Toplevel { functions, memory_sizes } } diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs index e5bd26fc..d2c01fb7 100644 --- a/src/lean/ffi/compile.rs +++ b/src/lean/ffi/compile.rs @@ -10,7 +10,6 @@ use std::collections::HashMap; use std::sync::Arc; -use crate::lean::ffi::{ffi_io_guard, io_error, io_ok}; use crate::ix::address::Address; use crate::ix::compile::{CompileState, compile_env}; 
use crate::ix::condense::compute_sccs; @@ -21,6 +20,7 @@ use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; +use crate::lean::ffi::{ffi_io_guard, io_error, io_ok}; use crate::lean::lean::lean_uint64_to_nat; use crate::lean::nat::Nat; use crate::lean::obj::{ @@ -42,7 +42,9 @@ use crate::lean::ffi::ixon::env::{ build_raw_env, build_raw_name_entry, decode_raw_env, decoded_to_ixon_env, }; use crate::lean::ffi::ixon::meta::{build_constant_meta, build_ixon_comm}; -use crate::lean::ffi::lean_env::{GlobalCache, lean_ptr_to_env, lean_ptr_to_name}; +use crate::lean::ffi::lean_env::{ + GlobalCache, lean_ptr_to_env, lean_ptr_to_name, +}; // ============================================================================= // Helper builders @@ -194,9 +196,7 @@ pub extern "C" fn rs_roundtrip_block_compare_detail(obj: LeanObj) -> LeanObj { /// FFI function to run the complete compilation pipeline and return all data. #[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env_full( - env_consts_ptr: LeanObj, -) -> LeanObj { +pub extern "C" fn rs_compile_env_full(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { // Phase 1: Decode Lean environment let rust_env = lean_ptr_to_env(env_consts_ptr); @@ -220,8 +220,7 @@ pub extern "C" fn rs_compile_env_full( // Phase 4: Build Lean structures let mut cache = LeanBuildCache::with_capacity(env_len); - let raw_env = - build_raw_environment(&mut cache, &rust_env); + let raw_env = build_raw_environment(&mut cache, &rust_env); let condensed_obj = build_condensed_blocks(&mut cache, &condensed); // Collect blocks @@ -331,17 +330,14 @@ pub extern "C" fn rs_roundtrip_raw_env(raw_env_obj: LeanObj) -> LeanObj { /// FFI function to run all compilation phases and return combined results. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_phases( - env_consts_ptr: LeanObj, -) -> LeanObj { +pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); let mut cache = LeanBuildCache::with_capacity(env_len); - let raw_env = - build_raw_environment(&mut cache, &rust_env); + let raw_env = build_raw_environment(&mut cache, &rust_env); let ref_graph = build_ref_graph(&rust_env); @@ -432,9 +428,7 @@ pub extern "C" fn rs_compile_phases( /// FFI function to compile a Lean environment to a RawEnv. #[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env_to_ixon( - env_consts_ptr: LeanObj, -) -> LeanObj { +pub extern "C" fn rs_compile_env_to_ixon(env_consts_ptr: LeanObj) -> LeanObj { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); @@ -524,8 +518,7 @@ pub extern "C" fn rs_canonicalize_env_to_ix( ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - let raw_env = - build_raw_environment(&mut cache, &rust_env); + let raw_env = build_raw_environment(&mut cache, &rust_env); io_ok(raw_env) })) } @@ -1280,9 +1273,7 @@ pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { let msg = ctor.get(0).as_string().to_string(); DecompileError::BadConstantFormat { msg } }, - 10 => { - DecompileError::Serialize(decode_serialize_error(ctor.get(0))) - }, + 10 => DecompileError::Serialize(decode_serialize_error(ctor.get(0))), _ => unreachable!("Invalid DecompileError tag: {}", ctor.tag()), } } @@ -1354,9 +1345,7 @@ pub fn decode_compile_error(obj: LeanObj) -> CompileError { let param = ctor.get(1).as_string().to_string(); CompileError::UnknownUnivParam { curr, param } }, - 5 => { - 
CompileError::Serialize(decode_serialize_error(ctor.get(0))) - }, + 5 => CompileError::Serialize(decode_serialize_error(ctor.get(0))), _ => unreachable!("Invalid CompileError tag: {}", ctor.tag()), } } @@ -1417,8 +1406,6 @@ pub extern "C" fn rs_decompile_env(raw_env_obj: LeanObj) -> LeanObj { LeanExcept::ok(arr).into() }, - Err(e) => { - LeanExcept::error(build_decompile_error(&e)).into() - }, + Err(e) => LeanExcept::error(build_decompile_error(&e)).into(), } } diff --git a/src/lean/ffi/graph.rs b/src/lean/ffi/graph.rs index b41f3263..103f79f1 100644 --- a/src/lean/ffi/graph.rs +++ b/src/lean/ffi/graph.rs @@ -2,9 +2,9 @@ use std::sync::Arc; -use crate::lean::ffi::{ffi_io_guard, io_ok}; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; +use crate::lean::ffi::{ffi_io_guard, io_ok}; use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; use crate::lean::ffi::builder::LeanBuildCache; diff --git a/src/lean/ffi/ix/constant.rs b/src/lean/ffi/ix/constant.rs index 3e0313ef..73153ac5 100644 --- a/src/lean/ffi/ix/constant.rs +++ b/src/lean/ffi/ix/constant.rs @@ -19,11 +19,11 @@ use crate::lean::nat::Nat; use crate::lean::obj::{IxConstantInfo, LeanArray, LeanCtor, LeanObj}; use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::primitives::build_nat; use crate::lean::ffi::ix::expr::{build_expr, decode_ix_expr}; use crate::lean::ffi::ix::name::{ build_name, build_name_array, decode_ix_name, decode_name_array, }; +use crate::lean::ffi::primitives::build_nat; /// Build a Ix.ConstantVal structure. 
pub fn build_constant_val( @@ -272,9 +272,7 @@ fn build_recursor_rules( pub fn decode_constant_val(obj: LeanObj) -> ConstantVal { let ctor = obj.as_ctor(); let name = decode_ix_name(ctor.get(0)); - let level_params: Vec = - ctor.get(1).as_array() - .map(decode_ix_name); + let level_params: Vec = ctor.get(1).as_array().map(decode_ix_name); let typ = decode_ix_expr(ctor.get(2)); ConstantVal { name, level_params, typ } @@ -346,13 +344,11 @@ pub fn decode_constant_info(obj: LeanObj) -> ConstantInfo { all: decode_name_array(inner.get(3)), }) }, - 2 => { - ConstantInfo::ThmInfo(TheoremVal { - cnst: decode_constant_val(inner.get(0)), - value: decode_ix_expr(inner.get(1)), - all: decode_name_array(inner.get(2)), - }) - }, + 2 => ConstantInfo::ThmInfo(TheoremVal { + cnst: decode_constant_val(inner.get(0)), + value: decode_ix_expr(inner.get(1)), + all: decode_name_array(inner.get(2)), + }), 3 => { let is_unsafe = inner.scalar_u8(3, 0) != 0; @@ -412,8 +408,7 @@ pub fn decode_constant_info(obj: LeanObj) -> ConstantInfo { let is_unsafe = inner.scalar_u8(7, 1) != 0; let rules: Vec = - inner.get(6).as_array() - .map(decode_recursor_rule); + inner.get(6).as_array().map(decode_recursor_rule); ConstantInfo::RecInfo(RecursorVal { cnst: decode_constant_val(inner.get(0)), diff --git a/src/lean/ffi/ix/data.rs b/src/lean/ffi/ix/data.rs index a3ca65c4..188296ba 100644 --- a/src/lean/ffi/ix/data.rs +++ b/src/lean/ffi/ix/data.rs @@ -5,13 +5,13 @@ use crate::ix::env::{ }; use crate::lean::nat::Nat; use crate::lean::obj::{ - IxDataValue, IxInt, IxSourceInfo, IxSubstring, IxSyntax, - IxSyntaxPreresolved, LeanArray, LeanCtor, LeanObj, LeanString, + IxDataValue, IxInt, IxSourceInfo, IxSubstring, IxSyntax, IxSyntaxPreresolved, + LeanArray, LeanCtor, LeanObj, LeanString, }; use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::primitives::build_nat; use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; +use crate::lean::ffi::primitives::build_nat; /// Build a Ix.Int (ofNat 
or negSucc). pub fn build_int(int: &Int) -> IxInt { @@ -125,8 +125,7 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> IxSyntax { let info_obj = build_source_info(info); let raw_val_obj = build_substring(raw_val); let val_obj = build_name(cache, val); - let preresolved_obj = - build_syntax_preresolved_array(cache, preresolved); + let preresolved_obj = build_syntax_preresolved_array(cache, preresolved); let obj = LeanCtor::alloc(3, 4, 0); obj.set(0, info_obj); obj.set(1, raw_val_obj); @@ -241,9 +240,7 @@ pub fn decode_data_value(obj: LeanObj) -> DataValue { match ctor.tag() { 0 => { // ofString: 1 object field - DataValue::OfString( - ctor.get(0).as_string().to_string(), - ) + DataValue::OfString(ctor.get(0).as_string().to_string()) }, 1 => { // ofBool: 0 object fields, 1 scalar byte @@ -289,19 +286,14 @@ pub fn decode_ix_syntax(obj: LeanObj) -> Syntax { // node: info, kind, args let info = decode_ix_source_info(ctor.get(0)); let kind = decode_ix_name(ctor.get(1)); - let args: Vec = - ctor.get(2).as_array() - .map(decode_ix_syntax); + let args: Vec = ctor.get(2).as_array().map(decode_ix_syntax); Syntax::Node(info, kind, args) }, 2 => { // atom: info, val let info = decode_ix_source_info(ctor.get(0)); - Syntax::Atom( - info, - ctor.get(1).as_string().to_string(), - ) + Syntax::Atom(info, ctor.get(1).as_string().to_string()) }, 3 => { // ident: info, rawVal, val, preresolved @@ -309,8 +301,7 @@ pub fn decode_ix_syntax(obj: LeanObj) -> Syntax { let raw_val = decode_substring(ctor.get(1)); let val = decode_ix_name(ctor.get(2)); let preresolved: Vec = - ctor.get(3).as_array() - .map(decode_syntax_preresolved); + ctor.get(3).as_array().map(decode_syntax_preresolved); Syntax::Ident(info, raw_val, val, preresolved) }, @@ -371,8 +362,7 @@ pub fn decode_syntax_preresolved(obj: LeanObj) -> SyntaxPreresolved { // decl let name = decode_ix_name(ctor.get(0)); let aliases: Vec = - ctor.get(1).as_array() - .map(|obj| obj.as_string().to_string()); + 
ctor.get(1).as_array().map(|obj| obj.as_string().to_string()); SyntaxPreresolved::Decl(name, aliases) }, diff --git a/src/lean/ffi/ix/env.rs b/src/lean/ffi/ix/env.rs index a2a7da51..6d2a317d 100644 --- a/src/lean/ffi/ix/env.rs +++ b/src/lean/ffi/ix/env.rs @@ -6,7 +6,9 @@ use crate::ix::env::{ConstantInfo, Name}; use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::constant::{build_constant_info, decode_constant_info}; +use crate::lean::ffi::ix::constant::{ + build_constant_info, decode_constant_info, +}; use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= @@ -168,9 +170,7 @@ where /// /// NOTE: Environment with a single field is UNBOXED by Lean, /// so the pointer IS the HashMap directly, not a structure containing it. -pub fn decode_ix_environment( - obj: LeanObj, -) -> FxHashMap { +pub fn decode_ix_environment(obj: LeanObj) -> FxHashMap { // Environment is unboxed - obj IS the HashMap directly let consts_pairs = decode_hashmap(obj, decode_ix_name, decode_constant_info); let mut consts: FxHashMap = FxHashMap::default(); @@ -240,9 +240,7 @@ pub fn build_raw_environment_from_vec( /// Round-trip an Ix.Environment: decode from Lean, re-encode. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_environment( - env_ptr: LeanObj, -) -> LeanObj { +pub extern "C" fn rs_roundtrip_ix_environment(env_ptr: LeanObj) -> LeanObj { let env = decode_ix_environment(env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); build_raw_environment(&mut cache, &env) @@ -251,9 +249,7 @@ pub extern "C" fn rs_roundtrip_ix_environment( /// Round-trip an Ix.RawEnvironment: decode from Lean, re-encode. /// Uses Vec-preserving functions to maintain array structure and order. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_raw_environment( - env_ptr: LeanObj, -) -> LeanObj { +pub extern "C" fn rs_roundtrip_ix_raw_environment(env_ptr: LeanObj) -> LeanObj { let env = decode_ix_raw_environment_vec(env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); build_raw_environment_from_vec(&mut cache, &env) diff --git a/src/lean/ffi/ix/expr.rs b/src/lean/ffi/ix/expr.rs index 795723ad..d5e45b81 100644 --- a/src/lean/ffi/ix/expr.rs +++ b/src/lean/ffi/ix/expr.rs @@ -21,11 +21,13 @@ use crate::lean::nat::Nat; use crate::lean::obj::{IxExpr, LeanArray, LeanCtor, LeanObj, LeanString}; use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::primitives::build_nat; use crate::lean::ffi::ix::address::build_address; use crate::lean::ffi::ix::data::{build_data_value, decode_data_value}; -use crate::lean::ffi::ix::level::{build_level, build_level_array, decode_ix_level}; +use crate::lean::ffi::ix::level::{ + build_level, build_level_array, decode_ix_level, +}; use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; +use crate::lean::ffi::primitives::build_nat; /// Build a Lean Ix.Expr with embedded hash. /// Uses caching to avoid rebuilding the same expression. 
@@ -241,9 +243,7 @@ pub fn decode_ix_expr(obj: LeanObj) -> Expr { 4 => { // const let name = decode_ix_name(ctor.get(0)); - let levels: Vec = - ctor.get(1).as_array() - .map(decode_ix_level); + let levels: Vec = ctor.get(1).as_array().map(decode_ix_level); Expr::cnst(name, levels) }, @@ -297,8 +297,7 @@ pub fn decode_ix_expr(obj: LeanObj) -> Expr { 10 => { // mdata: data, expr, hash let data: Vec<(Name, DataValue)> = - ctor.get(0).as_array() - .map(decode_name_data_value); + ctor.get(0).as_array().map(decode_name_data_value); let inner = decode_ix_expr(ctor.get(1)); Expr::mdata(data, inner) @@ -326,9 +325,7 @@ pub fn decode_literal(obj: LeanObj) -> Literal { }, 1 => { // strVal - Literal::StrVal( - ctor.get(0).as_string().to_string(), - ) + Literal::StrVal(ctor.get(0).as_string().to_string()) }, _ => panic!("Invalid Literal tag: {}", ctor.tag()), } diff --git a/src/lean/ffi/ix/name.rs b/src/lean/ffi/ix/name.rs index def0fc81..3a89ad9b 100644 --- a/src/lean/ffi/ix/name.rs +++ b/src/lean/ffi/ix/name.rs @@ -10,8 +10,8 @@ use crate::lean::nat::Nat; use crate::lean::obj::{IxName, LeanArray, LeanCtor, LeanObj, LeanString}; use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::primitives::build_nat; use crate::lean::ffi::ix::address::build_address; +use crate::lean::ffi::primitives::build_nat; /// Build a Lean Ix.Name with embedded hash. /// Uses caching to avoid rebuilding the same name. 
diff --git a/src/lean/ffi/ixon/constant.rs b/src/lean/ffi/ixon/constant.rs index 225bfaaf..0fbe2677 100644 --- a/src/lean/ffi/ixon/constant.rs +++ b/src/lean/ffi/ixon/constant.rs @@ -15,13 +15,17 @@ use crate::ix::ixon::constant::{ Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, RecursorRule as IxonRecursorRule, }; -use crate::lean::obj::{IxAddress, LeanArray, LeanByteArray, LeanCtor, LeanObj}; +use crate::lean::obj::{ + IxAddress, LeanArray, LeanByteArray, LeanCtor, LeanObj, +}; use crate::lean::ffi::ixon::expr::{ build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, decode_ixon_expr_array, }; -use crate::lean::ffi::ixon::univ::{build_ixon_univ_array, decode_ixon_univ_array}; +use crate::lean::ffi::ixon::univ::{ + build_ixon_univ_array, decode_ixon_univ_array, +}; /// Build Address from Ixon Address type (which is just a [u8; 32]). pub fn build_address_from_ixon(addr: &Address) -> IxAddress { @@ -372,7 +376,17 @@ pub fn decode_ixon_recursor(obj: LeanObj) -> IxonRecursor { let minors = ctor.scalar_u64(2, 32); let k = ctor.scalar_u8(2, 40) != 0; let is_unsafe = ctor.scalar_u8(2, 41) != 0; - IxonRecursor { k, is_unsafe, lvls, params, indices, motives, minors, typ, rules } + IxonRecursor { + k, + is_unsafe, + lvls, + params, + indices, + motives, + minors, + typ, + rules, + } } /// Decode Ixon.Axiom. @@ -429,7 +443,17 @@ pub fn decode_ixon_inductive(obj: LeanObj) -> IxonInductive { let recr = ctor.scalar_u8(2, 32) != 0; let refl = ctor.scalar_u8(2, 33) != 0; let is_unsafe = ctor.scalar_u8(2, 34) != 0; - IxonInductive { recr, refl, is_unsafe, lvls, params, indices, nested, typ, ctors } + IxonInductive { + recr, + refl, + is_unsafe, + lvls, + params, + indices, + nested, + typ, + ctors, + } } /// Decode Ixon.InductiveProj. 
diff --git a/src/lean/ffi/ixon/env.rs b/src/lean/ffi/ixon/env.rs index dc472731..41474f2c 100644 --- a/src/lean/ffi/ixon/env.rs +++ b/src/lean/ffi/ixon/env.rs @@ -13,13 +13,13 @@ use crate::lean::obj::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObj, }; +use crate::lean::ffi::builder::LeanBuildCache; +use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; use crate::lean::ffi::ixon::constant::{ build_address_from_ixon, build_ixon_constant, decode_ixon_address, decode_ixon_constant, }; use crate::lean::ffi::ixon::meta::{build_constant_meta, decode_constant_meta}; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= // Comm Type (secret: Address, payload: Address) diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs index 5a71d4d1..c8f09d52 100644 --- a/src/lean/ffi/ixon/meta.rs +++ b/src/lean/ffi/ixon/meta.rs @@ -11,14 +11,14 @@ use crate::ix::ixon::metadata::{ }; use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; -use crate::lean::ffi::ixon::constant::{ - build_address_array, build_address_from_ixon, decode_ixon_address, - decode_ixon_address_array, -}; use crate::lean::ffi::ix::constant::{ build_reducibility_hints, decode_reducibility_hints, }; use crate::lean::ffi::ix::expr::binder_info_to_u8; +use crate::lean::ffi::ixon::constant::{ + build_address_array, build_address_from_ixon, decode_ixon_address, + decode_ixon_address_array, +}; // ============================================================================= // DataValue Build/Decode diff --git a/src/lean/ffi/ixon/serialize.rs b/src/lean/ffi/ixon/serialize.rs index 46e08962..ceaca271 100644 --- a/src/lean/ffi/ixon/serialize.rs +++ b/src/lean/ffi/ixon/serialize.rs @@ -12,15 +12,13 @@ use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; use crate::lean::obj::LeanObj; -use 
crate::lean::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; +use crate::lean::ffi::ixon::constant::{ + decode_ixon_address, decode_ixon_constant, +}; /// Unbox a Lean UInt64, handling both scalar and boxed representations. fn lean_ptr_to_u64(obj: LeanObj) -> u64 { - if obj.is_scalar() { - obj.unbox_usize() as u64 - } else { - obj.unbox_u64() - } + if obj.is_scalar() { obj.unbox_usize() as u64 } else { obj.unbox_u64() } } /// Decode a Lean `Ixon.Expr` to a Rust `IxonExpr`. @@ -181,8 +179,8 @@ pub extern "C" fn rs_eq_env_serialization( raw_env_obj: LeanObj, bytes_obj: LeanObj, ) -> bool { - use crate::lean::ffi::ixon::env::decode_raw_env; use crate::ix::ixon::env::Env; + use crate::lean::ffi::ixon::env::decode_raw_env; let decoded = decode_raw_env(raw_env_obj); let ba = bytes_obj.as_byte_array(); diff --git a/src/lean/ffi/lean_env.rs b/src/lean/ffi/lean_env.rs index 52509d92..c1a42cd9 100644 --- a/src/lean/ffi/lean_env.rs +++ b/src/lean/ffi/lean_env.rs @@ -115,9 +115,7 @@ pub fn lean_ptr_to_name(obj: LeanObj, global: &GlobalCache) -> Name { // Recursive call - will also use global cache let pre = lean_ptr_to_name(pre, global); match ctor.tag() { - 1 => { - Name::str(pre, pos.as_string().to_string()) - }, + 1 => Name::str(pre, pos.as_string().to_string()), 2 => Name::num(pre, Nat::from_obj(pos)), _ => unreachable!(), } @@ -214,11 +212,11 @@ fn lean_ptr_to_syntax_preresolved( 1 => { let [name_obj, fields_obj] = ctor.objs(); let name = lean_ptr_to_name(name_obj, cache.global); - let fields: Vec = - fields_obj.as_list() - .iter() - .map(|o| o.as_string().to_string()) - .collect(); + let fields: Vec = fields_obj + .as_list() + .iter() + .map(|o| o.as_string().to_string()) + .collect(); SyntaxPreresolved::Decl(name, fields) }, _ => unreachable!(), @@ -235,19 +233,14 @@ fn lean_ptr_to_syntax(obj: LeanObj, cache: &mut Cache<'_>) -> Syntax { let [info, kind, args] = ctor.objs(); let info = lean_ptr_to_source_info(info); let kind = lean_ptr_to_name(kind, 
cache.global); - let args: Vec<_> = args.as_array() - .iter() - .map(|o| lean_ptr_to_syntax(o, cache)) - .collect(); + let args: Vec<_> = + args.as_array().iter().map(|o| lean_ptr_to_syntax(o, cache)).collect(); Syntax::Node(info, kind, args) }, 2 => { let [info, val] = ctor.objs(); let info = lean_ptr_to_source_info(info); - Syntax::Atom( - info, - val.as_string().to_string(), - ) + Syntax::Atom(info, val.as_string().to_string()) }, 3 => { let [info, raw_val, val, preresolved] = ctor.objs(); @@ -274,9 +267,7 @@ fn lean_ptr_to_name_data_value( let dv_ctor = data_value_obj.as_ctor(); let [inner] = dv_ctor.objs::<1>(); let data_value = match dv_ctor.tag() { - 0 => DataValue::OfString( - inner.as_string().to_string(), - ), + 0 => DataValue::OfString(inner.as_string().to_string()), 1 => DataValue::OfBool(inner.as_ptr() as usize == 1), 2 => DataValue::OfName(lean_ptr_to_name(inner, cache.global)), 3 => DataValue::OfNat(Nat::from_obj(inner)), @@ -381,9 +372,7 @@ pub fn lean_ptr_to_expr(obj: LeanObj, cache: &mut Cache<'_>) -> Expr { let [inner] = lit_ctor.objs::<1>(); match lit_ctor.tag() { 0 => Expr::lit(Literal::NatVal(Nat::from_obj(inner))), - 1 => Expr::lit(Literal::StrVal( - inner.as_string().to_string(), - )), + 1 => Expr::lit(Literal::StrVal(inner.as_string().to_string())), _ => unreachable!(), } }, @@ -523,8 +512,15 @@ pub fn lean_ptr_to_constant_info( ConstantInfo::QuotInfo(QuotVal { cnst: constant_val, kind }) }, 5 => { - let [constant_val, num_params, num_indices, all, ctors, num_nested, bools] = - inner.objs(); + let [ + constant_val, + num_params, + num_indices, + all, + ctors, + num_nested, + bools, + ] = inner.objs(); let constant_val = lean_ptr_to_constant_val(constant_val, cache); let num_params = Nat::from_obj(num_params); let num_indices = Nat::from_obj(num_indices); diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs index bdeaa67e..084d2009 100644 --- a/src/lean/ffi/primitives.rs +++ b/src/lean/ffi/primitives.rs @@ -37,9 +37,10 @@ pub 
fn build_nat(n: &Nat) -> LeanObj { limbs.push(u64::from_le_bytes(arr)); } unsafe { - LeanObj::from_raw( - crate::lean::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()), - ) + LeanObj::from_raw(crate::lean::nat::lean_nat_from_limbs( + limbs.len(), + limbs.as_ptr(), + )) } } From dc3789c5920becff0df488fd1a9ee32dad4c6935 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 11:36:12 -0500 Subject: [PATCH 11/27] Use descriptive types and rename LeanObj->LeanObject --- src/iroh/_client.rs | 18 +- src/iroh/_server.rs | 2 +- src/iroh/client.rs | 36 ++-- src/iroh/server.rs | 2 +- src/lean.rs | 6 +- src/lean/ffi.rs | 28 ++- src/lean/ffi/aiur.rs | 6 +- src/lean/ffi/aiur/protocol.rs | 42 ++-- src/lean/ffi/aiur/toplevel.rs | 18 +- src/lean/ffi/builder.rs | 8 +- src/lean/ffi/byte_array.rs | 2 +- src/lean/ffi/compile.rs | 165 ++++++++------- src/lean/ffi/graph.rs | 10 +- src/lean/ffi/ix/address.rs | 6 +- src/lean/ffi/ix/constant.rs | 26 +-- src/lean/ffi/ix/data.rs | 86 ++++---- src/lean/ffi/ix/env.rs | 50 +++-- src/lean/ffi/ix/expr.rs | 44 ++-- src/lean/ffi/ix/level.rs | 22 +- src/lean/ffi/ix/name.rs | 16 +- src/lean/ffi/ixon/compare.rs | 35 ++-- src/lean/ffi/ixon/constant.rs | 181 ++++++++++------- src/lean/ffi/ixon/enums.rs | 40 ++-- src/lean/ffi/ixon/env.rs | 48 +++-- src/lean/ffi/ixon/expr.rs | 16 +- src/lean/ffi/ixon/meta.rs | 86 ++++---- src/lean/ffi/ixon/serialize.rs | 65 +++--- src/lean/ffi/ixon/sharing.rs | 36 ++-- src/lean/ffi/ixon/univ.rs | 22 +- src/lean/ffi/keccak.rs | 4 +- src/lean/ffi/lean_env.rs | 40 ++-- src/lean/ffi/primitives.rs | 72 +++---- src/lean/ffi/unsigned.rs | 2 +- src/lean/nat.rs | 14 +- src/lean/{obj.rs => object.rs} | 356 +++++++++++++++++---------------- src/sha256.rs | 2 +- 36 files changed, 847 insertions(+), 765 deletions(-) rename src/lean/{obj.rs => object.rs} (74%) diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs index 98a0f631..aac86d51 100644 --- a/src/iroh/_client.rs 
+++ b/src/iroh/_client.rs @@ -1,4 +1,4 @@ -use crate::lean::obj::{LeanExcept, LeanObj}; +use crate::lean::object::{LeanExcept, LeanObject}; const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS aarch64-darwin"; @@ -6,10 +6,10 @@ const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_put( - _node_id: LeanObj, - _addrs: LeanObj, - _relay_url: LeanObj, - _input: LeanObj, + _node_id: LeanObject, + _addrs: LeanObject, + _relay_url: LeanObject, + _input: LeanObject, ) -> LeanExcept { LeanExcept::error_string(ERR_MSG) } @@ -17,10 +17,10 @@ extern "C" fn rs_iroh_put( /// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_get( - _node_id: LeanObj, - _addrs: LeanObj, - _relay_url: LeanObj, - _hash: LeanObj, + _node_id: LeanObject, + _addrs: LeanObject, + _relay_url: LeanObject, + _hash: LeanObject, ) -> LeanExcept { LeanExcept::error_string(ERR_MSG) } diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs index 87ba4b6c..228f0d4e 100644 --- a/src/iroh/_server.rs +++ b/src/iroh/_server.rs @@ -1,4 +1,4 @@ -use crate::lean::obj::LeanExcept; +use crate::lean::object::LeanExcept; /// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] diff --git a/src/iroh/client.rs b/src/iroh/client.rs index a760b73f..daf04dea 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -8,8 +8,8 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; -use crate::lean::obj::{ - LeanByteArray, LeanCtor, LeanExcept, LeanObj, LeanString, +use crate::lean::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanString, }; // An example ALPN that 
we are using to communicate over the `Endpoint` @@ -49,15 +49,15 @@ fn mk_get_response(message: &str, hash: &str, bytes: &[u8]) -> LeanCtor { /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_put( - node_id: LeanObj, - addrs: LeanObj, - relay_url: LeanObj, - input: LeanObj, + node_id: LeanString, + addrs: LeanArray, + relay_url: LeanString, + input: LeanString, ) -> LeanExcept { - let node_id = node_id.as_string().to_string(); - let addrs: Vec = addrs.as_array().map(|x| x.as_string().to_string()); - let relay_url = relay_url.as_string().to_string(); - let input_str = input.as_string().to_string(); + let node_id = node_id.to_string(); + let addrs: Vec = addrs.map(|x| x.as_string().to_string()); + let relay_url = relay_url.to_string(); + let input_str = input.to_string(); let request = Request::Put(PutRequest { bytes: input_str.as_bytes().to_vec() }); @@ -79,15 +79,15 @@ extern "C" fn rs_iroh_put( /// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_get( - node_id: LeanObj, - addrs: LeanObj, - relay_url: LeanObj, - hash: LeanObj, + node_id: LeanString, + addrs: LeanArray, + relay_url: LeanString, + hash: LeanString, ) -> LeanExcept { - let node_id = node_id.as_string().to_string(); - let addrs: Vec = addrs.as_array().map(|x| x.as_string().to_string()); - let relay_url = relay_url.as_string().to_string(); - let hash_str = hash.as_string().to_string(); + let node_id = node_id.to_string(); + let addrs: Vec = addrs.map(|x| x.as_string().to_string()); + let relay_url = relay_url.to_string(); + let hash_str = hash.to_string(); let request = Request::Get(GetRequest { hash: hash_str.clone() }); diff --git a/src/iroh/server.rs b/src/iroh/server.rs index 69947587..07ce3fd6 100644 --- a/src/iroh/server.rs +++ b/src/iroh/server.rs @@ -11,7 +11,7 @@ use 
tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetResponse, PutResponse, Request, Response}; -use crate::lean::obj::LeanExcept; +use crate::lean::object::LeanExcept; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; diff --git a/src/lean.rs b/src/lean.rs index 59b760df..bdfaa183 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -18,13 +18,13 @@ clippy::cast_possible_truncation, clippy::derive_partial_eq_without_eq )] -pub mod lean { +pub mod lean_sys { include!(concat!(env!("OUT_DIR"), "/lean.rs")); } pub mod ffi; pub mod nat; -pub mod obj; +pub mod object; use std::ffi::{CString, c_void}; @@ -45,6 +45,6 @@ pub fn safe_cstring(s: &str) -> CString { /// Must only be used as a `lean_external_foreach_fn` callback. pub unsafe extern "C" fn noop_foreach( _: *mut c_void, - _: *mut lean::lean_object, + _: *mut lean_sys::lean_object, ) { } diff --git a/src/lean/ffi.rs b/src/lean/ffi.rs index 9fecad9a..718cc26b 100644 --- a/src/lean/ffi.rs +++ b/src/lean/ffi.rs @@ -12,17 +12,17 @@ pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. -use crate::lean::lean::{ +use crate::lean::lean_sys::{ lean_io_result_mk_error, lean_io_result_mk_ok, lean_mk_io_user_error, }; -use crate::lean::obj::{LeanObj, LeanString}; +use crate::lean::object::{LeanArray, LeanByteArray, LeanObject, LeanString}; /// Guard an FFI function that returns a Lean IO result against panics. /// On panic, returns a Lean IO error with the panic message instead of /// unwinding across the `extern "C"` boundary (which is undefined behavior). 
-pub(crate) fn ffi_io_guard(f: F) -> LeanObj +pub(crate) fn ffi_io_guard(f: F) -> LeanObject where - F: FnOnce() -> LeanObj + std::panic::UnwindSafe, + F: FnOnce() -> LeanObject + std::panic::UnwindSafe, { match std::panic::catch_unwind(f) { Ok(result) => result, @@ -40,33 +40,31 @@ where } /// Wrap a Lean value in an IO success result. -pub(crate) fn io_ok(val: impl Into) -> LeanObj { - let val: LeanObj = val.into(); +pub(crate) fn io_ok(val: impl Into) -> LeanObject { + let val: LeanObject = val.into(); unsafe { - LeanObj::from_raw(lean_io_result_mk_ok(val.as_mut_ptr().cast()).cast()) + LeanObject::from_raw(lean_io_result_mk_ok(val.as_mut_ptr().cast()).cast()) } } /// Create a Lean IO error result from a Rust error message. -pub(crate) fn io_error(msg: &str) -> LeanObj { +pub(crate) fn io_error(msg: &str) -> LeanObject { let lean_msg = LeanString::from_str(msg); unsafe { let lean_err = lean_mk_io_user_error(lean_msg.as_mut_ptr().cast()); - LeanObj::from_raw(lean_io_result_mk_error(lean_err).cast()) + LeanObject::from_raw(lean_io_result_mk_error(lean_err).cast()) } } #[unsafe(no_mangle)] extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( - u32s: LeanObj, - bytes: LeanObj, + u32s: LeanArray, + bytes: LeanByteArray, ) -> bool { - let arr = u32s.as_array(); - let u32s_flat: Vec = arr + let u32s_flat: Vec = u32s .map(|elem| elem.unbox_u32()) .into_iter() .flat_map(u32::to_le_bytes) .collect(); - let ba = bytes.as_byte_array(); - u32s_flat == ba.as_bytes() + u32s_flat == bytes.as_bytes() } diff --git a/src/lean/ffi/aiur.rs b/src/lean/ffi/aiur.rs index 27ab8e52..42b8423a 100644 --- a/src/lean/ffi/aiur.rs +++ b/src/lean/ffi/aiur.rs @@ -3,16 +3,16 @@ use multi_stark::p3_field::integers::QuotientMap; pub mod protocol; pub mod toplevel; -use crate::{aiur::G, lean::obj::LeanObj}; +use crate::{aiur::G, lean::object::LeanObject}; #[inline] -pub(super) fn lean_unbox_nat_as_usize(obj: LeanObj) -> usize { +pub(super) fn lean_unbox_nat_as_usize(obj: LeanObject) -> usize { 
assert!(obj.is_scalar()); obj.unbox_usize() } #[inline] -pub(super) fn lean_unbox_g(obj: LeanObj) -> G { +pub(super) fn lean_unbox_g(obj: LeanObject) -> G { let u64 = obj.unbox_u64(); unsafe { G::from_canonical_unchecked(u64) } } diff --git a/src/lean/ffi/aiur/protocol.rs b/src/lean/ffi/aiur/protocol.rs index c8dbe4d6..d4f29485 100644 --- a/src/lean/ffi/aiur/protocol.rs +++ b/src/lean/ffi/aiur/protocol.rs @@ -16,9 +16,9 @@ use crate::{ ffi::aiur::{ lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, }, - obj::{ + object::{ ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, - LeanExternal, LeanObj, + LeanExternal, LeanObject, }, }, }; @@ -64,8 +64,8 @@ extern "C" fn rs_aiur_proof_of_bytes( /// `AiurSystem.build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem` #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_build( - toplevel: LeanObj, - commitment_parameters: LeanObj, + toplevel: LeanObject, + commitment_parameters: LeanObject, ) -> LeanExternal { let system = AiurSystem::build( lean_ptr_to_toplevel(toplevel), @@ -78,14 +78,14 @@ extern "C" fn rs_aiur_system_build( #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_verify( aiur_system_obj: LeanExternal, - fri_parameters: LeanObj, - claim: LeanObj, + fri_parameters: LeanObject, + claim: LeanObject, proof_obj: LeanExternal, ) -> LeanExcept { let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); let claim = claim.as_array().map(lean_unbox_g); match aiur_system_obj.get().verify(fri_parameters, &claim, proof_obj.get()) { - Ok(()) => LeanExcept::ok(LeanObj::box_usize(0)), + Ok(()) => LeanExcept::ok(LeanObject::box_usize(0)), Err(err) => LeanExcept::error_string(&format!("{err:?}")), } } @@ -95,12 +95,12 @@ extern "C" fn rs_aiur_system_verify( #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_prove( aiur_system_obj: LeanExternal, - fri_parameters: LeanObj, - fun_idx: LeanObj, - args: LeanObj, - io_data_arr: LeanObj, - io_map_arr: LeanObj, -) -> LeanObj { + 
fri_parameters: LeanObject, + fun_idx: LeanObject, + args: LeanObject, + io_data_arr: LeanObject, + io_map_arr: LeanObject, +) -> LeanObject { let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); let fun_idx = lean_unbox_nat_as_usize(fun_idx); let args = args.as_array().map(lean_unbox_g); @@ -127,8 +127,8 @@ extern "C" fn rs_aiur_system_prove( let key_arr = build_g_array(key); // IOKeyInfo ctor (tag 0, 2 object fields) let key_info = LeanCtor::alloc(0, 2, 0); - key_info.set(0, LeanObj::box_usize(info.idx)); - key_info.set(1, LeanObj::box_usize(info.len)); + key_info.set(0, LeanObject::box_usize(info.idx)); + key_info.set(1, LeanObject::box_usize(info.len)); // (Array G × IOKeyInfo) tuple let map_elt = LeanCtor::alloc(0, 2, 0); map_elt.set(0, key_arr); @@ -159,19 +159,19 @@ extern "C" fn rs_aiur_system_prove( // ============================================================================= /// Build a Lean `Array G` from a slice of field elements. -fn build_g_array(values: &[G]) -> LeanObj { +fn build_g_array(values: &[G]) -> LeanObject { let arr = LeanArray::alloc(values.len()); for (i, g) in values.iter().enumerate() { - arr.set(i, LeanObj::box_u64(g.as_canonical_u64())); + arr.set(i, LeanObject::box_u64(g.as_canonical_u64())); } *arr } -fn lean_ptr_to_commitment_parameters(obj: LeanObj) -> CommitmentParameters { +fn lean_ptr_to_commitment_parameters(obj: LeanObject) -> CommitmentParameters { CommitmentParameters { log_blowup: lean_unbox_nat_as_usize(obj) } } -fn lean_ctor_to_fri_parameters(obj: LeanObj) -> FriParameters { +fn lean_ctor_to_fri_parameters(obj: LeanObject) -> FriParameters { let ctor = obj.as_ctor(); FriParameters { log_final_poly_len: lean_unbox_nat_as_usize(ctor.get(0)), @@ -181,7 +181,9 @@ fn lean_ctor_to_fri_parameters(obj: LeanObj) -> FriParameters { } } -fn lean_array_to_io_buffer_map(obj: LeanObj) -> FxHashMap, IOKeyInfo> { +fn lean_array_to_io_buffer_map( + obj: LeanObject, +) -> FxHashMap, IOKeyInfo> { let arr = obj.as_array(); 
let mut map = FxHashMap::with_capacity_and_hasher(arr.len(), FxBuildHasher); for elt in arr.iter() { diff --git a/src/lean/ffi/aiur/toplevel.rs b/src/lean/ffi/aiur/toplevel.rs index ace58895..622be7ee 100644 --- a/src/lean/ffi/aiur/toplevel.rs +++ b/src/lean/ffi/aiur/toplevel.rs @@ -6,16 +6,16 @@ use crate::{ G, bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, }, - lean::obj::LeanObj, + lean::object::LeanObject, }; use crate::lean::ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}; -fn lean_ptr_to_vec_val_idx(obj: LeanObj) -> Vec { +fn lean_ptr_to_vec_val_idx(obj: LeanObject) -> Vec { obj.as_array().map(lean_unbox_nat_as_usize) } -fn lean_ptr_to_op(obj: LeanObj) -> Op { +fn lean_ptr_to_op(obj: LeanObject) -> Op { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -128,7 +128,7 @@ fn lean_ptr_to_op(obj: LeanObj) -> Op { } } -fn lean_ptr_to_g_block_pair(obj: LeanObj) -> (G, Block) { +fn lean_ptr_to_g_block_pair(obj: LeanObject) -> (G, Block) { let ctor = obj.as_ctor(); let [g_obj, block_obj] = ctor.objs::<2>(); let g = lean_unbox_g(g_obj); @@ -136,7 +136,7 @@ fn lean_ptr_to_g_block_pair(obj: LeanObj) -> (G, Block) { (g, block) } -fn lean_ptr_to_ctrl(obj: LeanObj) -> Ctrl { +fn lean_ptr_to_ctrl(obj: LeanObject) -> Ctrl { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -163,7 +163,7 @@ fn lean_ptr_to_ctrl(obj: LeanObj) -> Ctrl { } } -fn lean_ptr_to_block(obj: LeanObj) -> Block { +fn lean_ptr_to_block(obj: LeanObject) -> Block { let ctor = obj.as_ctor(); let [ops_obj, ctrl_obj, min_sel_obj, max_sel_obj] = ctor.objs::<4>(); let ops = ops_obj.as_array().map(lean_ptr_to_op); @@ -173,7 +173,7 @@ fn lean_ptr_to_block(obj: LeanObj) -> Block { Block { ops, ctrl, min_sel_included, max_sel_excluded } } -fn lean_ptr_to_function_layout(obj: LeanObj) -> FunctionLayout { +fn lean_ptr_to_function_layout(obj: LeanObject) -> FunctionLayout { let ctor = obj.as_ctor(); let [input_size, selectors, auxiliaries, lookups] = ctor.objs::<4>(); FunctionLayout 
{ @@ -184,7 +184,7 @@ fn lean_ptr_to_function_layout(obj: LeanObj) -> FunctionLayout { } } -fn lean_ptr_to_function(obj: LeanObj) -> Function { +fn lean_ptr_to_function(obj: LeanObject) -> Function { let ctor = obj.as_ctor(); let [body_obj, layout_obj, unconstrained_obj] = ctor.objs::<3>(); let body = lean_ptr_to_block(body_obj); @@ -193,7 +193,7 @@ fn lean_ptr_to_function(obj: LeanObj) -> Function { Function { body, layout, unconstrained } } -pub(crate) fn lean_ptr_to_toplevel(obj: LeanObj) -> Toplevel { +pub(crate) fn lean_ptr_to_toplevel(obj: LeanObject) -> Toplevel { let ctor = obj.as_ctor(); let [functions_obj, memory_sizes_obj] = ctor.objs::<2>(); let functions = functions_obj.as_array().map(lean_ptr_to_function); diff --git a/src/lean/ffi/builder.rs b/src/lean/ffi/builder.rs index d3bf73a0..8e85a25b 100644 --- a/src/lean/ffi/builder.rs +++ b/src/lean/ffi/builder.rs @@ -3,16 +3,16 @@ use blake3::Hash; use rustc_hash::FxHashMap; -use crate::lean::obj::{IxExpr, IxLevel, IxName}; +use crate::lean::object::{LeanIxExpr, LeanIxLevel, LeanIxName}; /// Cache for constructing Lean Ix types with deduplication. /// /// This struct maintains caches for names, levels, and expressions to avoid /// rebuilding the same Lean objects multiple times during environment construction. 
pub struct LeanBuildCache { - pub(crate) names: FxHashMap, - pub(crate) levels: FxHashMap, - pub(crate) exprs: FxHashMap, + pub(crate) names: FxHashMap, + pub(crate) levels: FxHashMap, + pub(crate) exprs: FxHashMap, } impl LeanBuildCache { diff --git a/src/lean/ffi/byte_array.rs b/src/lean/ffi/byte_array.rs index c9f3a3b4..7e247e07 100644 --- a/src/lean/ffi/byte_array.rs +++ b/src/lean/ffi/byte_array.rs @@ -1,4 +1,4 @@ -use crate::lean::obj::LeanByteArray; +use crate::lean::object::LeanByteArray; /// `@& ByteArray → @& ByteArray → Bool` /// Efficient implementation for `BEq ByteArray` diff --git a/src/lean/ffi/compile.rs b/src/lean/ffi/compile.rs index d2c01fb7..6b20c5b7 100644 --- a/src/lean/ffi/compile.rs +++ b/src/lean/ffi/compile.rs @@ -21,10 +21,13 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; use crate::lean::ffi::{ffi_io_guard, io_error, io_ok}; -use crate::lean::lean::lean_uint64_to_nat; +use crate::lean::lean_sys::lean_uint64_to_nat; use crate::lean::nat::Nat; -use crate::lean::obj::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObj, LeanString, +use crate::lean::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanIxBlockCompareDetail, + LeanIxBlockCompareResult, LeanIxCompileError, LeanIxCompilePhases, + LeanIxCondensedBlocks, LeanIxDecompileError, LeanIxSerializeError, + LeanIxonRawEnv, LeanObject, LeanString, }; use dashmap::DashMap; @@ -51,13 +54,13 @@ use crate::lean::ffi::lean_env::{ // ============================================================================= /// Build a Lean String from a Rust &str. -fn build_lean_string(s: &str) -> LeanObj { +fn build_lean_string(s: &str) -> LeanObject { LeanString::from_str(s).into() } /// Build a Lean Nat from a usize. 
-fn build_lean_nat_usize(n: usize) -> LeanObj { - unsafe { LeanObj::from_raw(lean_uint64_to_nat(n as u64).cast()) } +fn build_lean_nat_usize(n: usize) -> LeanObject { + unsafe { LeanObject::from_raw(lean_uint64_to_nat(n as u64).cast()) } } // ============================================================================= @@ -65,7 +68,7 @@ fn build_lean_nat_usize(n: usize) -> LeanObj { // ============================================================================= /// Build RawConst: { addr : Address, const : Ixon.Constant } -pub fn build_raw_const(addr: &Address, constant: &IxonConstant) -> LeanObj { +pub fn build_raw_const(addr: &Address, constant: &IxonConstant) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, build_ixon_constant(constant)); @@ -78,7 +81,7 @@ pub fn build_raw_named( name: &Name, addr: &Address, meta: &ConstantMeta, -) -> LeanObj { +) -> LeanObject { let ctor = LeanCtor::alloc(0, 3, 0); ctor.set(0, build_name(cache, name)); ctor.set(1, build_address_from_ixon(addr)); @@ -87,7 +90,7 @@ pub fn build_raw_named( } /// Build RawBlob: { addr : Address, bytes : ByteArray } -pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanObj { +pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, LeanByteArray::from_bytes(bytes)); @@ -95,7 +98,7 @@ pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanObj { } /// Build RawComm: { addr : Address, comm : Ixon.Comm } -pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanObj { +pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, build_ixon_comm(comm)); @@ -108,7 +111,9 @@ pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanObj { /// Round-trip a RustCondensedBlocks structure. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_rust_condensed_blocks(obj: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_rust_condensed_blocks( + obj: LeanIxCondensedBlocks, +) -> LeanIxCondensedBlocks { let ctor = obj.as_ctor(); let low_links = ctor.get(0); let blocks = ctor.get(1); @@ -122,12 +127,14 @@ pub extern "C" fn rs_roundtrip_rust_condensed_blocks(obj: LeanObj) -> LeanObj { result.set(0, low_links); result.set(1, blocks); result.set(2, block_refs); - *result + (*result).into() } /// Round-trip a RustCompilePhases structure. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_rust_compile_phases(obj: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_rust_compile_phases( + obj: LeanIxCompilePhases, +) -> LeanIxCompilePhases { let ctor = obj.as_ctor(); let raw_env = ctor.get(0); let condensed = ctor.get(1); @@ -141,7 +148,7 @@ pub extern "C" fn rs_roundtrip_rust_compile_phases(obj: LeanObj) -> LeanObj { result.set(0, raw_env); result.set(1, condensed); result.set(2, compile_env); - *result + (*result).into() } // ============================================================================= @@ -150,7 +157,9 @@ pub extern "C" fn rs_roundtrip_rust_compile_phases(obj: LeanObj) -> LeanObj { /// Round-trip a BlockCompareResult. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_block_compare_result(obj: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_block_compare_result( + obj: LeanIxBlockCompareResult, +) -> LeanIxBlockCompareResult { // Tags 0 (match) and 2 (notFound) have 0 fields → Lean represents as scalars if obj.is_scalar() { return obj; @@ -167,7 +176,7 @@ pub extern "C" fn rs_roundtrip_block_compare_result(obj: LeanObj) -> LeanObj { out.set_u64(0, lean_size); out.set_u64(8, rust_size); out.set_u64(16, first_diff); - *out + (*out).into() }, _ => unreachable!("Invalid BlockCompareResult tag: {}", ctor.tag()), } @@ -175,19 +184,21 @@ pub extern "C" fn rs_roundtrip_block_compare_result(obj: LeanObj) -> LeanObj { /// Round-trip a BlockCompareDetail. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_block_compare_detail(obj: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_block_compare_detail( + obj: LeanIxBlockCompareDetail, +) -> LeanIxBlockCompareDetail { let ctor = obj.as_ctor(); let result_ptr = ctor.get(0); let lean_sharing_len = ctor.scalar_u64(1, 0); let rust_sharing_len = ctor.scalar_u64(1, 8); - let result_obj = rs_roundtrip_block_compare_result(result_ptr); + let result_obj = rs_roundtrip_block_compare_result(result_ptr.into()); let out = LeanCtor::alloc(0, 1, 16); out.set(0, result_obj); - out.set_u64(1 * 8, lean_sharing_len); - out.set_u64(1 * 8 + 8, rust_sharing_len); - *out + out.set_u64(8, lean_sharing_len); + out.set_u64(16, rust_sharing_len); + (*out).into() } // ============================================================================= @@ -196,7 +207,9 @@ pub extern "C" fn rs_roundtrip_block_compare_detail(obj: LeanObj) -> LeanObj { /// FFI function to run the complete compilation pipeline and return all data. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env_full(env_consts_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_compile_env_full( + env_consts_ptr: LeanObject, +) -> LeanObject { ffi_io_guard(std::panic::AssertUnwindSafe(|| { // Phase 1: Decode Lean environment let rust_env = lean_ptr_to_env(env_consts_ptr); @@ -294,7 +307,7 @@ pub extern "C" fn rs_compile_env_full(env_consts_ptr: LeanObj) -> LeanObj { /// FFI function to compile a Lean environment to serialized Ixon.Env bytes. #[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanObject { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); @@ -323,14 +336,16 @@ pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObj) -> LeanObj { /// Round-trip a RawEnv: decode from Lean, re-encode via builder. /// This performs a full decode/build cycle to verify FFI correctness. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_raw_env(raw_env_obj: LeanObj) -> LeanObj { - let env = decode_raw_env(raw_env_obj); - build_raw_env(&env) +pub extern "C" fn rs_roundtrip_raw_env( + raw_env_obj: LeanIxonRawEnv, +) -> LeanIxonRawEnv { + let env = decode_raw_env(*raw_env_obj); + build_raw_env(&env).into() } /// FFI function to run all compilation phases and return combined results. #[unsafe(no_mangle)] -pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObject) -> LeanObject { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let env_len = rust_env.len(); @@ -428,7 +443,9 @@ pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObj) -> LeanObj { /// FFI function to compile a Lean environment to a RawEnv. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env_to_ixon(env_consts_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_compile_env_to_ixon( + env_consts_ptr: LeanObject, +) -> LeanObject { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); @@ -513,8 +530,8 @@ pub extern "C" fn rs_compile_env_to_ixon(env_consts_ptr: LeanObj) -> LeanObj { /// FFI function to canonicalize environment to Ix.RawEnvironment. #[unsafe(no_mangle)] pub extern "C" fn rs_canonicalize_env_to_ix( - env_consts_ptr: LeanObj, -) -> LeanObj { + env_consts_ptr: LeanObject, +) -> LeanObject { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); @@ -543,7 +560,7 @@ pub struct RustCompiledEnv { /// FFI: Simple test to verify FFI round-trip works. /// Takes a Lean.Name and returns a magic number to verify the call succeeded. #[unsafe(no_mangle)] -extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObj) -> u64 { +extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObject) -> u64 { let global_cache = GlobalCache::default(); let name = lean_ptr_to_name(name_ptr, &global_cache); @@ -560,7 +577,7 @@ extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObj) -> u64 { /// FFI: Compile entire environment with Rust, returning a handle to RustCompiledEnv. 
#[unsafe(no_mangle)] extern "C" fn rs_compile_env_rust_first( - env_consts_ptr: LeanObj, + env_consts_ptr: LeanObject, ) -> *mut RustCompiledEnv { // Decode Lean environment let lean_env = lean_ptr_to_env(env_consts_ptr); @@ -606,8 +623,8 @@ extern "C" fn rs_compile_env_rust_first( #[unsafe(no_mangle)] extern "C" fn rs_compare_block( rust_env: *const RustCompiledEnv, - lowlink_name: LeanObj, - lean_bytes: LeanObj, + lowlink_name: LeanObject, + lean_bytes: LeanByteArray, ) -> u64 { if rust_env.is_null() { return 2u64 << 32; // not found @@ -616,8 +633,7 @@ extern "C" fn rs_compare_block( let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let ba = lean_bytes.as_byte_array(); - let lean_data = ba.as_bytes(); + let lean_data = lean_bytes.as_bytes(); // Look up Rust's compiled block let rust_bytes = match rust_env.blocks.get(&name) { @@ -670,7 +686,7 @@ extern "C" fn rs_get_rust_env_block_count( #[unsafe(no_mangle)] extern "C" fn rs_get_block_bytes_len( rust_env: *const RustCompiledEnv, - lowlink_name: LeanObj, + lowlink_name: LeanObject, ) -> u64 { if rust_env.is_null() { return 0; @@ -690,8 +706,8 @@ extern "C" fn rs_get_block_bytes_len( #[unsafe(no_mangle)] extern "C" fn rs_copy_block_bytes( rust_env: *const RustCompiledEnv, - lowlink_name: LeanObj, - dest: LeanObj, + lowlink_name: LeanObject, + dest: LeanByteArray, ) { if rust_env.is_null() { return; @@ -707,15 +723,14 @@ extern "C" fn rs_copy_block_bytes( }; // Copy into the Lean ByteArray - let ba = dest.as_byte_array(); - unsafe { ba.set_data(bytes) }; + unsafe { dest.set_data(bytes) }; } /// FFI: Get Rust's sharing vector length for a block. 
#[unsafe(no_mangle)] extern "C" fn rs_get_block_sharing_len( rust_env: *const RustCompiledEnv, - lowlink_name: LeanObj, + lowlink_name: LeanObject, ) -> u64 { if rust_env.is_null() { return 0; @@ -834,8 +849,8 @@ fn unshare_expr( #[unsafe(no_mangle)] extern "C" fn rs_get_pre_sharing_exprs( rust_env: *const RustCompiledEnv, - lowlink_name: LeanObj, - out_buf: LeanObj, + lowlink_name: LeanObject, + out_buf: LeanByteArray, ) -> u64 { if rust_env.is_null() { return 0; @@ -927,8 +942,7 @@ extern "C" fn rs_get_pre_sharing_exprs( } // Write to output buffer - let ba = out_buf.as_byte_array(); - unsafe { ba.set_data(&output_bytes) }; + unsafe { out_buf.set_data(&output_bytes) }; n_exprs } @@ -937,7 +951,7 @@ extern "C" fn rs_get_pre_sharing_exprs( #[unsafe(no_mangle)] extern "C" fn rs_get_pre_sharing_exprs_len( rust_env: *const RustCompiledEnv, - lowlink_name: LeanObj, + lowlink_name: LeanObject, ) -> u64 { if rust_env.is_null() { return 0; @@ -997,8 +1011,8 @@ extern "C" fn rs_get_pre_sharing_exprs_len( #[unsafe(no_mangle)] extern "C" fn rs_lookup_const_addr( rust_env: *const RustCompiledEnv, - name_ptr: LeanObj, - out_addr: LeanObj, + name_ptr: LeanObject, + out_addr: LeanByteArray, ) -> u64 { if rust_env.is_null() { return 0; @@ -1012,8 +1026,7 @@ extern "C" fn rs_lookup_const_addr( match rust_env.compile_state.name_to_addr.get(&name) { Some(addr_ref) => { // Copy the 32-byte address into the output ByteArray - let ba = out_addr.as_byte_array(); - unsafe { ba.set_data(addr_ref.as_bytes()) }; + unsafe { out_addr.set_data(addr_ref.as_bytes()) }; 1 }, None => 0, @@ -1048,7 +1061,7 @@ use crate::ix::ixon::error::{CompileError, DecompileError, SerializeError}; /// 4: invalidBool (value : UInt8) → 0 obj + 1 scalar (UInt8) /// 5: addressError → 0 obj + 0 scalar /// 6: invalidShareIndex (idx : UInt64) (max : Nat) → 1 obj (Nat) + 8 scalar (UInt64) -pub fn build_serialize_error(se: &SerializeError) -> LeanObj { +pub fn build_serialize_error(se: &SerializeError) -> LeanObject { 
match se { SerializeError::UnexpectedEof { expected } => { let ctor = LeanCtor::alloc(0, 1, 0); @@ -1058,19 +1071,19 @@ pub fn build_serialize_error(se: &SerializeError) -> LeanObj { SerializeError::InvalidTag { tag, context } => { let ctor = LeanCtor::alloc(1, 1, 1); ctor.set(0, build_lean_string(context)); - ctor.set_u8(1 * 8, *tag); + ctor.set_u8(8, *tag); *ctor }, SerializeError::InvalidFlag { flag, context } => { let ctor = LeanCtor::alloc(2, 1, 1); ctor.set(0, build_lean_string(context)); - ctor.set_u8(1 * 8, *flag); + ctor.set_u8(8, *flag); *ctor }, SerializeError::InvalidVariant { variant, context } => { let ctor = LeanCtor::alloc(3, 1, 8); ctor.set(0, build_lean_string(context)); - ctor.set_u64(1 * 8, *variant); + ctor.set_u64(8, *variant); *ctor }, SerializeError::InvalidBool { value } => { @@ -1078,18 +1091,18 @@ pub fn build_serialize_error(se: &SerializeError) -> LeanObj { ctor.set_u8(0, *value); *ctor }, - SerializeError::AddressError => LeanObj::box_usize(5), + SerializeError::AddressError => LeanObject::box_usize(5), SerializeError::InvalidShareIndex { idx, max } => { let ctor = LeanCtor::alloc(6, 1, 8); ctor.set(0, build_lean_nat_usize(*max)); - ctor.set_u64(1 * 8, *idx); + ctor.set_u64(8, *idx); *ctor }, } } /// Decode a Lean Ixon.SerializeError to a Rust SerializeError. 
-pub fn decode_serialize_error(obj: LeanObj) -> SerializeError { +pub fn decode_serialize_error(obj: LeanObject) -> SerializeError { // Tag 5 (addressError) has 0 fields → Lean represents as scalar if obj.is_scalar() { let tag = obj.unbox_usize(); @@ -1141,7 +1154,7 @@ pub fn decode_serialize_error(obj: LeanObj) -> SerializeError { /// → 2 object fields (Nat, String) + 8 scalar bytes (UInt64) /// → `lean_alloc_ctor(tag, 2, 8)` /// → obj[0] = Nat, obj[1] = String, scalar[0] = UInt64 -pub fn build_decompile_error(err: &DecompileError) -> LeanObj { +pub fn build_decompile_error(err: &DecompileError) -> LeanObject { match err { DecompileError::InvalidRefIndex { idx, refs_len, constant } => { let ctor = LeanCtor::alloc(0, 2, 8); @@ -1213,7 +1226,7 @@ pub fn build_decompile_error(err: &DecompileError) -> LeanObj { } /// Decode a Lean DecompileError to a Rust DecompileError. -pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { +pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -1287,7 +1300,7 @@ pub fn decode_decompile_error(obj: LeanObj) -> DecompileError { /// 3: unsupportedExpr (desc : String) → 1 obj /// 4: unknownUnivParam (curr param : String) → 2 obj /// 5: serializeError (msg : String) → 1 obj -pub fn build_compile_error(err: &CompileError) -> LeanObj { +pub fn build_compile_error(err: &CompileError) -> LeanObject { match err { CompileError::MissingConstant { name } => { let ctor = LeanCtor::alloc(0, 1, 0); @@ -1324,7 +1337,7 @@ pub fn build_compile_error(err: &CompileError) -> LeanObj { } /// Decode a Lean CompileError to a Rust CompileError. -pub fn decode_compile_error(obj: LeanObj) -> CompileError { +pub fn decode_compile_error(obj: LeanObject) -> CompileError { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -1352,23 +1365,29 @@ pub fn decode_compile_error(obj: LeanObj) -> CompileError { /// FFI: Round-trip a DecompileError: Lean → Rust → Lean. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_decompile_error(obj: LeanObj) -> LeanObj { - let err = decode_decompile_error(obj); - build_decompile_error(&err) +pub extern "C" fn rs_roundtrip_decompile_error( + obj: LeanIxDecompileError, +) -> LeanIxDecompileError { + let err = decode_decompile_error(*obj); + build_decompile_error(&err).into() } /// FFI: Round-trip a CompileError: Lean → Rust → Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_compile_error(obj: LeanObj) -> LeanObj { - let err = decode_compile_error(obj); - build_compile_error(&err) +pub extern "C" fn rs_roundtrip_compile_error( + obj: LeanIxCompileError, +) -> LeanIxCompileError { + let err = decode_compile_error(*obj); + build_compile_error(&err).into() } /// FFI: Round-trip a SerializeError: Lean → Rust → Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_serialize_error(obj: LeanObj) -> LeanObj { - let err = decode_serialize_error(obj); - build_serialize_error(&err) +pub extern "C" fn rs_roundtrip_serialize_error( + obj: LeanIxSerializeError, +) -> LeanIxSerializeError { + let err = decode_serialize_error(*obj); + build_serialize_error(&err).into() } // ============================================================================= @@ -1377,8 +1396,8 @@ pub extern "C" fn rs_roundtrip_serialize_error(obj: LeanObj) -> LeanObj { /// FFI: Decompile an Ixon.RawEnv → Except DecompileError (Array (Ix.Name × Ix.ConstantInfo)). Pure. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_decompile_env(raw_env_obj: LeanObj) -> LeanObj { - let decoded = decode_raw_env(raw_env_obj); +pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanObject { + let decoded = decode_raw_env(*raw_env_obj); let env = decoded_to_ixon_env(&decoded); // Wrap in CompileState (decompile_env only uses .env) diff --git a/src/lean/ffi/graph.rs b/src/lean/ffi/graph.rs index 103f79f1..f4e5846e 100644 --- a/src/lean/ffi/graph.rs +++ b/src/lean/ffi/graph.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; use crate::lean::ffi::{ffi_io_guard, io_ok}; -use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; +use crate::lean::object::{LeanArray, LeanCtor, LeanObject}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::name::build_name; @@ -15,7 +15,7 @@ use crate::lean::ffi::lean_env::lean_ptr_to_env; pub fn build_ref_graph_array( cache: &mut LeanBuildCache, refs: &crate::ix::graph::RefMap, -) -> LeanObj { +) -> LeanObject { let arr = LeanArray::alloc(refs.len()); for (i, (name, ref_set)) in refs.iter().enumerate() { let name_obj = build_name(cache, name); @@ -38,7 +38,7 @@ pub fn build_ref_graph_array( pub fn build_condensed_blocks( cache: &mut LeanBuildCache, condensed: &crate::ix::condense::CondensedBlocks, -) -> LeanObj { +) -> LeanObject { // Build lowLinks: Array (Ix.Name × Ix.Name) let low_links_arr = LeanArray::alloc(condensed.low_links.len()); for (i, (name, low_link)) in condensed.low_links.iter().enumerate() { @@ -94,7 +94,7 @@ pub fn build_condensed_blocks( /// FFI function to build a reference graph from a Lean environment. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObject) -> LeanObject { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); @@ -107,7 +107,7 @@ pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObj) -> LeanObj { /// FFI function to compute SCCs from a Lean environment. #[unsafe(no_mangle)] -pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObject) -> LeanObject { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); diff --git a/src/lean/ffi/ix/address.rs b/src/lean/ffi/ix/address.rs index 61f8f443..e885aed4 100644 --- a/src/lean/ffi/ix/address.rs +++ b/src/lean/ffi/ix/address.rs @@ -2,18 +2,18 @@ //! //! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash -use crate::lean::obj::{IxAddress, LeanByteArray}; +use crate::lean::object::{LeanIxAddress, LeanByteArray}; /// Build a Ix.Address from a blake3::Hash. /// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray -pub fn build_address(hash: &blake3::Hash) -> IxAddress { +pub fn build_address(hash: &blake3::Hash) -> LeanIxAddress { LeanByteArray::from_bytes(hash.as_bytes()) } /// Round-trip an Ix.Address: decode ByteArray, re-encode. 
/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray directly #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_address(addr: IxAddress) -> IxAddress { +pub extern "C" fn rs_roundtrip_ix_address(addr: LeanIxAddress) -> LeanIxAddress { // Address is a single-field struct { hash : ByteArray } // Due to unboxing, addr IS the ByteArray directly LeanByteArray::from_bytes(addr.as_bytes()) diff --git a/src/lean/ffi/ix/constant.rs b/src/lean/ffi/ix/constant.rs index 73153ac5..34a7b016 100644 --- a/src/lean/ffi/ix/constant.rs +++ b/src/lean/ffi/ix/constant.rs @@ -16,7 +16,7 @@ use crate::ix::env::{ RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; use crate::lean::nat::Nat; -use crate::lean::obj::{IxConstantInfo, LeanArray, LeanCtor, LeanObj}; +use crate::lean::object::{LeanIxConstantInfo, LeanArray, LeanCtor, LeanObject}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::expr::{build_expr, decode_ix_expr}; @@ -29,7 +29,7 @@ use crate::lean::ffi::primitives::build_nat; pub fn build_constant_val( cache: &mut LeanBuildCache, cv: &ConstantVal, -) -> LeanObj { +) -> LeanObject { // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } let name_obj = build_name(cache, &cv.name); let level_params_obj = build_name_array(cache, &cv.level_params); @@ -45,12 +45,12 @@ pub fn build_constant_val( /// Build ReducibilityHints. /// NOTE: In Lean 4, 0-field constructors are boxed scalars when the inductive has /// other constructors with fields. So opaque and abbrev use box_usize. 
-pub fn build_reducibility_hints(hints: &ReducibilityHints) -> LeanObj { +pub fn build_reducibility_hints(hints: &ReducibilityHints) -> LeanObject { match hints { // | opaque -- tag 0, boxed as scalar - ReducibilityHints::Opaque => LeanObj::box_usize(0), + ReducibilityHints::Opaque => LeanObject::box_usize(0), // | abbrev -- tag 1, boxed as scalar - ReducibilityHints::Abbrev => LeanObj::box_usize(1), + ReducibilityHints::Abbrev => LeanObject::box_usize(1), // | regular (h : UInt32) -- tag 2, object constructor ReducibilityHints::Regular(h) => { // UInt32 is a scalar, stored inline @@ -69,7 +69,7 @@ pub fn build_reducibility_hints(hints: &ReducibilityHints) -> LeanObj { pub fn build_constant_info( cache: &mut LeanBuildCache, info: &ConstantInfo, -) -> IxConstantInfo { +) -> LeanIxConstantInfo { let result = match info { // | axiomInfo (v : AxiomVal) -- tag 0 ConstantInfo::AxiomInfo(v) => { @@ -238,7 +238,7 @@ pub fn build_constant_info( }, }; - IxConstantInfo::new(result) + LeanIxConstantInfo::new(result) } /// Build an Array of RecursorRule. @@ -269,7 +269,7 @@ fn build_recursor_rules( /// Decode Ix.ConstantVal from Lean object. /// ConstantVal = { name : Name, levelParams : Array Name, type : Expr } -pub fn decode_constant_val(obj: LeanObj) -> ConstantVal { +pub fn decode_constant_val(obj: LeanObject) -> ConstantVal { let ctor = obj.as_ctor(); let name = decode_ix_name(ctor.get(0)); let level_params: Vec = ctor.get(1).as_array().map(decode_ix_name); @@ -279,7 +279,7 @@ pub fn decode_constant_val(obj: LeanObj) -> ConstantVal { } /// Decode Lean.ReducibilityHints from Lean object. -pub fn decode_reducibility_hints(obj: LeanObj) -> ReducibilityHints { +pub fn decode_reducibility_hints(obj: LeanObject) -> ReducibilityHints { if obj.is_scalar() { let tag = obj.as_ptr() as usize >> 1; match tag { @@ -303,7 +303,7 @@ pub fn decode_reducibility_hints(obj: LeanObj) -> ReducibilityHints { } /// Decode Ix.RecursorRule from Lean object. 
-fn decode_recursor_rule(obj: LeanObj) -> RecursorRule { +fn decode_recursor_rule(obj: LeanObject) -> RecursorRule { let ctor = obj.as_ctor(); RecursorRule { ctor: decode_ix_name(ctor.get(0)), @@ -313,7 +313,7 @@ fn decode_recursor_rule(obj: LeanObj) -> RecursorRule { } /// Decode Ix.ConstantInfo from Lean object. -pub fn decode_constant_info(obj: LeanObj) -> ConstantInfo { +pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { let outer = obj.as_ctor(); let inner_obj = outer.get(0); let inner = inner_obj.as_ctor(); @@ -429,8 +429,8 @@ pub fn decode_constant_info(obj: LeanObj) -> ConstantInfo { /// Round-trip an Ix.ConstantInfo: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_constant_info( - info_ptr: IxConstantInfo, -) -> IxConstantInfo { + info_ptr: LeanIxConstantInfo, +) -> LeanIxConstantInfo { let info = decode_constant_info(*info_ptr); let mut cache = LeanBuildCache::new(); build_constant_info(&mut cache, &info) diff --git a/src/lean/ffi/ix/data.rs b/src/lean/ffi/ix/data.rs index 188296ba..e6aab8ab 100644 --- a/src/lean/ffi/ix/data.rs +++ b/src/lean/ffi/ix/data.rs @@ -4,9 +4,9 @@ use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, }; use crate::lean::nat::Nat; -use crate::lean::obj::{ - IxDataValue, IxInt, IxSourceInfo, IxSubstring, IxSyntax, IxSyntaxPreresolved, - LeanArray, LeanCtor, LeanObj, LeanString, +use crate::lean::object::{ + LeanIxDataValue, LeanIxInt, LeanIxSourceInfo, LeanIxSubstring, LeanIxSyntax, + LeanIxSyntaxPreresolved, LeanArray, LeanCtor, LeanObject, LeanString, }; use crate::lean::ffi::builder::LeanBuildCache; @@ -14,32 +14,32 @@ use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; use crate::lean::ffi::primitives::build_nat; /// Build a Ix.Int (ofNat or negSucc). 
-pub fn build_int(int: &Int) -> IxInt { +pub fn build_int(int: &Int) -> LeanIxInt { match int { Int::OfNat(n) => { let obj = LeanCtor::alloc(0, 1, 0); obj.set(0, build_nat(n)); - IxInt::new(*obj) + LeanIxInt::new(*obj) }, Int::NegSucc(n) => { let obj = LeanCtor::alloc(1, 1, 0); obj.set(0, build_nat(n)); - IxInt::new(*obj) + LeanIxInt::new(*obj) }, } } /// Build a Ix.Substring. -pub fn build_substring(ss: &Substring) -> IxSubstring { +pub fn build_substring(ss: &Substring) -> LeanIxSubstring { let obj = LeanCtor::alloc(0, 3, 0); obj.set(0, LeanString::from_str(ss.str.as_str())); obj.set(1, build_nat(&ss.start_pos)); obj.set(2, build_nat(&ss.stop_pos)); - IxSubstring::new(*obj) + LeanIxSubstring::new(*obj) } /// Build a Ix.SourceInfo. -pub fn build_source_info(si: &SourceInfo) -> IxSourceInfo { +pub fn build_source_info(si: &SourceInfo) -> LeanIxSourceInfo { match si { // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0 SourceInfo::Original(leading, pos, trailing, end_pos) => { @@ -48,7 +48,7 @@ pub fn build_source_info(si: &SourceInfo) -> IxSourceInfo { obj.set(1, build_nat(pos)); obj.set(2, build_substring(trailing)); obj.set(3, build_nat(end_pos)); - IxSourceInfo::new(*obj) + LeanIxSourceInfo::new(*obj) }, // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1 SourceInfo::Synthetic(pos, end_pos, canonical) => { @@ -56,10 +56,10 @@ pub fn build_source_info(si: &SourceInfo) -> IxSourceInfo { obj.set(0, build_nat(pos)); obj.set(1, build_nat(end_pos)); obj.set_u8(2 * 8, *canonical as u8); - IxSourceInfo::new(*obj) + LeanIxSourceInfo::new(*obj) }, // | none -- tag 2 - SourceInfo::None => IxSourceInfo::new(*LeanCtor::alloc(2, 0, 0)), + SourceInfo::None => LeanIxSourceInfo::new(*LeanCtor::alloc(2, 0, 0)), } } @@ -67,13 +67,13 @@ pub fn build_source_info(si: &SourceInfo) -> IxSourceInfo { pub fn build_syntax_preresolved( cache: &mut LeanBuildCache, sp: &SyntaxPreresolved, -) -> IxSyntaxPreresolved { +) -> 
LeanIxSyntaxPreresolved { match sp { // | namespace (name : Name) -- tag 0 SyntaxPreresolved::Namespace(name) => { let obj = LeanCtor::alloc(0, 1, 0); obj.set(0, build_name(cache, name)); - IxSyntaxPreresolved::new(*obj) + LeanIxSyntaxPreresolved::new(*obj) }, // | decl (name : Name) (aliases : Array String) -- tag 1 SyntaxPreresolved::Decl(name, aliases) => { @@ -82,7 +82,7 @@ pub fn build_syntax_preresolved( let obj = LeanCtor::alloc(1, 2, 0); obj.set(0, name_obj); obj.set(1, aliases_obj); - IxSyntaxPreresolved::new(*obj) + LeanIxSyntaxPreresolved::new(*obj) }, } } @@ -97,10 +97,10 @@ pub fn build_string_array(strings: &[String]) -> LeanArray { } /// Build a Ix.Syntax. -pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> IxSyntax { +pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> LeanIxSyntax { match syn { // | missing -- tag 0 - Syntax::Missing => IxSyntax::new(*LeanCtor::alloc(0, 0, 0)), + Syntax::Missing => LeanIxSyntax::new(*LeanCtor::alloc(0, 0, 0)), // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 Syntax::Node(info, kind, args) => { let info_obj = build_source_info(info); @@ -110,7 +110,7 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> IxSyntax { obj.set(0, info_obj); obj.set(1, kind_obj); obj.set(2, args_obj); - IxSyntax::new(*obj) + LeanIxSyntax::new(*obj) }, // | atom (info : SourceInfo) (val : String) -- tag 2 Syntax::Atom(info, val) => { @@ -118,7 +118,7 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> IxSyntax { let obj = LeanCtor::alloc(2, 2, 0); obj.set(0, info_obj); obj.set(1, LeanString::from_str(val.as_str())); - IxSyntax::new(*obj) + LeanIxSyntax::new(*obj) }, // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 Syntax::Ident(info, raw_val, val, preresolved) => { @@ -131,7 +131,7 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> IxSyntax { obj.set(1, raw_val_obj); obj.set(2, 
val_obj); obj.set(3, preresolved_obj); - IxSyntax::new(*obj) + LeanIxSyntax::new(*obj) }, } } @@ -164,38 +164,38 @@ pub fn build_syntax_preresolved_array( pub fn build_data_value( cache: &mut LeanBuildCache, dv: &DataValue, -) -> IxDataValue { +) -> LeanIxDataValue { match dv { DataValue::OfString(s) => { let obj = LeanCtor::alloc(0, 1, 0); obj.set(0, LeanString::from_str(s.as_str())); - IxDataValue::new(*obj) + LeanIxDataValue::new(*obj) }, DataValue::OfBool(b) => { // 0 object fields, 1 scalar byte let obj = LeanCtor::alloc(1, 0, 1); obj.set_u8(0, *b as u8); - IxDataValue::new(*obj) + LeanIxDataValue::new(*obj) }, DataValue::OfName(n) => { let obj = LeanCtor::alloc(2, 1, 0); obj.set(0, build_name(cache, n)); - IxDataValue::new(*obj) + LeanIxDataValue::new(*obj) }, DataValue::OfNat(n) => { let obj = LeanCtor::alloc(3, 1, 0); obj.set(0, build_nat(n)); - IxDataValue::new(*obj) + LeanIxDataValue::new(*obj) }, DataValue::OfInt(i) => { let obj = LeanCtor::alloc(4, 1, 0); obj.set(0, build_int(i)); - IxDataValue::new(*obj) + LeanIxDataValue::new(*obj) }, DataValue::OfSyntax(syn) => { let obj = LeanCtor::alloc(5, 1, 0); obj.set(0, build_syntax(cache, syn)); - IxDataValue::new(*obj) + LeanIxDataValue::new(*obj) }, } } @@ -224,7 +224,7 @@ pub fn build_kvmap( /// Decode Ix.Int from Lean object. /// Ix.Int: ofNat (tag 0, 1 field) | negSucc (tag 1, 1 field) -pub fn decode_ix_int(obj: LeanObj) -> Int { +pub fn decode_ix_int(obj: LeanObject) -> Int { let ctor = obj.as_ctor(); let nat = Nat::from_obj(ctor.get(0)); match ctor.tag() { @@ -235,7 +235,7 @@ pub fn decode_ix_int(obj: LeanObj) -> Int { } /// Decode Ix.DataValue from a Lean object. -pub fn decode_data_value(obj: LeanObj) -> DataValue { +pub fn decode_data_value(obj: LeanObject) -> DataValue { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -275,7 +275,7 @@ pub fn decode_data_value(obj: LeanObj) -> DataValue { } /// Decode Ix.Syntax from a Lean object. 
-pub fn decode_ix_syntax(obj: LeanObj) -> Syntax { +pub fn decode_ix_syntax(obj: LeanObject) -> Syntax { if obj.is_scalar() { return Syntax::Missing; } @@ -310,7 +310,7 @@ pub fn decode_ix_syntax(obj: LeanObj) -> Syntax { } /// Decode Ix.SourceInfo. -pub fn decode_ix_source_info(obj: LeanObj) -> SourceInfo { +pub fn decode_ix_source_info(obj: LeanObject) -> SourceInfo { if obj.is_scalar() { return SourceInfo::None; } @@ -341,7 +341,7 @@ pub fn decode_ix_source_info(obj: LeanObj) -> SourceInfo { } /// Decode Ix.Substring. -pub fn decode_substring(obj: LeanObj) -> Substring { +pub fn decode_substring(obj: LeanObject) -> Substring { let ctor = obj.as_ctor(); Substring { str: ctor.get(0).as_string().to_string(), @@ -351,7 +351,7 @@ pub fn decode_substring(obj: LeanObj) -> Substring { } /// Decode Ix.SyntaxPreresolved. -pub fn decode_syntax_preresolved(obj: LeanObj) -> SyntaxPreresolved { +pub fn decode_syntax_preresolved(obj: LeanObject) -> SyntaxPreresolved { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -376,7 +376,7 @@ pub fn decode_syntax_preresolved(obj: LeanObj) -> SyntaxPreresolved { /// Round-trip an Ix.Int: decode from Lean, re-encode. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_int(int_ptr: IxInt) -> IxInt { +pub extern "C" fn rs_roundtrip_ix_int(int_ptr: LeanIxInt) -> LeanIxInt { let int_val = decode_ix_int(*int_ptr); build_int(&int_val) } @@ -384,8 +384,8 @@ pub extern "C" fn rs_roundtrip_ix_int(int_ptr: IxInt) -> IxInt { /// Round-trip an Ix.Substring: decode from Lean, re-encode. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_substring( - sub_ptr: IxSubstring, -) -> IxSubstring { + sub_ptr: LeanIxSubstring, +) -> LeanIxSubstring { let sub = decode_substring(*sub_ptr); build_substring(&sub) } @@ -393,8 +393,8 @@ pub extern "C" fn rs_roundtrip_ix_substring( /// Round-trip an Ix.SourceInfo: decode from Lean, re-encode. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_source_info( - si_ptr: IxSourceInfo, -) -> IxSourceInfo { + si_ptr: LeanIxSourceInfo, +) -> LeanIxSourceInfo { let si = decode_ix_source_info(*si_ptr); build_source_info(&si) } @@ -402,8 +402,8 @@ pub extern "C" fn rs_roundtrip_ix_source_info( /// Round-trip an Ix.SyntaxPreresolved: decode from Lean, re-encode. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( - sp_ptr: IxSyntaxPreresolved, -) -> IxSyntaxPreresolved { + sp_ptr: LeanIxSyntaxPreresolved, +) -> LeanIxSyntaxPreresolved { let sp = decode_syntax_preresolved(*sp_ptr); let mut cache = LeanBuildCache::new(); build_syntax_preresolved(&mut cache, &sp) @@ -411,7 +411,7 @@ pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( /// Round-trip an Ix.Syntax: decode from Lean, re-encode. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_syntax(syn_ptr: IxSyntax) -> IxSyntax { +pub extern "C" fn rs_roundtrip_ix_syntax(syn_ptr: LeanIxSyntax) -> LeanIxSyntax { let syn = decode_ix_syntax(*syn_ptr); let mut cache = LeanBuildCache::new(); build_syntax(&mut cache, &syn) @@ -420,8 +420,8 @@ pub extern "C" fn rs_roundtrip_ix_syntax(syn_ptr: IxSyntax) -> IxSyntax { /// Round-trip an Ix.DataValue: decode from Lean, re-encode. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_data_value( - dv_ptr: IxDataValue, -) -> IxDataValue { + dv_ptr: LeanIxDataValue, +) -> LeanIxDataValue { let dv = decode_data_value(*dv_ptr); let mut cache = LeanBuildCache::new(); build_data_value(&mut cache, &dv) diff --git a/src/lean/ffi/ix/env.rs b/src/lean/ffi/ix/env.rs index 6d2a317d..be70c190 100644 --- a/src/lean/ffi/ix/env.rs +++ b/src/lean/ffi/ix/env.rs @@ -3,7 +3,9 @@ use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; -use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; +use crate::lean::object::{ + LeanIxEnvironment, LeanIxRawEnvironment, LeanArray, LeanCtor, LeanObject, +}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::constant::{ @@ -24,14 +26,14 @@ use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; /// /// AssocList α β = nil | cons (key : α) (value : β) (tail : AssocList α β) pub fn build_hashmap_from_pairs( - pairs: Vec<(LeanObj, LeanObj, u64)>, // (key_obj, val_obj, hash) -) -> LeanObj { + pairs: Vec<(LeanObject, LeanObject, u64)>, // (key_obj, val_obj, hash) +) -> LeanObject { let size = pairs.len(); let bucket_count = (size * 4 / 3 + 1).next_power_of_two().max(8); // Create array of AssocLists (initially all nil = boxed 0) let buckets = LeanArray::alloc(bucket_count); - let nil = LeanObj::box_usize(0); + let nil = LeanObject::box_usize(0); for i in 0..bucket_count { buckets.set(i, nil); // nil } @@ -56,7 +58,7 @@ pub fn build_hashmap_from_pairs( // Build Raw { size : Nat, buckets : Array } // Due to unboxing, this IS the HashMap directly // Field 0 = size, Field 1 = buckets (2 object fields, no scalars) - let size_obj = LeanObj::box_usize(size); + let size_obj = LeanObject::box_usize(size); let raw = LeanCtor::alloc(0, 2, 0); raw.set(0, size_obj); @@ -80,7 +82,7 @@ pub fn build_hashmap_from_pairs( pub fn build_raw_environment( cache: &mut LeanBuildCache, consts: &FxHashMap, -) -> LeanObj { +) -> LeanObject { // Build consts array: 
Array (Name × ConstantInfo) let consts_arr = LeanArray::alloc(consts.len()); for (i, (name, info)) in consts.iter().enumerate() { @@ -102,13 +104,13 @@ pub fn build_raw_environment( /// Decode a HashMap's AssocList and collect key-value pairs using a custom decoder. fn decode_assoc_list( - obj: LeanObj, + obj: LeanObject, decode_key: FK, decode_val: FV, ) -> Vec<(K, V)> where - FK: Fn(LeanObj) -> K, - FV: Fn(LeanObj) -> V, + FK: Fn(LeanObject) -> K, + FV: Fn(LeanObject) -> V, { let mut result = Vec::new(); let mut current = obj; @@ -140,13 +142,13 @@ where /// - DHashMap { inner : Raw, wf : Prop } unboxes to Raw (Prop is erased) /// - Raw { size : Nat, buckets : Array } - field 0 = size, field 1 = buckets fn decode_hashmap( - obj: LeanObj, + obj: LeanObject, decode_key: FK, decode_val: FV, ) -> Vec<(K, V)> where - FK: Fn(LeanObj) -> K + Copy, - FV: Fn(LeanObj) -> V + Copy, + FK: Fn(LeanObject) -> K + Copy, + FV: Fn(LeanObject) -> V + Copy, { let ctor = obj.as_ctor(); // Raw layout: field 0 = size (Nat), field 1 = buckets (Array) @@ -170,7 +172,7 @@ where /// /// NOTE: Environment with a single field is UNBOXED by Lean, /// so the pointer IS the HashMap directly, not a structure containing it. -pub fn decode_ix_environment(obj: LeanObj) -> FxHashMap { +pub fn decode_ix_environment(obj: LeanObject) -> FxHashMap { // Environment is unboxed - obj IS the HashMap directly let consts_pairs = decode_hashmap(obj, decode_ix_name, decode_constant_info); let mut consts: FxHashMap = FxHashMap::default(); @@ -184,7 +186,7 @@ pub fn decode_ix_environment(obj: LeanObj) -> FxHashMap { /// RawEnvironment = { consts : Array (Name × ConstantInfo) } /// NOTE: Unboxed to just Array. This version deduplicates by name. 
pub fn decode_ix_raw_environment( - obj: LeanObj, + obj: LeanObject, ) -> FxHashMap { let arr = obj.as_array(); let mut consts: FxHashMap = FxHashMap::default(); @@ -202,7 +204,7 @@ pub fn decode_ix_raw_environment( /// Decode Ix.RawEnvironment from Lean object preserving array structure. /// This version preserves all entries including duplicates. pub fn decode_ix_raw_environment_vec( - obj: LeanObj, + obj: LeanObject, ) -> Vec<(Name, ConstantInfo)> { let arr = obj.as_array(); let mut consts = Vec::with_capacity(arr.len()); @@ -221,7 +223,7 @@ pub fn decode_ix_raw_environment_vec( pub fn build_raw_environment_from_vec( cache: &mut LeanBuildCache, consts: &[(Name, ConstantInfo)], -) -> LeanObj { +) -> LeanObject { let consts_arr = LeanArray::alloc(consts.len()); for (i, (name, info)) in consts.iter().enumerate() { let key_obj = build_name(cache, name); @@ -240,17 +242,21 @@ pub fn build_raw_environment_from_vec( /// Round-trip an Ix.Environment: decode from Lean, re-encode. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_environment(env_ptr: LeanObj) -> LeanObj { - let env = decode_ix_environment(env_ptr); +pub extern "C" fn rs_roundtrip_ix_environment( + env_ptr: LeanIxEnvironment, +) -> LeanIxRawEnvironment { + let env = decode_ix_environment(*env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment(&mut cache, &env) + build_raw_environment(&mut cache, &env).into() } /// Round-trip an Ix.RawEnvironment: decode from Lean, re-encode. /// Uses Vec-preserving functions to maintain array structure and order. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_raw_environment(env_ptr: LeanObj) -> LeanObj { - let env = decode_ix_raw_environment_vec(env_ptr); +pub extern "C" fn rs_roundtrip_ix_raw_environment( + env_ptr: LeanIxRawEnvironment, +) -> LeanIxRawEnvironment { + let env = decode_ix_raw_environment_vec(*env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment_from_vec(&mut cache, &env) + build_raw_environment_from_vec(&mut cache, &env).into() } diff --git a/src/lean/ffi/ix/expr.rs b/src/lean/ffi/ix/expr.rs index d5e45b81..cf4a6ed8 100644 --- a/src/lean/ffi/ix/expr.rs +++ b/src/lean/ffi/ix/expr.rs @@ -18,7 +18,7 @@ use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; use crate::lean::nat::Nat; -use crate::lean::obj::{IxExpr, LeanArray, LeanCtor, LeanObj, LeanString}; +use crate::lean::object::{LeanIxExpr, LeanArray, LeanCtor, LeanObject, LeanString}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::address::build_address; @@ -31,7 +31,7 @@ use crate::lean::ffi::primitives::build_nat; /// Build a Lean Ix.Expr with embedded hash. /// Uses caching to avoid rebuilding the same expression. 
-pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { +pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> LeanIxExpr { let hash = *expr.get_hash(); if let Some(&cached) = cache.exprs.get(&hash) { cached.inc_ref(); @@ -43,25 +43,25 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { let obj = LeanCtor::alloc(0, 2, 0); obj.set(0, build_nat(idx)); obj.set(1, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Fvar(name, h) => { let obj = LeanCtor::alloc(1, 2, 0); obj.set(0, build_name(cache, name)); obj.set(1, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Mvar(name, h) => { let obj = LeanCtor::alloc(2, 2, 0); obj.set(0, build_name(cache, name)); obj.set(1, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Sort(level, h) => { let obj = LeanCtor::alloc(3, 2, 0); obj.set(0, build_level(cache, level)); obj.set(1, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Const(name, levels, h) => { let name_obj = build_name(cache, name); @@ -70,7 +70,7 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { obj.set(0, name_obj); obj.set(1, levels_obj); obj.set(2, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::App(fn_expr, arg_expr, h) => { let fn_obj = build_expr(cache, fn_expr); @@ -79,7 +79,7 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { obj.set(0, fn_obj); obj.set(1, arg_obj); obj.set(2, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Lam(name, ty, body, bi, h) => { let name_obj = build_name(cache, name); @@ -93,7 +93,7 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { obj.set(2, body_obj); obj.set(3, hash_obj); obj.set_u8(4 * 8, binder_info_to_u8(bi)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::ForallE(name, ty, body, bi, h) => { let name_obj = build_name(cache, name); @@ -106,7 
+106,7 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { obj.set(2, body_obj); obj.set(3, hash_obj); obj.set_u8(4 * 8, binder_info_to_u8(bi)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::LetE(name, ty, val, body, non_dep, h) => { let name_obj = build_name(cache, name); @@ -122,14 +122,14 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { obj.set(3, body_obj); obj.set(4, hash_obj); obj.set_u8(5 * 8, *non_dep as u8); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Lit(lit, h) => { let lit_obj = build_literal(lit); let obj = LeanCtor::alloc(9, 2, 0); obj.set(0, lit_obj); obj.set(1, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Mdata(md, inner, h) => { let md_obj = build_mdata_array(cache, md); @@ -138,7 +138,7 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { obj.set(0, md_obj); obj.set(1, inner_obj); obj.set(2, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, ExprData::Proj(type_name, idx, struct_expr, h) => { let name_obj = build_name(cache, type_name); @@ -149,7 +149,7 @@ pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> IxExpr { obj.set(1, idx_obj); obj.set(2, struct_obj); obj.set(3, build_address(h)); - IxExpr::new(*obj) + LeanIxExpr::new(*obj) }, }; @@ -175,7 +175,7 @@ fn build_name_datavalue_pair( cache: &mut LeanBuildCache, name: &Name, dv: &DataValue, -) -> LeanObj { +) -> LeanObject { let name_obj = build_name(cache, name); let dv_obj = build_data_value(cache, dv); let pair = LeanCtor::alloc(0, 2, 0); @@ -185,7 +185,7 @@ fn build_name_datavalue_pair( } /// Build a Literal (natVal or strVal). -pub fn build_literal(lit: &Literal) -> LeanObj { +pub fn build_literal(lit: &Literal) -> LeanObject { match lit { Literal::NatVal(n) => { let obj = LeanCtor::alloc(0, 1, 0); @@ -202,8 +202,8 @@ pub fn build_literal(lit: &Literal) -> LeanObj { /// Build Ix.BinderInfo enum. 
/// BinderInfo is a 4-constructor enum with no fields, stored as boxed scalar. -pub fn build_binder_info(bi: &BinderInfo) -> LeanObj { - LeanObj::box_usize(binder_info_to_u8(bi) as usize) +pub fn build_binder_info(bi: &BinderInfo) -> LeanObject { + LeanObject::box_usize(binder_info_to_u8(bi) as usize) } /// Convert BinderInfo to u8 tag. @@ -217,7 +217,7 @@ pub fn binder_info_to_u8(bi: &BinderInfo) -> u8 { } /// Decode a Lean Ix.Expr to Rust Expr. -pub fn decode_ix_expr(obj: LeanObj) -> Expr { +pub fn decode_ix_expr(obj: LeanObject) -> Expr { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -315,7 +315,7 @@ pub fn decode_ix_expr(obj: LeanObj) -> Expr { } /// Decode Lean.Literal from a Lean object. -pub fn decode_literal(obj: LeanObj) -> Literal { +pub fn decode_literal(obj: LeanObject) -> Literal { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -332,7 +332,7 @@ pub fn decode_literal(obj: LeanObj) -> Literal { } /// Decode a (Name × DataValue) pair for mdata. -fn decode_name_data_value(obj: LeanObj) -> (Name, DataValue) { +fn decode_name_data_value(obj: LeanObject) -> (Name, DataValue) { // Prod: ctor 0 with 2 fields let ctor = obj.as_ctor(); let name = decode_ix_name(ctor.get(0)); @@ -353,7 +353,7 @@ pub fn decode_binder_info(bi_byte: u8) -> BinderInfo { /// Round-trip an Ix.Expr: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: IxExpr) -> IxExpr { +pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: LeanIxExpr) -> LeanIxExpr { let expr = decode_ix_expr(*expr_ptr); let mut cache = LeanBuildCache::new(); build_expr(&mut cache, &expr) diff --git a/src/lean/ffi/ix/level.rs b/src/lean/ffi/ix/level.rs index 2ce50c50..c6ba3b8e 100644 --- a/src/lean/ffi/ix/level.rs +++ b/src/lean/ffi/ix/level.rs @@ -9,7 +9,7 @@ //! 
- Tag 5: mvar (n : Name) (hash : Address) use crate::ix::env::{Level, LevelData}; -use crate::lean::obj::{IxLevel, LeanArray, LeanCtor, LeanObj}; +use crate::lean::object::{LeanIxLevel, LeanArray, LeanCtor, LeanObject}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::address::build_address; @@ -17,7 +17,7 @@ use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; /// Build a Lean Ix.Level with embedded hash. /// Uses caching to avoid rebuilding the same level. -pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> IxLevel { +pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> LeanIxLevel { let hash = *level.get_hash(); if let Some(&cached) = cache.levels.get(&hash) { cached.inc_ref(); @@ -28,14 +28,14 @@ pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> IxLevel { LevelData::Zero(h) => { let ctor = LeanCtor::alloc(0, 1, 0); ctor.set(0, build_address(h)); - IxLevel::new(*ctor) + LeanIxLevel::new(*ctor) }, LevelData::Succ(x, h) => { let x_obj = build_level(cache, x); let ctor = LeanCtor::alloc(1, 2, 0); ctor.set(0, x_obj); ctor.set(1, build_address(h)); - IxLevel::new(*ctor) + LeanIxLevel::new(*ctor) }, LevelData::Max(x, y, h) => { let x_obj = build_level(cache, x); @@ -44,7 +44,7 @@ pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> IxLevel { ctor.set(0, x_obj); ctor.set(1, y_obj); ctor.set(2, build_address(h)); - IxLevel::new(*ctor) + LeanIxLevel::new(*ctor) }, LevelData::Imax(x, y, h) => { let x_obj = build_level(cache, x); @@ -53,21 +53,21 @@ pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> IxLevel { ctor.set(0, x_obj); ctor.set(1, y_obj); ctor.set(2, build_address(h)); - IxLevel::new(*ctor) + LeanIxLevel::new(*ctor) }, LevelData::Param(n, h) => { let n_obj = build_name(cache, n); let ctor = LeanCtor::alloc(4, 2, 0); ctor.set(0, n_obj); ctor.set(1, build_address(h)); - IxLevel::new(*ctor) + LeanIxLevel::new(*ctor) }, LevelData::Mvar(n, h) => { let n_obj = 
build_name(cache, n); let ctor = LeanCtor::alloc(5, 2, 0); ctor.set(0, n_obj); ctor.set(1, build_address(h)); - IxLevel::new(*ctor) + LeanIxLevel::new(*ctor) }, }; @@ -88,7 +88,7 @@ pub fn build_level_array( } /// Decode a Lean Ix.Level to Rust Level. -pub fn decode_ix_level(obj: LeanObj) -> Level { +pub fn decode_ix_level(obj: LeanObject) -> Level { let ctor = obj.as_ctor(); match ctor.tag() { 0 => Level::zero(), @@ -119,13 +119,13 @@ pub fn decode_ix_level(obj: LeanObj) -> Level { } /// Decode Array of Levels from Lean pointer. -pub fn decode_level_array(obj: LeanObj) -> Vec { +pub fn decode_level_array(obj: LeanObject) -> Vec { obj.as_array().map(decode_ix_level) } /// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_level(level_ptr: IxLevel) -> IxLevel { +pub extern "C" fn rs_roundtrip_ix_level(level_ptr: LeanIxLevel) -> LeanIxLevel { let level = decode_ix_level(*level_ptr); let mut cache = LeanBuildCache::new(); build_level(&mut cache, &level) diff --git a/src/lean/ffi/ix/name.rs b/src/lean/ffi/ix/name.rs index 3a89ad9b..46dfe234 100644 --- a/src/lean/ffi/ix/name.rs +++ b/src/lean/ffi/ix/name.rs @@ -7,7 +7,7 @@ use crate::ix::env::{Name, NameData}; use crate::lean::nat::Nat; -use crate::lean::obj::{IxName, LeanArray, LeanCtor, LeanObj, LeanString}; +use crate::lean::object::{LeanIxName, LeanArray, LeanCtor, LeanObject, LeanString}; use crate::lean::ffi::builder::LeanBuildCache; use crate::lean::ffi::ix::address::build_address; @@ -15,7 +15,7 @@ use crate::lean::ffi::primitives::build_nat; /// Build a Lean Ix.Name with embedded hash. /// Uses caching to avoid rebuilding the same name. 
-pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> IxName { +pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> LeanIxName { let hash = name.get_hash(); if let Some(&cached) = cache.names.get(hash) { cached.inc_ref(); @@ -26,7 +26,7 @@ pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> IxName { NameData::Anonymous(h) => { let ctor = LeanCtor::alloc(0, 1, 0); ctor.set(0, build_address(h)); - IxName::new(*ctor) + LeanIxName::new(*ctor) }, NameData::Str(parent, s, h) => { let parent_obj = build_name(cache, parent); @@ -35,7 +35,7 @@ pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> IxName { ctor.set(0, parent_obj); ctor.set(1, s_obj); ctor.set(2, build_address(h)); - IxName::new(*ctor) + LeanIxName::new(*ctor) }, NameData::Num(parent, n, h) => { let parent_obj = build_name(cache, parent); @@ -44,7 +44,7 @@ pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> IxName { ctor.set(0, parent_obj); ctor.set(1, n_obj); ctor.set(2, build_address(h)); - IxName::new(*ctor) + LeanIxName::new(*ctor) }, }; @@ -65,7 +65,7 @@ pub fn build_name_array( } /// Decode a Lean Ix.Name to Rust Name. -pub fn decode_ix_name(obj: LeanObj) -> Name { +pub fn decode_ix_name(obj: LeanObject) -> Name { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -89,13 +89,13 @@ pub fn decode_ix_name(obj: LeanObj) -> Name { } /// Decode Array of Names from Lean pointer. -pub fn decode_name_array(obj: LeanObj) -> Vec { +pub fn decode_name_array(obj: LeanObject) -> Vec { obj.as_array().map(decode_ix_name) } /// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_name(name_ptr: IxName) -> IxName { +pub extern "C" fn rs_roundtrip_ix_name(name_ptr: LeanIxName) -> LeanIxName { let name = decode_ix_name(*name_ptr); let mut cache = LeanBuildCache::new(); build_name(&mut cache, &name) diff --git a/src/lean/ffi/ixon/compare.rs b/src/lean/ffi/ixon/compare.rs index 727541c7..62d800da 100644 --- a/src/lean/ffi/ixon/compare.rs +++ b/src/lean/ffi/ixon/compare.rs @@ -6,7 +6,9 @@ use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use crate::lean::obj::{LeanCtor, LeanObj}; +use crate::lean::object::{ + LeanIxBlockCompareDetail, LeanByteArray, LeanCtor, LeanObject, +}; use crate::lean::ffi::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, @@ -20,8 +22,8 @@ pub struct RustBlockEnv { /// Compare Lean's compiled expression output with Rust's compilation of the same input. #[unsafe(no_mangle)] pub extern "C" fn rs_compare_expr_compilation( - lean_expr_ptr: LeanObj, - lean_output: LeanObj, + lean_expr_ptr: LeanObject, + lean_output: LeanByteArray, univ_ctx_size: u64, ) -> bool { // Decode Lean.Expr to Rust's representation @@ -56,8 +58,7 @@ pub extern "C" fn rs_compare_expr_compilation( put_expr(&rust_expr, &mut rust_bytes); // Compare byte-for-byte - let lean_ba = lean_output.as_byte_array(); - let lean_bytes = lean_ba.as_bytes(); + let lean_bytes = lean_output.as_bytes(); rust_bytes == lean_bytes } @@ -68,7 +69,7 @@ fn build_block_compare_result( lean_size: u64, rust_size: u64, first_diff_offset: u64, -) -> LeanObj { +) -> LeanObject { if matched { *LeanCtor::alloc(0, 0, 0) // match } else if not_found { @@ -85,10 +86,10 @@ fn build_block_compare_result( /// Build a BlockCompareDetail Lean object. 
fn build_block_compare_detail( - result: LeanObj, + result: LeanObject, lean_sharing_len: u64, rust_sharing_len: u64, -) -> LeanObj { +) -> LeanObject { let ctor = LeanCtor::alloc(0, 1, 16); ctor.set(0, result); ctor.set_u64(8, lean_sharing_len); @@ -104,16 +105,15 @@ fn build_block_compare_detail( #[unsafe(no_mangle)] pub unsafe extern "C" fn rs_compare_block_v2( rust_env: *const RustBlockEnv, - lowlink_name: LeanObj, - lean_bytes: LeanObj, + lowlink_name: LeanObject, + lean_bytes: LeanByteArray, lean_sharing_len: u64, -) -> LeanObj { +) -> LeanIxBlockCompareDetail { let global_cache = GlobalCache::default(); let name = lean_ptr_to_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; - let lean_ba = lean_bytes.as_byte_array(); - let lean_data = lean_ba.as_bytes(); + let lean_data = lean_bytes.as_bytes(); // Look up Rust's compiled block let (rust_bytes, rust_sharing_len) = match rust_env.blocks.get(&name) { @@ -122,7 +122,7 @@ pub unsafe extern "C" fn rs_compare_block_v2( // Block not found in Rust compilation let result = build_block_compare_result(false, true, lean_data.len() as u64, 0, 0); - return build_block_compare_detail(result, lean_sharing_len, 0); + return build_block_compare_detail(result, lean_sharing_len, 0).into(); }, }; @@ -140,7 +140,8 @@ pub unsafe extern "C" fn rs_compare_block_v2( result, lean_sharing_len, rust_sharing_len, - ); + ) + .into(); } // Mismatch: find first differing byte @@ -163,7 +164,7 @@ pub unsafe extern "C" fn rs_compare_block_v2( rust_bytes.len() as u64, first_diff_offset, ); - build_block_compare_detail(result, lean_sharing_len, rust_sharing_len) + build_block_compare_detail(result, lean_sharing_len, rust_sharing_len).into() } /// Free a RustBlockEnv pointer. @@ -183,7 +184,7 @@ pub unsafe extern "C" fn rs_free_compiled_env(ptr: *mut RustBlockEnv) { /// Build a RustBlockEnv from a Lean environment. 
#[unsafe(no_mangle)] pub extern "C" fn rs_build_compiled_env( - env_consts_ptr: LeanObj, + env_consts_ptr: LeanObject, ) -> *mut RustBlockEnv { use crate::lean::ffi::lean_env::lean_ptr_to_env; diff --git a/src/lean/ffi/ixon/constant.rs b/src/lean/ffi/ixon/constant.rs index 0fbe2677..3ab0d5b1 100644 --- a/src/lean/ffi/ixon/constant.rs +++ b/src/lean/ffi/ixon/constant.rs @@ -15,8 +15,13 @@ use crate::ix::ixon::constant::{ Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, RecursorRule as IxonRecursorRule, }; -use crate::lean::obj::{ - IxAddress, LeanArray, LeanByteArray, LeanCtor, LeanObj, +use crate::lean::object::{ + LeanIxAddress, LeanIxonAxiom, LeanIxonConstant, LeanIxonConstantInfo, + LeanIxonConstructor, LeanIxonConstructorProj, LeanIxonDefinition, + LeanIxonDefinitionProj, LeanIxonInductive, LeanIxonInductiveProj, + LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, + LeanIxonRecursorProj, LeanIxonRecursorRule, LeanArray, LeanByteArray, + LeanCtor, LeanObject, }; use crate::lean::ffi::ixon::expr::{ @@ -28,7 +33,7 @@ use crate::lean::ffi::ixon::univ::{ }; /// Build Address from Ixon Address type (which is just a [u8; 32]). -pub fn build_address_from_ixon(addr: &Address) -> IxAddress { +pub fn build_address_from_ixon(addr: &Address) -> LeanIxAddress { LeanByteArray::from_bytes(addr.as_bytes()) } @@ -44,7 +49,7 @@ pub fn build_address_array(addrs: &[Address]) -> LeanArray { /// Build Ixon.Definition /// Lean stores scalar fields ordered by size (largest first). 
/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn build_ixon_definition(def: &IxonDefinition) -> LeanObj { +pub fn build_ixon_definition(def: &IxonDefinition) -> LeanObject { let typ_obj = build_ixon_expr(&def.typ); let value_obj = build_ixon_expr(&def.value); // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) @@ -69,7 +74,7 @@ pub fn build_ixon_definition(def: &IxonDefinition) -> LeanObj { } /// Build Ixon.RecursorRule -pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> LeanObj { +pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> LeanObject { let rhs_obj = build_ixon_expr(&rule.rhs); // 1 obj field, 8 scalar bytes let ctor = LeanCtor::alloc(0, 1, 8); @@ -80,7 +85,7 @@ pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> LeanObj { /// Build Ixon.Recursor /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanObj { +pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanObject { let typ_obj = build_ixon_expr(&rec.typ); // Build rules array let rules_arr = LeanArray::alloc(rec.rules.len()); @@ -104,7 +109,7 @@ pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanObj { /// Build Ixon.Axiom /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanObj { +pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanObject { let typ_obj = build_ixon_expr(&ax.typ); // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) let ctor = LeanCtor::alloc(0, 1, 16); @@ -118,7 +123,7 @@ pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanObj { /// Build Ixon.Quotient /// QuotKind is a simple enum stored as scalar u8, not object field. 
/// Scalars ordered by size: lvls(8) + kind(1) + padding(7) -pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanObj { +pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanObject { let typ_obj = build_ixon_expr(&quot.typ); // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7)) let ctor = LeanCtor::alloc(0, 1, 16); @@ -137,7 +142,7 @@ pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanObj { /// Build Ixon.Constructor /// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_constructor(c: &IxonConstructor) -> LeanObj { +pub fn build_ixon_constructor(c: &IxonConstructor) -> LeanObject { let typ_obj = build_ixon_expr(&c.typ); // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) let ctor = LeanCtor::alloc(0, 1, 40); @@ -153,7 +158,7 @@ pub fn build_ixon_constructor(c: &IxonConstructor) -> LeanObj { /// Build Ixon.Inductive /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn build_ixon_inductive(ind: &IxonInductive) -> LeanObj { +pub fn build_ixon_inductive(ind: &IxonInductive) -> LeanObject { let typ_obj = build_ixon_expr(&ind.typ); // Build ctors array let ctors_arr = LeanArray::alloc(ind.ctors.len()); @@ -176,7 +181,7 @@ pub fn build_ixon_inductive(ind: &IxonInductive) -> LeanObj { } /// Build Ixon.InductiveProj -pub fn build_inductive_proj(proj: &InductiveProj) -> LeanObj { +pub fn build_inductive_proj(proj: &InductiveProj) -> LeanObject { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 8); ctor.set(0, block_obj); @@ -185,7 +190,7 @@ pub fn build_inductive_proj(proj: &InductiveProj) -> LeanObj { } /// Build Ixon.ConstructorProj -pub fn build_constructor_proj(proj: &ConstructorProj) -> LeanObj { +pub fn build_constructor_proj(proj: &ConstructorProj) -> LeanObject { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 16); ctor.set(0,
block_obj); @@ -195,7 +200,7 @@ pub fn build_constructor_proj(proj: &ConstructorProj) -> LeanObj { } /// Build Ixon.RecursorProj -pub fn build_recursor_proj(proj: &RecursorProj) -> LeanObj { +pub fn build_recursor_proj(proj: &RecursorProj) -> LeanObject { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 8); ctor.set(0, block_obj); @@ -204,7 +209,7 @@ pub fn build_recursor_proj(proj: &RecursorProj) -> LeanObj { } /// Build Ixon.DefinitionProj -pub fn build_definition_proj(proj: &DefinitionProj) -> LeanObj { +pub fn build_definition_proj(proj: &DefinitionProj) -> LeanObject { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 8); ctor.set(0, block_obj); @@ -213,7 +218,7 @@ pub fn build_definition_proj(proj: &DefinitionProj) -> LeanObj { } /// Build Ixon.MutConst -pub fn build_mut_const(mc: &MutConst) -> LeanObj { +pub fn build_mut_const(mc: &MutConst) -> LeanObject { match mc { MutConst::Defn(def) => { let def_obj = build_ixon_definition(def); @@ -237,7 +242,7 @@ pub fn build_mut_const(mc: &MutConst) -> LeanObj { } /// Build Ixon.ConstantInfo (9 constructors) -pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> LeanObj { +pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> LeanObject { match info { IxonConstantInfo::Defn(def) => { let def_obj = build_ixon_definition(def); @@ -300,7 +305,7 @@ pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> LeanObj { } /// Build Ixon.Constant -pub fn build_ixon_constant(constant: &IxonConstant) -> LeanObj { +pub fn build_ixon_constant(constant: &IxonConstant) -> LeanObject { let info_obj = build_ixon_constant_info(&constant.info); let sharing_obj = build_ixon_expr_array(&constant.sharing); let refs_obj = build_address_array(&constant.refs); @@ -318,13 +323,13 @@ pub fn build_ixon_constant(constant: &IxonConstant) -> LeanObj { // ============================================================================= /// Decode a ByteArray 
(Address) to Address. -pub fn decode_ixon_address(obj: LeanObj) -> Address { +pub fn decode_ixon_address(obj: LeanObject) -> Address { let ba = obj.as_byte_array(); Address::from_slice(&ba.as_bytes()[..32]).expect("Address should be 32 bytes") } /// Decode Array Address. -pub fn decode_ixon_address_array(obj: LeanObj) -> Vec<Address>
{ +pub fn decode_ixon_address_array(obj: LeanObject) -> Vec<Address>
{ let arr = obj.as_array(); arr.map(decode_ixon_address) } @@ -332,7 +337,7 @@ pub fn decode_ixon_address_array(obj: LeanObj) -> Vec<Address>
{ /// Decode Ixon.Definition. /// Lean stores scalar fields ordered by size (largest first). /// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn decode_ixon_definition(obj: LeanObj) -> IxonDefinition { +pub fn decode_ixon_definition(obj: LeanObject) -> IxonDefinition { let ctor = obj.as_ctor(); let typ = Arc::new(decode_ixon_expr(ctor.get(0))); let value = Arc::new(decode_ixon_expr(ctor.get(1))); @@ -355,7 +360,7 @@ pub fn decode_ixon_definition(obj: LeanObj) -> IxonDefinition { } /// Decode Ixon.RecursorRule. -pub fn decode_ixon_recursor_rule(obj: LeanObj) -> IxonRecursorRule { +pub fn decode_ixon_recursor_rule(obj: LeanObject) -> IxonRecursorRule { let ctor = obj.as_ctor(); let rhs = Arc::new(decode_ixon_expr(ctor.get(0))); let fields = ctor.scalar_u64(1, 0); @@ -364,7 +369,7 @@ pub fn decode_ixon_recursor_rule(obj: LeanObj) -> IxonRecursorRule { /// Decode Ixon.Recursor. /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn decode_ixon_recursor(obj: LeanObj) -> IxonRecursor { +pub fn decode_ixon_recursor(obj: LeanObject) -> IxonRecursor { let ctor = obj.as_ctor(); let typ = Arc::new(decode_ixon_expr(ctor.get(0))); let rules_arr = ctor.get(1).as_array(); @@ -391,7 +396,7 @@ pub fn decode_ixon_recursor(obj: LeanObj) -> IxonRecursor { /// Decode Ixon.Axiom. /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_axiom(obj: LeanObj) -> IxonAxiom { +pub fn decode_ixon_axiom(obj: LeanObject) -> IxonAxiom { let ctor = obj.as_ctor(); let typ = Arc::new(decode_ixon_expr(ctor.get(0))); let lvls = ctor.scalar_u64(1, 0); @@ -401,7 +406,7 @@ pub fn decode_ixon_axiom(obj: LeanObj) -> IxonAxiom { /// Decode Ixon.Quotient. /// QuotKind is a scalar (not object field). 
Scalars: lvls(8) + kind(1) + padding(7) -pub fn decode_ixon_quotient(obj: LeanObj) -> IxonQuotient { +pub fn decode_ixon_quotient(obj: LeanObject) -> IxonQuotient { let ctor = obj.as_ctor(); let typ = Arc::new(decode_ixon_expr(ctor.get(0))); let lvls = ctor.scalar_u64(1, 0); @@ -418,7 +423,7 @@ pub fn decode_ixon_quotient(obj: LeanObj) -> IxonQuotient { /// Decode Ixon.Constructor. /// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_constructor(obj: LeanObj) -> IxonConstructor { +pub fn decode_ixon_constructor(obj: LeanObject) -> IxonConstructor { let ctor = obj.as_ctor(); let typ = Arc::new(decode_ixon_expr(ctor.get(0))); let lvls = ctor.scalar_u64(1, 0); @@ -431,7 +436,7 @@ pub fn decode_ixon_constructor(obj: LeanObj) -> IxonConstructor { /// Decode Ixon.Inductive. /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn decode_ixon_inductive(obj: LeanObj) -> IxonInductive { +pub fn decode_ixon_inductive(obj: LeanObject) -> IxonInductive { let ctor = obj.as_ctor(); let typ = Arc::new(decode_ixon_expr(ctor.get(0))); let ctors_arr = ctor.get(1).as_array(); @@ -457,7 +462,7 @@ pub fn decode_ixon_inductive(obj: LeanObj) -> IxonInductive { } /// Decode Ixon.InductiveProj. -pub fn decode_ixon_inductive_proj(obj: LeanObj) -> InductiveProj { +pub fn decode_ixon_inductive_proj(obj: LeanObject) -> InductiveProj { let ctor = obj.as_ctor(); let block = decode_ixon_address(ctor.get(0)); let idx = ctor.scalar_u64(1, 0); @@ -465,7 +470,7 @@ pub fn decode_ixon_inductive_proj(obj: LeanObj) -> InductiveProj { } /// Decode Ixon.ConstructorProj. 
-pub fn decode_ixon_constructor_proj(obj: LeanObj) -> ConstructorProj { +pub fn decode_ixon_constructor_proj(obj: LeanObject) -> ConstructorProj { let ctor = obj.as_ctor(); let block = decode_ixon_address(ctor.get(0)); let idx = ctor.scalar_u64(1, 0); @@ -474,7 +479,7 @@ pub fn decode_ixon_constructor_proj(obj: LeanObj) -> ConstructorProj { } /// Decode Ixon.RecursorProj. -pub fn decode_ixon_recursor_proj(obj: LeanObj) -> RecursorProj { +pub fn decode_ixon_recursor_proj(obj: LeanObject) -> RecursorProj { let ctor = obj.as_ctor(); let block = decode_ixon_address(ctor.get(0)); let idx = ctor.scalar_u64(1, 0); @@ -482,7 +487,7 @@ pub fn decode_ixon_recursor_proj(obj: LeanObj) -> RecursorProj { } /// Decode Ixon.DefinitionProj. -pub fn decode_ixon_definition_proj(obj: LeanObj) -> DefinitionProj { +pub fn decode_ixon_definition_proj(obj: LeanObject) -> DefinitionProj { let ctor = obj.as_ctor(); let block = decode_ixon_address(ctor.get(0)); let idx = ctor.scalar_u64(1, 0); @@ -490,7 +495,7 @@ pub fn decode_ixon_definition_proj(obj: LeanObj) -> DefinitionProj { } /// Decode Ixon.MutConst. -pub fn decode_ixon_mut_const(obj: LeanObj) -> MutConst { +pub fn decode_ixon_mut_const(obj: LeanObject) -> MutConst { let ctor = obj.as_ctor(); let inner = ctor.get(0); match ctor.tag() { @@ -502,7 +507,7 @@ pub fn decode_ixon_mut_const(obj: LeanObj) -> MutConst { } /// Decode Ixon.ConstantInfo. -pub fn decode_ixon_constant_info(obj: LeanObj) -> IxonConstantInfo { +pub fn decode_ixon_constant_info(obj: LeanObject) -> IxonConstantInfo { let ctor = obj.as_ctor(); let inner = ctor.get(0); match ctor.tag() { @@ -524,7 +529,7 @@ pub fn decode_ixon_constant_info(obj: LeanObj) -> IxonConstantInfo { } /// Decode Ixon.Constant. 
-pub fn decode_ixon_constant(obj: LeanObj) -> IxonConstant { +pub fn decode_ixon_constant(obj: LeanObject) -> IxonConstant { let ctor = obj.as_ctor(); IxonConstant { info: decode_ixon_constant_info(ctor.get(0)), @@ -540,98 +545,124 @@ pub fn decode_ixon_constant(obj: LeanObj) -> IxonConstant { /// Round-trip Ixon.Definition. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition(obj: LeanObj) -> LeanObj { - let def = decode_ixon_definition(obj); - build_ixon_definition(&def) +pub extern "C" fn rs_roundtrip_ixon_definition( + obj: LeanIxonDefinition, +) -> LeanIxonDefinition { + let def = decode_ixon_definition(*obj); + build_ixon_definition(&def).into() } /// Round-trip Ixon.Recursor. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor(obj: LeanObj) -> LeanObj { - let rec = decode_ixon_recursor(obj); - build_ixon_recursor(&rec) +pub extern "C" fn rs_roundtrip_ixon_recursor( + obj: LeanIxonRecursor, +) -> LeanIxonRecursor { + let rec = decode_ixon_recursor(*obj); + build_ixon_recursor(&rec).into() } /// Round-trip Ixon.Axiom. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanObj) -> LeanObj { - let ax = decode_ixon_axiom(obj); - build_ixon_axiom(&ax) +pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanIxonAxiom) -> LeanIxonAxiom { + let ax = decode_ixon_axiom(*obj); + build_ixon_axiom(&ax).into() } /// Round-trip Ixon.Quotient. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_quotient(obj: LeanObj) -> LeanObj { - let quot = decode_ixon_quotient(obj); - build_ixon_quotient(&quot) +pub extern "C" fn rs_roundtrip_ixon_quotient( + obj: LeanIxonQuotient, +) -> LeanIxonQuotient { + let quot = decode_ixon_quotient(*obj); + build_ixon_quotient(&quot).into() } /// Round-trip Ixon.ConstantInfo.
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant_info(obj: LeanObj) -> LeanObj { - let info = decode_ixon_constant_info(obj); - build_ixon_constant_info(&info) +pub extern "C" fn rs_roundtrip_ixon_constant_info( + obj: LeanIxonConstantInfo, +) -> LeanIxonConstantInfo { + let info = decode_ixon_constant_info(*obj); + build_ixon_constant_info(&info).into() } /// Round-trip Ixon.Constant. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant(obj: LeanObj) -> LeanObj { - let constant = decode_ixon_constant(obj); - build_ixon_constant(&constant) +pub extern "C" fn rs_roundtrip_ixon_constant( + obj: LeanIxonConstant, +) -> LeanIxonConstant { + let constant = decode_ixon_constant(*obj); + build_ixon_constant(&constant).into() } /// Round-trip Ixon.RecursorRule. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor_rule(obj: LeanObj) -> LeanObj { - let rule = decode_ixon_recursor_rule(obj); - build_ixon_recursor_rule(&rule) +pub extern "C" fn rs_roundtrip_ixon_recursor_rule( + obj: LeanIxonRecursorRule, +) -> LeanIxonRecursorRule { + let rule = decode_ixon_recursor_rule(*obj); + build_ixon_recursor_rule(&rule).into() } /// Round-trip Ixon.Constructor. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constructor(obj: LeanObj) -> LeanObj { - let c = decode_ixon_constructor(obj); - build_ixon_constructor(&c) +pub extern "C" fn rs_roundtrip_ixon_constructor( + obj: LeanIxonConstructor, +) -> LeanIxonConstructor { + let c = decode_ixon_constructor(*obj); + build_ixon_constructor(&c).into() } /// Round-trip Ixon.Inductive. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_inductive(obj: LeanObj) -> LeanObj { - let ind = decode_ixon_inductive(obj); - build_ixon_inductive(&ind) +pub extern "C" fn rs_roundtrip_ixon_inductive( + obj: LeanIxonInductive, +) -> LeanIxonInductive { + let ind = decode_ixon_inductive(*obj); + build_ixon_inductive(&ind).into() } /// Round-trip Ixon.InductiveProj. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_inductive_proj(obj: LeanObj) -> LeanObj { - let proj = decode_ixon_inductive_proj(obj); - build_inductive_proj(&proj) +pub extern "C" fn rs_roundtrip_ixon_inductive_proj( + obj: LeanIxonInductiveProj, +) -> LeanIxonInductiveProj { + let proj = decode_ixon_inductive_proj(*obj); + build_inductive_proj(&proj).into() } /// Round-trip Ixon.ConstructorProj. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constructor_proj(obj: LeanObj) -> LeanObj { - let proj = decode_ixon_constructor_proj(obj); - build_constructor_proj(&proj) +pub extern "C" fn rs_roundtrip_ixon_constructor_proj( + obj: LeanIxonConstructorProj, +) -> LeanIxonConstructorProj { + let proj = decode_ixon_constructor_proj(*obj); + build_constructor_proj(&proj).into() } /// Round-trip Ixon.RecursorProj. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_recursor_proj(obj: LeanObj) -> LeanObj { - let proj = decode_ixon_recursor_proj(obj); - build_recursor_proj(&proj) +pub extern "C" fn rs_roundtrip_ixon_recursor_proj( + obj: LeanIxonRecursorProj, +) -> LeanIxonRecursorProj { + let proj = decode_ixon_recursor_proj(*obj); + build_recursor_proj(&proj).into() } /// Round-trip Ixon.DefinitionProj. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition_proj(obj: LeanObj) -> LeanObj { - let proj = decode_ixon_definition_proj(obj); - build_definition_proj(&proj) +pub extern "C" fn rs_roundtrip_ixon_definition_proj( + obj: LeanIxonDefinitionProj, +) -> LeanIxonDefinitionProj { + let proj = decode_ixon_definition_proj(*obj); + build_definition_proj(&proj).into() } /// Round-trip Ixon.MutConst. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_mut_const(obj: LeanObj) -> LeanObj { - let mc = decode_ixon_mut_const(obj); - build_mut_const(&mc) +pub extern "C" fn rs_roundtrip_ixon_mut_const( + obj: LeanIxonMutConst, +) -> LeanIxonMutConst { + let mc = decode_ixon_mut_const(*obj); + build_mut_const(&mc).into() } diff --git a/src/lean/ffi/ixon/enums.rs b/src/lean/ffi/ixon/enums.rs index c565a291..d2402bf2 100644 --- a/src/lean/ffi/ixon/enums.rs +++ b/src/lean/ffi/ixon/enums.rs @@ -4,33 +4,33 @@ use std::ffi::c_void; use crate::ix::env::{DefinitionSafety, QuotKind}; use crate::ix::ixon::constant::DefKind; -use crate::lean::obj::LeanObj; +use crate::lean::object::{LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, LeanObject}; /// Build Ixon.DefKind /// | defn -- tag 0 /// | opaq -- tag 1 /// | thm -- tag 2 /// Simple enums are passed as raw (unboxed) tag values across Lean FFI. -pub fn build_def_kind(kind: &DefKind) -> LeanObj { +pub fn build_def_kind(kind: &DefKind) -> LeanObject { let tag = match kind { DefKind::Definition => 0, DefKind::Opaque => 1, DefKind::Theorem => 2, }; - unsafe { LeanObj::from_raw(tag as *const c_void) } + unsafe { LeanObject::from_raw(tag as *const c_void) } } /// Build Ixon.DefinitionSafety /// | unsaf -- tag 0 /// | safe -- tag 1 /// | part -- tag 2 -pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> LeanObj { +pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> LeanObject { let tag = match safety { DefinitionSafety::Unsafe => 0, DefinitionSafety::Safe => 1, DefinitionSafety::Partial => 2, }; - unsafe { LeanObj::from_raw(tag as *const c_void) } + unsafe { LeanObject::from_raw(tag as *const c_void) } } /// Build Ixon.QuotKind @@ -38,14 +38,14 @@ pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> LeanObj { /// | ctor -- tag 1 /// | lift -- tag 2 /// | ind -- tag 3 -pub fn build_ixon_quot_kind(kind: &QuotKind) -> LeanObj { +pub fn build_ixon_quot_kind(kind: &QuotKind) -> 
LeanObject { let tag = match kind { QuotKind::Type => 0, QuotKind::Ctor => 1, QuotKind::Lift => 2, QuotKind::Ind => 3, }; - unsafe { LeanObj::from_raw(tag as *const c_void) } + unsafe { LeanObject::from_raw(tag as *const c_void) } } // ============================================================================= @@ -53,7 +53,7 @@ pub fn build_ixon_quot_kind(kind: &QuotKind) -> LeanObj { // ============================================================================= /// Decode Ixon.DefKind (simple enum, raw unboxed tag value). -pub fn decode_ixon_def_kind(obj: LeanObj) -> DefKind { +pub fn decode_ixon_def_kind(obj: LeanObject) -> DefKind { let tag = obj.as_ptr() as usize; match tag { 0 => DefKind::Definition, @@ -64,7 +64,7 @@ pub fn decode_ixon_def_kind(obj: LeanObj) -> DefKind { } /// Decode Ixon.DefinitionSafety (simple enum, raw unboxed tag value). -pub fn decode_ixon_definition_safety(obj: LeanObj) -> DefinitionSafety { +pub fn decode_ixon_definition_safety(obj: LeanObject) -> DefinitionSafety { let tag = obj.as_ptr() as usize; match tag { 0 => DefinitionSafety::Unsafe, @@ -75,7 +75,7 @@ pub fn decode_ixon_definition_safety(obj: LeanObj) -> DefinitionSafety { } /// Decode Ixon.QuotKind (simple enum, raw unboxed tag value). -pub fn decode_ixon_quot_kind(obj: LeanObj) -> QuotKind { +pub fn decode_ixon_quot_kind(obj: LeanObject) -> QuotKind { let tag = obj.as_ptr() as usize; match tag { 0 => QuotKind::Type, @@ -92,21 +92,23 @@ pub fn decode_ixon_quot_kind(obj: LeanObj) -> QuotKind { /// Round-trip Ixon.DefKind. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_def_kind(obj: LeanObj) -> LeanObj { - let kind = decode_ixon_def_kind(obj); - build_def_kind(&kind) +pub extern "C" fn rs_roundtrip_ixon_def_kind(obj: LeanIxonDefKind) -> LeanIxonDefKind { + let kind = decode_ixon_def_kind(*obj); + build_def_kind(&kind).into() } /// Round-trip Ixon.DefinitionSafety. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_definition_safety(obj: LeanObj) -> LeanObj { - let safety = decode_ixon_definition_safety(obj); - build_ixon_definition_safety(&safety) +pub extern "C" fn rs_roundtrip_ixon_definition_safety( + obj: LeanIxonDefinitionSafety, +) -> LeanIxonDefinitionSafety { + let safety = decode_ixon_definition_safety(*obj); + build_ixon_definition_safety(&safety).into() } /// Round-trip Ixon.QuotKind. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_quot_kind(obj: LeanObj) -> LeanObj { - let kind = decode_ixon_quot_kind(obj); - build_ixon_quot_kind(&kind) +pub extern "C" fn rs_roundtrip_ixon_quot_kind(obj: LeanIxonQuotKind) -> LeanIxonQuotKind { + let kind = decode_ixon_quot_kind(*obj); + build_ixon_quot_kind(&kind).into() } diff --git a/src/lean/ffi/ixon/env.rs b/src/lean/ffi/ixon/env.rs index 41474f2c..efde1691 100644 --- a/src/lean/ffi/ixon/env.rs +++ b/src/lean/ffi/ixon/env.rs @@ -9,8 +9,8 @@ use crate::ix::ixon::comm::Comm; use crate::ix::ixon::constant::Constant as IxonConstant; use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; -use crate::lean::obj::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObj, +use crate::lean::object::{ + LeanIxonRawEnv, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, }; use crate::lean::ffi::builder::LeanBuildCache; @@ -33,7 +33,7 @@ pub struct DecodedComm { /// Decode Ixon.Comm from Lean pointer. /// Comm = { secret : Address, payload : Address } -pub fn decode_comm(obj: LeanObj) -> DecodedComm { +pub fn decode_comm(obj: LeanObject) -> DecodedComm { let ctor = obj.as_ctor(); DecodedComm { secret: decode_ixon_address(ctor.get(0)), @@ -42,7 +42,7 @@ pub fn decode_comm(obj: LeanObj) -> DecodedComm { } /// Build Ixon.Comm Lean object. 
-pub fn build_comm(comm: &DecodedComm) -> LeanObj { +pub fn build_comm(comm: &DecodedComm) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&comm.secret)); ctor.set(1, build_address_from_ixon(&comm.payload)); @@ -60,7 +60,7 @@ pub struct DecodedRawConst { } /// Decode Ixon.RawConst from Lean pointer. -pub fn decode_raw_const(obj: LeanObj) -> DecodedRawConst { +pub fn decode_raw_const(obj: LeanObject) -> DecodedRawConst { let ctor = obj.as_ctor(); DecodedRawConst { addr: decode_ixon_address(ctor.get(0)), @@ -69,7 +69,7 @@ pub fn decode_raw_const(obj: LeanObj) -> DecodedRawConst { } /// Build Ixon.RawConst Lean object. -pub fn build_raw_const(rc: &DecodedRawConst) -> LeanObj { +pub fn build_raw_const(rc: &DecodedRawConst) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&rc.addr)); ctor.set(1, build_ixon_constant(&rc.constant)); @@ -88,7 +88,7 @@ pub struct DecodedRawNamed { } /// Decode Ixon.RawNamed from Lean pointer. -pub fn decode_raw_named(obj: LeanObj) -> DecodedRawNamed { +pub fn decode_raw_named(obj: LeanObject) -> DecodedRawNamed { let ctor = obj.as_ctor(); DecodedRawNamed { name: decode_ix_name(ctor.get(0)), @@ -101,7 +101,7 @@ pub fn decode_raw_named(obj: LeanObj) -> DecodedRawNamed { pub fn build_raw_named( cache: &mut LeanBuildCache, rn: &DecodedRawNamed, -) -> LeanObj { +) -> LeanObject { let ctor = LeanCtor::alloc(0, 3, 0); ctor.set(0, build_name(cache, &rn.name)); ctor.set(1, build_address_from_ixon(&rn.addr)); @@ -120,7 +120,7 @@ pub struct DecodedRawBlob { } /// Decode Ixon.RawBlob from Lean pointer. -pub fn decode_raw_blob(obj: LeanObj) -> DecodedRawBlob { +pub fn decode_raw_blob(obj: LeanObject) -> DecodedRawBlob { let ctor = obj.as_ctor(); let ba = ctor.get(1).as_byte_array(); DecodedRawBlob { @@ -130,7 +130,7 @@ pub fn decode_raw_blob(obj: LeanObj) -> DecodedRawBlob { } /// Build Ixon.RawBlob Lean object. 
-pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanObj { +pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&rb.addr)); ctor.set(1, LeanByteArray::from_bytes(&rb.bytes)); @@ -148,7 +148,7 @@ pub struct DecodedRawComm { } /// Decode Ixon.RawComm from Lean pointer. -pub fn decode_raw_comm(obj: LeanObj) -> DecodedRawComm { +pub fn decode_raw_comm(obj: LeanObject) -> DecodedRawComm { let ctor = obj.as_ctor(); DecodedRawComm { addr: decode_ixon_address(ctor.get(0)), @@ -157,7 +157,7 @@ pub fn decode_raw_comm(obj: LeanObj) -> DecodedRawComm { } /// Build Ixon.RawComm Lean object. -pub fn build_raw_comm(rc: &DecodedRawComm) -> LeanObj { +pub fn build_raw_comm(rc: &DecodedRawComm) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&rc.addr)); ctor.set(1, build_comm(&rc.comm)); @@ -175,7 +175,7 @@ pub struct DecodedRawNameEntry { } /// Decode Ixon.RawNameEntry from Lean pointer. -pub fn decode_raw_name_entry(obj: LeanObj) -> DecodedRawNameEntry { +pub fn decode_raw_name_entry(obj: LeanObject) -> DecodedRawNameEntry { let ctor = obj.as_ctor(); DecodedRawNameEntry { addr: decode_ixon_address(ctor.get(0)), @@ -188,7 +188,7 @@ pub fn build_raw_name_entry( cache: &mut LeanBuildCache, addr: &Address, name: &Name, -) -> LeanObj { +) -> LeanObject { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, build_name(cache, name)); @@ -209,7 +209,7 @@ pub struct DecodedRawEnv { } /// Decode Ixon.RawEnv from Lean pointer. -pub fn decode_raw_env(obj: LeanObj) -> DecodedRawEnv { +pub fn decode_raw_env(obj: LeanObject) -> DecodedRawEnv { let ctor = obj.as_ctor(); let consts_arr = ctor.get(0).as_array(); let named_arr = ctor.get(1).as_array(); @@ -227,7 +227,7 @@ pub fn decode_raw_env(obj: LeanObj) -> DecodedRawEnv { } /// Build Ixon.RawEnv Lean object. 
-pub fn build_raw_env(env: &DecodedRawEnv) -> LeanObj { +pub fn build_raw_env(env: &DecodedRawEnv) -> LeanObject { let mut cache = LeanBuildCache::new(); // Build consts array @@ -350,14 +350,13 @@ pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { /// FFI: Serialize an Ixon.RawEnv -> ByteArray via Rust's Env.put. Pure. #[unsafe(no_mangle)] -pub extern "C" fn rs_ser_env(obj: LeanObj) -> LeanObj { - let decoded = decode_raw_env(obj); +pub extern "C" fn rs_ser_env(obj: LeanIxonRawEnv) -> LeanByteArray { + let decoded = decode_raw_env(*obj); let env = decoded_to_ixon_env(&decoded); let mut buf = Vec::new(); env.put(&mut buf).expect("Env serialization failed"); - let ba = LeanByteArray::from_bytes(&buf); - *ba + LeanByteArray::from_bytes(&buf) } // ============================================================================= @@ -366,19 +365,18 @@ pub extern "C" fn rs_ser_env(obj: LeanObj) -> LeanObj { /// FFI: Deserialize ByteArray -> Except String Ixon.RawEnv via Rust's Env.get. Pure. #[unsafe(no_mangle)] -pub extern "C" fn rs_des_env(obj: LeanObj) -> LeanObj { - let ba = obj.as_byte_array(); - let data = ba.as_bytes(); +pub extern "C" fn rs_des_env(obj: LeanByteArray) -> LeanExcept { + let data = obj.as_bytes(); let mut slice: &[u8] = data; match IxonEnv::get(&mut slice) { Ok(env) => { let decoded = ixon_env_to_decoded(&env); let raw_env = build_raw_env(&decoded); - *LeanExcept::ok(raw_env) + LeanExcept::ok(raw_env) }, Err(e) => { let msg = format!("rs_des_env: {}", e); - *LeanExcept::error_string(&msg) + LeanExcept::error_string(&msg) }, } } diff --git a/src/lean/ffi/ixon/expr.rs b/src/lean/ffi/ixon/expr.rs index 4323bad2..cb109423 100644 --- a/src/lean/ffi/ixon/expr.rs +++ b/src/lean/ffi/ixon/expr.rs @@ -3,10 +3,10 @@ use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; +use crate::lean::object::{LeanIxonExpr, LeanArray, LeanCtor, LeanObject}; /// Build Ixon.Expr (12 constructors). 
-pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObj { +pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObject { match expr { IxonExpr::Sort(idx) => { let ctor = LeanCtor::alloc(0, 0, 8); @@ -117,7 +117,7 @@ pub fn build_ixon_expr_array(exprs: &[Arc<IxonExpr>]) -> LeanArray { // ============================================================================= /// Decode Array UInt64 from Lean. -fn decode_u64_array(obj: LeanObj) -> Vec<u64> { +fn decode_u64_array(obj: LeanObject) -> Vec<u64> { let arr = obj.as_array(); arr .iter() @@ -133,7 +133,7 @@ fn decode_u64_array(obj: LeanObj) -> Vec<u64> { } /// Decode Ixon.Expr (12 constructors). -pub fn decode_ixon_expr(obj: LeanObj) -> IxonExpr { +pub fn decode_ixon_expr(obj: LeanObject) -> IxonExpr { let ctor = obj.as_ctor(); let tag = ctor.tag(); match tag { @@ -220,7 +220,7 @@ pub fn decode_ixon_expr(obj: LeanObj) -> IxonExpr { } /// Decode Array Ixon.Expr. -pub fn decode_ixon_expr_array(obj: LeanObj) -> Vec<Arc<IxonExpr>> { +pub fn decode_ixon_expr_array(obj: LeanObject) -> Vec<Arc<IxonExpr>> { let arr = obj.as_array(); arr.map(|e| Arc::new(decode_ixon_expr(e))) } @@ -231,7 +231,7 @@ pub fn decode_ixon_expr_array(obj: LeanObj) -> Vec<Arc<IxonExpr>> { /// Round-trip Ixon.Expr.
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr(obj: LeanObj) -> LeanObj { - let expr = decode_ixon_expr(obj); - build_ixon_expr(&expr) +pub extern "C" fn rs_roundtrip_ixon_expr(obj: LeanIxonExpr) -> LeanIxonExpr { + let expr = decode_ixon_expr(*obj); + build_ixon_expr(&expr).into() } diff --git a/src/lean/ffi/ixon/meta.rs b/src/lean/ffi/ixon/meta.rs index c8f09d52..75ea9f0f 100644 --- a/src/lean/ffi/ixon/meta.rs +++ b/src/lean/ffi/ixon/meta.rs @@ -9,7 +9,11 @@ use crate::ix::ixon::env::Named; use crate::ix::ixon::metadata::{ ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, }; -use crate::lean::obj::{LeanArray, LeanCtor, LeanObj}; +use crate::lean::object::{ + LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, + LeanIxonExprMetaArena, LeanIxonExprMetaData, LeanIxonNamed, LeanArray, + LeanCtor, LeanObject, +}; use crate::lean::ffi::ix::constant::{ build_reducibility_hints, decode_reducibility_hints, @@ -25,7 +29,7 @@ use crate::lean::ffi::ixon::constant::{ // ============================================================================= /// Build Ixon.DataValue (for metadata) -pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanObj { +pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanObject { match dv { IxonDataValue::OfString(addr) => { let ctor = LeanCtor::alloc(0, 1, 0); @@ -61,7 +65,7 @@ pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanObj { } /// Decode Ixon.DataValue. -pub fn decode_ixon_data_value(obj: LeanObj) -> IxonDataValue { +pub fn decode_ixon_data_value(obj: LeanObject) -> IxonDataValue { let ctor = obj.as_ctor(); match ctor.tag() { 0 => IxonDataValue::OfString(decode_ixon_address(ctor.get(0))), @@ -103,7 +107,7 @@ pub fn build_kvmap_array(kvmaps: &[KVMap]) -> LeanArray { } /// Decode KVMap (Array (Address × DataValue)). 
-pub fn decode_ixon_kvmap(obj: LeanObj) -> KVMap { +pub fn decode_ixon_kvmap(obj: LeanObject) -> KVMap { let arr = obj.as_array(); arr .iter() @@ -118,7 +122,7 @@ pub fn decode_ixon_kvmap(obj: LeanObj) -> KVMap { } /// Decode Array KVMap. -fn decode_kvmap_array(obj: LeanObj) -> Vec<KVMap> { +fn decode_kvmap_array(obj: LeanObject) -> Vec<KVMap> { let arr = obj.as_array(); arr.map(decode_ixon_kvmap) } @@ -128,7 +132,7 @@ fn decode_kvmap_array(obj: LeanObj) -> Vec<KVMap> { // ============================================================================= /// Decode Array Address. -fn decode_address_array(obj: LeanObj) -> Vec<Address>
{ +fn decode_address_array(obj: LeanObject) -> Vec<Address>
{ decode_ixon_address_array(obj) } @@ -136,13 +140,13 @@ fn decode_address_array(obj: LeanObj) -> Vec<Address>
{ /// Build Array UInt64. fn build_u64_array(vals: &[u64]) -> LeanArray { let arr = LeanArray::alloc(vals.len()); for (i, &v) in vals.iter().enumerate() { - arr.set(i, LeanObj::box_u64(v)); + arr.set(i, LeanObject::box_u64(v)); } arr } /// Decode Array UInt64. -fn decode_u64_array(obj: LeanObj) -> Vec<u64> { +fn decode_u64_array(obj: LeanObject) -> Vec<u64> { let arr = obj.as_array(); arr.iter().map(|elem| elem.unbox_u64()).collect() } @@ -162,9 +166,9 @@ fn decode_u64_array(obj: LeanObj) -> Vec<u64> { /// | ref | 4 | 1 (name: Address) | 0 | /// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | /// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | -pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanObj { +pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanObject { match node { - ExprMetaData::Leaf => LeanObj::box_usize(0), + ExprMetaData::Leaf => LeanObject::box_usize(0), ExprMetaData::App { children } => { // Tag 1, 0 obj fields, 16 scalar bytes (2× u64) @@ -223,7 +227,7 @@ pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanObj { } /// Decode Ixon.ExprMetaData from Lean pointer. -pub fn decode_expr_meta_data(obj: LeanObj) -> ExprMetaData { +pub fn decode_expr_meta_data(obj: LeanObject) -> ExprMetaData { // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) if obj.is_scalar() { let tag = obj.as_ptr() as usize >> 1; @@ -308,7 +312,7 @@ pub fn build_expr_meta_arena(arena: &ExprMeta) -> LeanArray { /// Decode Ixon.ExprMetaArena from Lean pointer. /// Single-field struct is unboxed — obj IS the Array directly. 
-pub fn decode_expr_meta_arena(obj: LeanObj) -> ExprMeta { +pub fn decode_expr_meta_arena(obj: LeanObject) -> ExprMeta { let arr = obj.as_array(); ExprMeta { nodes: arr.map(decode_expr_meta_data) } } @@ -328,9 +332,9 @@ pub fn decode_expr_meta_arena(obj: LeanObj) -> ExprMeta { /// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | /// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | /// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | -pub fn build_constant_meta(meta: &ConstantMeta) -> LeanObj { +pub fn build_constant_meta(meta: &ConstantMeta) -> LeanObject { match meta { - ConstantMeta::Empty => LeanObj::box_usize(0), + ConstantMeta::Empty => LeanObject::box_usize(0), ConstantMeta::Def { name, @@ -419,7 +423,7 @@ pub fn build_constant_meta(meta: &ConstantMeta) -> LeanObj { } /// Decode Ixon.ConstantMeta from Lean pointer. -pub fn decode_constant_meta(obj: LeanObj) -> ConstantMeta { +pub fn decode_constant_meta(obj: LeanObject) -> ConstantMeta { // Empty (tag 0, no fields) is represented as a scalar lean_box(0) if obj.is_scalar() { let tag = obj.as_ptr() as usize >> 1; @@ -521,7 +525,7 @@ pub fn decode_constant_meta(obj: LeanObj) -> ConstantMeta { // ============================================================================= /// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } -pub fn build_named(addr: &Address, meta: &ConstantMeta) -> LeanObj { +pub fn build_named(addr: &Address, meta: &ConstantMeta) -> LeanObject { let addr_obj = build_address_from_ixon(addr); let meta_obj = build_constant_meta(meta); let ctor = LeanCtor::alloc(0, 2, 0); @@ -531,7 +535,7 @@ pub fn build_named(addr: &Address, meta: &ConstantMeta) -> LeanObj { } /// Decode Ixon.Named. 
-pub fn decode_named(obj: LeanObj) -> Named { +pub fn decode_named(obj: LeanObject) -> Named { let ctor = obj.as_ctor(); Named { addr: decode_ixon_address(ctor.get(0)), @@ -540,7 +544,7 @@ pub fn decode_named(obj: LeanObj) -> Named { } /// Build Ixon.Comm { secret : Address, payload : Address } -pub fn build_ixon_comm(comm: &Comm) -> LeanObj { +pub fn build_ixon_comm(comm: &Comm) -> LeanObject { let secret_obj = build_address_from_ixon(&comm.secret); let payload_obj = build_address_from_ixon(&comm.payload); let ctor = LeanCtor::alloc(0, 2, 0); @@ -550,7 +554,7 @@ pub fn build_ixon_comm(comm: &Comm) -> LeanObj { } /// Decode Ixon.Comm. -pub fn decode_ixon_comm(obj: LeanObj) -> Comm { +pub fn decode_ixon_comm(obj: LeanObject) -> Comm { let ctor = obj.as_ctor(); Comm { secret: decode_ixon_address(ctor.get(0)), @@ -564,42 +568,50 @@ pub fn decode_ixon_comm(obj: LeanObj) -> Comm { /// Round-trip Ixon.DataValue. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_data_value(obj: LeanObj) -> LeanObj { - let dv = decode_ixon_data_value(obj); - build_ixon_data_value(&dv) +pub extern "C" fn rs_roundtrip_ixon_data_value( + obj: LeanIxonDataValue, +) -> LeanIxonDataValue { + let dv = decode_ixon_data_value(*obj); + build_ixon_data_value(&dv).into() } /// Round-trip Ixon.Comm. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanObj) -> LeanObj { - let comm = decode_ixon_comm(obj); - build_ixon_comm(&comm) +pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanIxonComm) -> LeanIxonComm { + let comm = decode_ixon_comm(*obj); + build_ixon_comm(&comm).into() } /// Round-trip Ixon.ExprMetaData. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr_meta_data(obj: LeanObj) -> LeanObj { - let node = decode_expr_meta_data(obj); - build_expr_meta_data(&node) +pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( + obj: LeanIxonExprMetaData, +) -> LeanIxonExprMetaData { + let node = decode_expr_meta_data(*obj); + build_expr_meta_data(&node).into() } /// Round-trip Ixon.ExprMetaArena. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena(obj: LeanObj) -> LeanObj { - let arena = decode_expr_meta_arena(obj); - *build_expr_meta_arena(&arena) +pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( + obj: LeanIxonExprMetaArena, +) -> LeanIxonExprMetaArena { + let arena = decode_expr_meta_arena(*obj); + (*build_expr_meta_arena(&arena)).into() } /// Round-trip Ixon.ConstantMeta (full arena-based). #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_constant_meta(obj: LeanObj) -> LeanObj { - let meta = decode_constant_meta(obj); - build_constant_meta(&meta) +pub extern "C" fn rs_roundtrip_ixon_constant_meta( + obj: LeanIxonConstantMeta, +) -> LeanIxonConstantMeta { + let meta = decode_constant_meta(*obj); + build_constant_meta(&meta).into() } /// Round-trip Ixon.Named (with real metadata). 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_named(obj: LeanObj) -> LeanObj { - let named = decode_named(obj); - build_named(&named.addr, &named.meta) +pub extern "C" fn rs_roundtrip_ixon_named(obj: LeanIxonNamed) -> LeanIxonNamed { + let named = decode_named(*obj); + build_named(&named.addr, &named.meta).into() } diff --git a/src/lean/ffi/ixon/serialize.rs b/src/lean/ffi/ixon/serialize.rs index ceaca271..564d1d9b 100644 --- a/src/lean/ffi/ixon/serialize.rs +++ b/src/lean/ffi/ixon/serialize.rs @@ -10,19 +10,22 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; -use crate::lean::obj::LeanObj; +use crate::lean::object::{ + LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, + LeanIxonUniv, LeanByteArray, LeanObject, +}; use crate::lean::ffi::ixon::constant::{ decode_ixon_address, decode_ixon_constant, }; /// Unbox a Lean UInt64, handling both scalar and boxed representations. -fn lean_ptr_to_u64(obj: LeanObj) -> u64 { +fn lean_ptr_to_u64(obj: LeanObject) -> u64 { if obj.is_scalar() { obj.unbox_usize() as u64 } else { obj.unbox_u64() } } /// Decode a Lean `Ixon.Expr` to a Rust `IxonExpr`. -pub fn lean_ptr_to_ixon_expr(obj: LeanObj) -> Arc { +pub fn lean_ptr_to_ixon_expr(obj: LeanObject) -> Arc { assert!(!obj.is_scalar(), "Ixon.Expr should not be scalar"); let ctor = obj.as_ctor(); match ctor.tag() { @@ -97,17 +100,17 @@ pub fn lean_ptr_to_ixon_expr(obj: LeanObj) -> Arc { /// Check if Lean's computed hash matches Rust's computed hash. 
#[unsafe(no_mangle)] pub extern "C" fn rs_expr_hash_matches( - expr_obj: LeanObj, - expected_hash: LeanObj, + expr_obj: LeanIxonExpr, + expected_hash: LeanIxAddress, ) -> bool { - let expr = lean_ptr_to_ixon_expr(expr_obj); + let expr = lean_ptr_to_ixon_expr(*expr_obj); let hash = hash_expr(&expr); - let expected = decode_ixon_address(expected_hash); + let expected = decode_ixon_address(*expected_hash); Address::from_slice(hash.as_bytes()).is_ok_and(|h| h == expected) } /// Decode a Lean `Ixon.Univ` to a Rust `IxonUniv`. -fn lean_ptr_to_ixon_univ(obj: LeanObj) -> Arc { +fn lean_ptr_to_ixon_univ(obj: LeanObject) -> Arc { if obj.is_scalar() { return IxonUniv::zero(); } @@ -133,12 +136,11 @@ fn lean_ptr_to_ixon_univ(obj: LeanObj) -> Arc { /// Check if Lean's Ixon.Univ serialization matches Rust. #[unsafe(no_mangle)] pub extern "C" fn rs_eq_univ_serialization( - univ_obj: LeanObj, - bytes_obj: LeanObj, + univ_obj: LeanIxonUniv, + bytes_obj: LeanByteArray, ) -> bool { - let univ = lean_ptr_to_ixon_univ(univ_obj); - let ba = bytes_obj.as_byte_array(); - let bytes_data = ba.as_bytes(); + let univ = lean_ptr_to_ixon_univ(*univ_obj); + let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_univ(&univ, &mut buf); buf == bytes_data @@ -147,12 +149,11 @@ pub extern "C" fn rs_eq_univ_serialization( /// Check if Lean's Ixon.Expr serialization matches Rust. 
#[unsafe(no_mangle)] pub extern "C" fn rs_eq_expr_serialization( - expr_obj: LeanObj, - bytes_obj: LeanObj, + expr_obj: LeanIxonExpr, + bytes_obj: LeanByteArray, ) -> bool { - let expr = lean_ptr_to_ixon_expr(expr_obj); - let ba = bytes_obj.as_byte_array(); - let bytes_data = ba.as_bytes(); + let expr = lean_ptr_to_ixon_expr(*expr_obj); + let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_expr(&expr, &mut buf); buf == bytes_data @@ -161,12 +162,11 @@ pub extern "C" fn rs_eq_expr_serialization( /// Check if Lean's Ixon.Constant serialization matches Rust. #[unsafe(no_mangle)] pub extern "C" fn rs_eq_constant_serialization( - constant_obj: LeanObj, - bytes_obj: LeanObj, + constant_obj: LeanIxonConstant, + bytes_obj: LeanByteArray, ) -> bool { - let constant = decode_ixon_constant(constant_obj); - let ba = bytes_obj.as_byte_array(); - let bytes_data = ba.as_bytes(); + let constant = decode_ixon_constant(*constant_obj); + let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); constant.put(&mut buf); buf == bytes_data @@ -176,15 +176,14 @@ pub extern "C" fn rs_eq_constant_serialization( /// Due to HashMap ordering differences, we compare deserialized content rather than bytes. 
#[unsafe(no_mangle)] pub extern "C" fn rs_eq_env_serialization( - raw_env_obj: LeanObj, - bytes_obj: LeanObj, + raw_env_obj: LeanIxonRawEnv, + bytes_obj: LeanByteArray, ) -> bool { use crate::ix::ixon::env::Env; use crate::lean::ffi::ixon::env::decode_raw_env; - let decoded = decode_raw_env(raw_env_obj); - let ba = bytes_obj.as_byte_array(); - let bytes_data = ba.as_bytes(); + let decoded = decode_raw_env(*raw_env_obj); + let bytes_data = bytes_obj.as_bytes(); // Deserialize Lean's bytes using Rust's deserializer let rust_env = match Env::get(&mut &bytes_data[..]) { @@ -250,12 +249,11 @@ pub extern "C" fn rs_eq_env_serialization( /// /// Returns: true if Rust can deserialize and re-serialize to the same bytes #[unsafe(no_mangle)] -extern "C" fn rs_env_serde_roundtrip(lean_bytes_obj: LeanObj) -> bool { +extern "C" fn rs_env_serde_roundtrip(lean_bytes_obj: LeanByteArray) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let ba = lean_bytes_obj.as_byte_array(); - let lean_bytes = ba.as_bytes().to_vec(); + let lean_bytes = lean_bytes_obj.as_bytes().to_vec(); // Try to deserialize with Rust let mut slice = lean_bytes.as_slice(); @@ -297,12 +295,11 @@ extern "C" fn rs_env_serde_roundtrip(lean_bytes_obj: LeanObj) -> bool { /// /// Returns: true if Rust can deserialize and the counts match #[unsafe(no_mangle)] -extern "C" fn rs_env_serde_check(lean_bytes_obj: LeanObj) -> bool { +extern "C" fn rs_env_serde_check(lean_bytes_obj: LeanByteArray) -> bool { use crate::ix::ixon::env::Env; // Get bytes from Lean ByteArray - let ba = lean_bytes_obj.as_byte_array(); - let lean_bytes = ba.as_bytes().to_vec(); + let lean_bytes = lean_bytes_obj.as_bytes().to_vec(); // Try to deserialize with Rust let mut slice = lean_bytes.as_slice(); diff --git a/src/lean/ffi/ixon/sharing.rs b/src/lean/ffi/ixon/sharing.rs index b7b1faf4..b4d074c4 100644 --- a/src/lean/ffi/ixon/sharing.rs +++ b/src/lean/ffi/ixon/sharing.rs @@ -7,7 +7,7 @@ use 
crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; -use crate::lean::obj::LeanObj; +use crate::lean::object::{LeanArray, LeanByteArray}; use crate::lean::ffi::ixon::expr::decode_ixon_expr_array; use crate::lean::ffi::ixon::serialize::lean_ptr_to_ixon_expr; @@ -15,9 +15,9 @@ use crate::lean::ffi::ixon::serialize::lean_ptr_to_ixon_expr; /// FFI: Debug sharing analysis - print usage counts for subterms with usage >= 2. /// This helps diagnose why Lean and Rust make different sharing decisions. #[unsafe(no_mangle)] -pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanObj) { - let arr = exprs_obj.as_array(); - let exprs: Vec> = arr.map(|elem| lean_ptr_to_ixon_expr(elem)); +pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanArray) { + let arr = exprs_obj; + let exprs: Vec> = arr.map(lean_ptr_to_ixon_expr); println!("[Rust] Analyzing {} input expressions", exprs.len()); @@ -57,8 +57,8 @@ pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanObj) { /// FFI: Run Rust's sharing analysis on Lean-provided Ixon.Expr array. /// Returns the number of shared items Rust would produce. #[unsafe(no_mangle)] -extern "C" fn rs_analyze_sharing_count(exprs_obj: LeanObj) -> u64 { - let exprs = decode_ixon_expr_array(exprs_obj); +extern "C" fn rs_analyze_sharing_count(exprs_obj: LeanArray) -> u64 { + let exprs = decode_ixon_expr_array(*exprs_obj); let (info_map, _ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -71,11 +71,11 @@ extern "C" fn rs_analyze_sharing_count(exprs_obj: LeanObj) -> u64 { /// Returns number of shared items. 
#[unsafe(no_mangle)] extern "C" fn rs_run_sharing_analysis( - exprs_obj: LeanObj, - out_sharing_vec: LeanObj, - out_rewritten: LeanObj, + exprs_obj: LeanArray, + out_sharing_vec: LeanByteArray, + out_rewritten: LeanByteArray, ) -> u64 { - let exprs = decode_ixon_expr_array(exprs_obj); + let exprs = decode_ixon_expr_array(*exprs_obj); let (info_map, ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -95,10 +95,8 @@ extern "C" fn rs_run_sharing_analysis( } // Write to output arrays - let sharing_ba = out_sharing_vec.as_byte_array(); - unsafe { sharing_ba.set_data(&sharing_bytes) }; - let rewritten_ba = out_rewritten.as_byte_array(); - unsafe { rewritten_ba.set_data(&rewritten_bytes) }; + unsafe { out_sharing_vec.set_data(&sharing_bytes) }; + unsafe { out_rewritten.set_data(&rewritten_bytes) }; shared_hashes.len() as u64 } @@ -111,15 +109,15 @@ extern "C" fn rs_run_sharing_analysis( /// - bits 48-63: Rust sharing count #[unsafe(no_mangle)] extern "C" fn rs_compare_sharing_analysis( - exprs_obj: LeanObj, - lean_sharing_obj: LeanObj, - _lean_rewritten_obj: LeanObj, + exprs_obj: LeanArray, + lean_sharing_obj: LeanArray, + _lean_rewritten_obj: LeanArray, ) -> u64 { // Decode input expressions - let exprs = decode_ixon_expr_array(exprs_obj); + let exprs = decode_ixon_expr_array(*exprs_obj); // Decode Lean's sharing vector - let lean_sharing = decode_ixon_expr_array(lean_sharing_obj); + let lean_sharing = decode_ixon_expr_array(*lean_sharing_obj); // Run Rust's sharing analysis let (info_map, ptr_to_hash) = analyze_block(&exprs, false); diff --git a/src/lean/ffi/ixon/univ.rs b/src/lean/ffi/ixon/univ.rs index c4e8affc..a6e6ae04 100644 --- a/src/lean/ffi/ixon/univ.rs +++ b/src/lean/ffi/ixon/univ.rs @@ -3,13 +3,13 @@ use std::sync::Arc; use crate::ix::ixon::univ::Univ; -use crate::lean::obj::{IxonUniv, LeanArray, LeanCtor, LeanObj}; +use crate::lean::object::{LeanIxonUniv, LeanArray, LeanCtor, LeanObject}; -impl IxonUniv { +impl 
LeanIxonUniv { /// Build Ixon.Univ pub fn build(univ: &Univ) -> Self { let obj = match univ { - Univ::Zero => LeanObj::box_usize(0), + Univ::Zero => LeanObject::box_usize(0), Univ::Succ(inner) => { let ctor = LeanCtor::alloc(1, 1, 0); ctor.set(0, Self::build(inner)); @@ -47,7 +47,7 @@ impl IxonUniv { /// Decode Ixon.Univ (recursive enum). pub fn decode(self) -> Univ { - let obj: LeanObj = *self; + let obj: LeanObject = *self; if obj.is_scalar() { return Univ::Zero; } @@ -69,7 +69,7 @@ impl IxonUniv { } /// Decode Array Ixon.Univ. - pub fn decode_array(obj: LeanObj) -> Vec> { + pub fn decode_array(obj: LeanObject) -> Vec> { let arr = obj.as_array(); arr.map(|elem| Arc::new(Self::new(elem).decode())) } @@ -77,12 +77,12 @@ impl IxonUniv { /// Build an Array of Ixon.Univ (standalone wrapper). pub fn build_ixon_univ_array(univs: &[Arc]) -> LeanArray { - IxonUniv::build_array(univs) + LeanIxonUniv::build_array(univs) } /// Decode Array Ixon.Univ (standalone wrapper). -pub fn decode_ixon_univ_array(obj: LeanObj) -> Vec> { - IxonUniv::decode_array(obj) +pub fn decode_ixon_univ_array(obj: LeanObject) -> Vec> { + LeanIxonUniv::decode_array(obj) } // ============================================================================= @@ -91,7 +91,7 @@ pub fn decode_ixon_univ_array(obj: LeanObj) -> Vec> { /// Round-trip Ixon.Univ. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_univ(obj: LeanObj) -> LeanObj { - let univ = IxonUniv::new(obj).decode(); - IxonUniv::build(&univ).into() +pub extern "C" fn rs_roundtrip_ixon_univ(obj: LeanIxonUniv) -> LeanIxonUniv { + let univ = obj.decode(); + LeanIxonUniv::build(&univ) } diff --git a/src/lean/ffi/keccak.rs b/src/lean/ffi/keccak.rs index 622369db..7ae4cf0c 100644 --- a/src/lean/ffi/keccak.rs +++ b/src/lean/ffi/keccak.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use tiny_keccak::{Hasher, Keccak}; -use crate::lean::obj::{ExternalClass, LeanByteArray, LeanExternal, LeanObj}; +use crate::lean::object::{ExternalClass, LeanByteArray, LeanExternal, LeanObject}; static KECCAK_CLASS: OnceLock = OnceLock::new(); @@ -12,7 +12,7 @@ fn keccak_class() -> &'static ExternalClass { /// `Keccak.Hasher.init : Unit → Hasher` #[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_init(_unit: LeanObj) -> LeanExternal { +extern "C" fn rs_keccak256_hasher_init(_unit: LeanObject) -> LeanExternal { LeanExternal::alloc(keccak_class(), Keccak::v256()) } diff --git a/src/lean/ffi/lean_env.rs b/src/lean/ffi/lean_env.rs index c1a42cd9..6a52f3b7 100644 --- a/src/lean/ffi/lean_env.rs +++ b/src/lean/ffi/lean_env.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use rustc_hash::FxHashMap; use crate::lean::nat::Nat; -use crate::lean::obj::LeanObj; +use crate::lean::object::LeanObject; use crate::{ ix::compile::compile_env, @@ -36,17 +36,17 @@ use crate::{ const PARALLEL_THRESHOLD: usize = 100; -/// Wrapper to allow sending `LeanObj` across threads. The underlying Lean +/// Wrapper to allow sending `LeanObject` across threads. The underlying Lean /// objects must remain valid for the entire duration of parallel decoding. 
#[derive(Clone, Copy)] -struct SendObj(LeanObj); +struct SendObj(LeanObject); unsafe impl Send for SendObj {} unsafe impl Sync for SendObj {} impl SendObj { #[inline] - fn get(self) -> LeanObj { + fn get(self) -> LeanObject { self.0 } } @@ -94,12 +94,12 @@ impl<'g> Cache<'g> { } } -fn collect_list_objs(obj: LeanObj) -> Vec { +fn collect_list_objs(obj: LeanObject) -> Vec { obj.as_list().iter().collect() } // Name decoding with global cache -pub fn lean_ptr_to_name(obj: LeanObj, global: &GlobalCache) -> Name { +pub fn lean_ptr_to_name(obj: LeanObject, global: &GlobalCache) -> Name { let ptr = obj.as_ptr(); // Fast path: check if already cached if let Some(name) = global.names.get(&ptr) { @@ -125,7 +125,7 @@ pub fn lean_ptr_to_name(obj: LeanObj, global: &GlobalCache) -> Name { global.names.entry(ptr).or_insert(name).clone() } -fn lean_ptr_to_level(obj: LeanObj, cache: &mut Cache<'_>) -> Level { +fn lean_ptr_to_level(obj: LeanObject, cache: &mut Cache<'_>) -> Level { let ptr = obj.as_ptr(); if let Some(cached) = cache.local.univs.get(&ptr) { return cached.clone(); @@ -164,7 +164,7 @@ fn lean_ptr_to_level(obj: LeanObj, cache: &mut Cache<'_>) -> Level { level } -fn lean_ptr_to_substring(obj: LeanObj) -> Substring { +fn lean_ptr_to_substring(obj: LeanObject) -> Substring { let ctor = obj.as_ctor(); let [str_obj, start_pos, stop_pos] = ctor.objs(); let str = str_obj.as_string().to_string(); @@ -173,7 +173,7 @@ fn lean_ptr_to_substring(obj: LeanObj) -> Substring { Substring { str, start_pos, stop_pos } } -fn lean_ptr_to_source_info(obj: LeanObj) -> SourceInfo { +fn lean_ptr_to_source_info(obj: LeanObject) -> SourceInfo { if obj.is_scalar() { return SourceInfo::None; } @@ -199,7 +199,7 @@ fn lean_ptr_to_source_info(obj: LeanObj) -> SourceInfo { } fn lean_ptr_to_syntax_preresolved( - obj: LeanObj, + obj: LeanObject, cache: &mut Cache<'_>, ) -> SyntaxPreresolved { let ctor = obj.as_ctor(); @@ -223,7 +223,7 @@ fn lean_ptr_to_syntax_preresolved( } } -fn lean_ptr_to_syntax(obj: 
LeanObj, cache: &mut Cache<'_>) -> Syntax { +fn lean_ptr_to_syntax(obj: LeanObject, cache: &mut Cache<'_>) -> Syntax { if obj.is_scalar() { return Syntax::Missing; } @@ -258,7 +258,7 @@ fn lean_ptr_to_syntax(obj: LeanObj, cache: &mut Cache<'_>) -> Syntax { } fn lean_ptr_to_name_data_value( - obj: LeanObj, + obj: LeanObject, cache: &mut Cache<'_>, ) -> (Name, DataValue) { let ctor = obj.as_ctor(); @@ -288,7 +288,7 @@ fn lean_ptr_to_name_data_value( (name, data_value) } -pub fn lean_ptr_to_expr(obj: LeanObj, cache: &mut Cache<'_>) -> Expr { +pub fn lean_ptr_to_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { let ptr = obj.as_ptr(); if let Some(cached) = cache.local.exprs.get(&ptr) { return cached.clone(); @@ -399,7 +399,7 @@ pub fn lean_ptr_to_expr(obj: LeanObj, cache: &mut Cache<'_>) -> Expr { } fn lean_ptr_to_recursor_rule( - obj: LeanObj, + obj: LeanObject, cache: &mut Cache<'_>, ) -> RecursorRule { let ctor = obj.as_ctor(); @@ -411,7 +411,7 @@ fn lean_ptr_to_recursor_rule( } fn lean_ptr_to_constant_val( - obj: LeanObj, + obj: LeanObject, cache: &mut Cache<'_>, ) -> ConstantVal { let ctor = obj.as_ctor(); @@ -426,7 +426,7 @@ fn lean_ptr_to_constant_val( } pub fn lean_ptr_to_constant_info( - obj: LeanObj, + obj: LeanObject, cache: &mut Cache<'_>, ) -> ConstantInfo { let ctor = obj.as_ctor(); @@ -609,7 +609,7 @@ pub fn lean_ptr_to_constant_info( /// Decode a single (Name, ConstantInfo) pair. fn decode_name_constant_info( - obj: LeanObj, + obj: LeanObject, global: &GlobalCache, ) -> (Name, ConstantInfo) { let mut cache = Cache::new(global); @@ -621,7 +621,7 @@ fn decode_name_constant_info( } // Decode a Lean environment in parallel with hybrid caching. -pub fn lean_ptr_to_env(obj: LeanObj) -> Env { +pub fn lean_ptr_to_env(obj: LeanObject) -> Env { // Phase 1: Collect pointers (sequential) let objs = collect_list_objs(obj); @@ -651,7 +651,7 @@ pub fn lean_ptr_to_env(obj: LeanObj) -> Env { } /// Sequential fallback for small environments. 
-pub fn lean_ptr_to_env_sequential(obj: LeanObj) -> Env { +pub fn lean_ptr_to_env_sequential(obj: LeanObject) -> Env { let objs = collect_list_objs(obj); let global = GlobalCache::new(); let mut env = Env::default(); @@ -669,7 +669,7 @@ pub fn lean_ptr_to_env_sequential(obj: LeanObj) -> Env { // roundtrip and size analysis. Output is intentionally suppressed; re-enable // individual `eprintln!` lines when debugging locally. #[unsafe(no_mangle)] -extern "C" fn rs_tmp_decode_const_map(obj: LeanObj) -> usize { +extern "C" fn rs_tmp_decode_const_map(obj: LeanObject) -> usize { // Enable hash-consed size tracking for debugging // TODO: Make this configurable via CLI instead of hardcoded crate::ix::compile::TRACK_HASH_CONSED_SIZE diff --git a/src/lean/ffi/primitives.rs b/src/lean/ffi/primitives.rs index 084d2009..3db23afb 100644 --- a/src/lean/ffi/primitives.rs +++ b/src/lean/ffi/primitives.rs @@ -6,10 +6,10 @@ //! - List, Array, ByteArray //! - AssocList, HashMap -use crate::lean::lean::lean_uint64_to_nat; +use crate::lean::lean_sys::lean_uint64_to_nat; use crate::lean::nat::Nat; -use crate::lean::obj::{ - LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObj, LeanString, +use crate::lean::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObject, LeanString, }; // ============================================================================= @@ -17,16 +17,16 @@ use crate::lean::obj::{ // ============================================================================= /// Build a Lean Nat from a Rust Nat. 
-pub fn build_nat(n: &Nat) -> LeanObj { +pub fn build_nat(n: &Nat) -> LeanObject { // Try to get as u64 first if let Some(val) = n.to_u64() { // For small values that fit in a boxed scalar (max value is usize::MAX >> 1) if val <= (usize::MAX >> 1) as u64 { #[allow(clippy::cast_possible_truncation)] - return LeanObj::box_usize(val as usize); + return LeanObject::box_usize(val as usize); } // For larger u64 values, use lean_uint64_to_nat - return unsafe { LeanObj::from_raw(lean_uint64_to_nat(val).cast()) }; + return unsafe { LeanObject::from_raw(lean_uint64_to_nat(val).cast()) }; } // For values larger than u64, convert to limbs and use GMP let bytes = n.to_le_bytes(); @@ -37,7 +37,7 @@ pub fn build_nat(n: &Nat) -> LeanObj { limbs.push(u64::from_le_bytes(arr)); } unsafe { - LeanObj::from_raw(crate::lean::nat::lean_nat_from_limbs( + LeanObject::from_raw(crate::lean::nat::lean_nat_from_limbs( limbs.len(), limbs.as_ptr(), )) @@ -50,34 +50,32 @@ pub fn build_nat(n: &Nat) -> LeanObj { /// Round-trip a Nat: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanObject) -> LeanObject { let nat = Nat::from_obj(nat_ptr); build_nat(&nat) } /// Round-trip a String: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_string(s_ptr: LeanObj) -> LeanObj { - let s = s_ptr.as_string(); - *LeanString::from_str(&s.to_string()) +pub extern "C" fn rs_roundtrip_string(s_ptr: LeanString) -> LeanString { + let s = s_ptr.to_string(); + LeanString::from_str(&s) } /// Round-trip a List Nat: decode from Lean, re-encode to Lean. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanList) -> LeanObject { // Decode list to Vec - let list = list_ptr.as_list(); - let nats: Vec = list.collect(Nat::from_obj); + let nats: Vec = list_ptr.collect(Nat::from_obj); // Re-encode as Lean List build_list_nat(&nats) } /// Round-trip an Array Nat: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_array_nat(arr_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_array_nat(arr_ptr: LeanArray) -> LeanObject { // Decode array - let arr = arr_ptr.as_array(); - let nats: Vec = arr.map(Nat::from_obj); + let nats: Vec = arr_ptr.map(Nat::from_obj); // Re-encode as Lean Array build_array_nat(&nats) } @@ -91,7 +89,7 @@ pub extern "C" fn rs_roundtrip_bytearray(ba: LeanByteArray) -> LeanByteArray { /// Round-trip a Bool: decode from Lean, re-encode. /// Bool in Lean is passed as unboxed scalar: false = 0, true = 1 #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanObject) -> LeanObject { // Bool is passed as unboxed scalar - just return it as-is bool_ptr } @@ -101,13 +99,13 @@ pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanObj) -> LeanObj { // ============================================================================= /// Build a Lean List Nat from a Vec. -fn build_list_nat(nats: &[Nat]) -> LeanObj { - let items: Vec = nats.iter().map(build_nat).collect(); +fn build_list_nat(nats: &[Nat]) -> LeanObject { + let items: Vec = nats.iter().map(build_nat).collect(); *LeanList::from_iter(items) } /// Build a Lean Array Nat from a Vec. 
-fn build_array_nat(nats: &[Nat]) -> LeanObj { +fn build_array_nat(nats: &[Nat]) -> LeanObject { let arr = LeanArray::alloc(nats.len()); for (i, nat) in nats.iter().enumerate() { arr.set(i, build_nat(nat)); @@ -122,7 +120,7 @@ fn build_array_nat(nats: &[Nat]) -> LeanObj { /// Round-trip a Point (structure with x, y : Nat). /// Point is a structure, which in Lean is represented as a constructor with tag 0. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_point(point_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_point(point_ptr: LeanObject) -> LeanObject { let ctor = point_ptr.as_ctor(); // Point is a structure (single constructor, tag 0) with 2 Nat fields let x = Nat::from_obj(ctor.get(0)); @@ -137,11 +135,11 @@ pub extern "C" fn rs_roundtrip_point(point_ptr: LeanObj) -> LeanObj { /// Round-trip a NatTree (inductive with leaf : Nat → NatTree | node : NatTree → NatTree → NatTree). #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat_tree(tree_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_nat_tree(tree_ptr: LeanObject) -> LeanObject { roundtrip_nat_tree_recursive(tree_ptr) } -fn roundtrip_nat_tree_recursive(obj: LeanObj) -> LeanObj { +fn roundtrip_nat_tree_recursive(obj: LeanObject) -> LeanObject { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -168,18 +166,20 @@ fn roundtrip_nat_tree_recursive(obj: LeanObj) -> LeanObj { /// AssocList: nil (tag 0, 0 fields) | cons key value tail (tag 1, 3 fields) /// Note: nil with 0 fields may be represented as lean_box(0) #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_assoclist_nat_nat(list_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_assoclist_nat_nat( + list_ptr: LeanObject, +) -> LeanObject { if list_ptr.is_scalar() { - return LeanObj::box_usize(0); + return LeanObject::box_usize(0); } let pairs = decode_assoc_list_nat_nat(list_ptr); build_assoc_list_nat_nat(&pairs) } /// Build an AssocList Nat Nat from pairs -fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> 
LeanObj { +fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> LeanObject { // Build in reverse to preserve order - let mut list = LeanObj::box_usize(0); // nil + let mut list = LeanObject::box_usize(0); // nil for (k, v) in pairs.iter().rev() { let cons = LeanCtor::alloc(1, 3, 0); // AssocList.cons cons.set(0, build_nat(k)); @@ -193,8 +193,8 @@ fn build_assoc_list_nat_nat(pairs: &[(Nat, Nat)]) -> LeanObj { /// Round-trip a DHashMap.Raw Nat Nat. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( - raw_ptr: LeanObj, -) -> LeanObj { + raw_ptr: LeanObject, +) -> LeanObject { if raw_ptr.is_scalar() { return raw_ptr; } @@ -214,7 +214,7 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( // Rebuild buckets let new_buckets = LeanArray::alloc(num_buckets); - let nil = LeanObj::box_usize(0); + let nil = LeanObject::box_usize(0); for i in 0..num_buckets { new_buckets.set(i, nil); } @@ -261,7 +261,9 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( /// - nil: lean_box(0) /// - cons key value tail: ctor 1, 3 fields #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_hashmap_nat_nat(map_ptr: LeanObj) -> LeanObj { +pub extern "C" fn rs_roundtrip_hashmap_nat_nat( + map_ptr: LeanObject, +) -> LeanObject { let raw_ctor = map_ptr.as_ctor(); // Due to unboxing, map_ptr points directly to Raw let size = Nat::from_obj(raw_ctor.get(0)); @@ -280,7 +282,7 @@ pub extern "C" fn rs_roundtrip_hashmap_nat_nat(map_ptr: LeanObj) -> LeanObj { let new_buckets = LeanArray::alloc(num_buckets); // Initialize all buckets to AssocList.nil (lean_box(0)) - let nil = LeanObj::box_usize(0); + let nil = LeanObject::box_usize(0); for i in 0..num_buckets { new_buckets.set(i, nil); } @@ -321,7 +323,7 @@ pub extern "C" fn rs_roundtrip_hashmap_nat_nat(map_ptr: LeanObj) -> LeanObj { /// Decode a Lean AssocList Nat Nat to Vec of pairs /// AssocList: nil (tag 0) | cons key value tail (tag 1, 3 fields) -pub fn decode_assoc_list_nat_nat(obj: LeanObj) -> Vec<(Nat, Nat)> { +pub fn 
decode_assoc_list_nat_nat(obj: LeanObject) -> Vec<(Nat, Nat)> { let mut result = Vec::new(); let mut current = obj; @@ -359,7 +361,7 @@ pub extern "C" fn rs_bytearray_to_u64_le(ba: LeanByteArray) -> u64 { return 0; } unsafe { - let cptr = crate::lean::lean::lean_sarray_cptr(ba.as_ptr() as *mut _); + let cptr = crate::lean::lean_sys::lean_sarray_cptr(ba.as_ptr() as *mut _); std::ptr::read_unaligned(cptr as *const u64) } } diff --git a/src/lean/ffi/unsigned.rs b/src/lean/ffi/unsigned.rs index c518f7fc..226fe0ab 100644 --- a/src/lean/ffi/unsigned.rs +++ b/src/lean/ffi/unsigned.rs @@ -1,4 +1,4 @@ -use crate::lean::obj::LeanByteArray; +use crate::lean::object::LeanByteArray; #[unsafe(no_mangle)] extern "C" fn c_u16_to_le_bytes(v: u16) -> LeanByteArray { diff --git a/src/lean/nat.rs b/src/lean/nat.rs index fae7db05..3a61261e 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -9,7 +9,7 @@ use std::mem::MaybeUninit; use num_bigint::BigUint; -use crate::lean::obj::LeanObj; +use crate::lean::object::LeanObject; /// Arbitrary-precision natural number, wrapping `BigUint`. #[derive(Hash, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)] @@ -39,7 +39,7 @@ impl Nat { /// Decode a `Nat` from a Lean object pointer. Handles both scalar (unboxed) /// and heap-allocated (GMP `mpz_object`) representations. pub fn from_ptr(ptr: *const c_void) -> Nat { - let obj = unsafe { LeanObj::from_raw(ptr) }; + let obj = unsafe { LeanObject::from_raw(ptr) }; if obj.is_scalar() { let u = obj.unbox_usize(); Nat(BigUint::from_bytes_le(&u.to_le_bytes())) @@ -50,8 +50,8 @@ impl Nat { } } - /// Decode a `Nat` from a `LeanObj`. Convenience wrapper over `from_ptr`. - pub fn from_obj(obj: crate::lean::obj::LeanObj) -> Nat { + /// Decode a `Nat` from a `LeanObject`. Convenience wrapper over `from_ptr`. 
+ pub fn from_obj(obj: LeanObject) -> Nat { Self::from_ptr(obj.as_ptr()) } @@ -105,7 +105,7 @@ impl Mpz { // GMP interop for building Lean Nat objects from limbs // ============================================================================= -use crate::lean::lean::lean_uint64_to_nat; +use crate::lean::lean_sys::lean_uint64_to_nat; /// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1 const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64; @@ -142,12 +142,12 @@ pub unsafe fn lean_nat_from_limbs( limbs: *const u64, ) -> *mut c_void { if num_limbs == 0 { - return LeanObj::box_usize(0).as_mut_ptr(); + return LeanObject::box_usize(0).as_mut_ptr(); } let first = unsafe { *limbs }; if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT { #[allow(clippy::cast_possible_truncation)] // only targets 64-bit - return LeanObj::box_usize(first as usize).as_mut_ptr(); + return LeanObject::box_usize(first as usize).as_mut_ptr(); } if num_limbs == 1 { return unsafe { lean_uint64_to_nat(first).cast() }; diff --git a/src/lean/obj.rs b/src/lean/object.rs similarity index 74% rename from src/lean/obj.rs rename to src/lean/object.rs index 0ecee682..4fcecb22 100644 --- a/src/lean/obj.rs +++ b/src/lean/object.rs @@ -8,19 +8,19 @@ use std::ffi::c_void; use std::marker::PhantomData; use std::ops::Deref; -use crate::lean::lean; +use crate::lean::lean_sys; use crate::lean::safe_cstring; // ============================================================================= -// LeanObj — Untyped base wrapper +// LeanObject — Untyped base wrapper // ============================================================================= /// Untyped wrapper around a raw Lean object pointer. #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanObj(*const c_void); +pub struct LeanObject(*const c_void); -impl LeanObj { +impl LeanObject { /// Wrap a raw pointer without any tag check. 
/// /// # Safety @@ -52,21 +52,21 @@ impl LeanObj { assert!(!self.is_scalar(), "tag() called on scalar"); #[allow(clippy::cast_possible_truncation)] unsafe { - lean::lean_obj_tag(self.0 as *mut _) as u8 + lean_sys::lean_obj_tag(self.0 as *mut _) as u8 } } #[inline] pub fn inc_ref(self) { if !self.is_scalar() { - unsafe { lean::lean_inc_ref(self.0 as *mut _) } + unsafe { lean_sys::lean_inc_ref(self.0 as *mut _) } } } #[inline] pub fn dec_ref(self) { if !self.is_scalar() { - unsafe { lean::lean_dec_ref(self.0 as *mut _) } + unsafe { lean_sys::lean_dec_ref(self.0 as *mut _) } } } @@ -84,12 +84,12 @@ impl LeanObj { #[inline] pub fn box_u64(n: u64) -> Self { - Self(unsafe { lean::lean_box_uint64(n) }.cast()) + Self(unsafe { lean_sys::lean_box_uint64(n) }.cast()) } #[inline] pub fn unbox_u64(self) -> u64 { - unsafe { lean::lean_unbox_uint64(self.0 as *mut _) } + unsafe { lean_sys::lean_unbox_uint64(self.0 as *mut _) } } /// Interpret as a constructor object (tag 0–243). @@ -137,12 +137,12 @@ impl LeanObj { #[inline] pub fn box_u32(n: u32) -> Self { - Self(unsafe { lean::lean_box_uint32(n) }.cast()) + Self(unsafe { lean_sys::lean_box_uint32(n) }.cast()) } #[inline] pub fn unbox_u32(self) -> u32 { - unsafe { lean::lean_unbox_uint32(self.0 as *mut _) } + unsafe { lean_sys::lean_unbox_uint32(self.0 as *mut _) } } } @@ -153,12 +153,12 @@ impl LeanObj { /// Typed wrapper for a Lean `Array α` object (tag 246). #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanArray(LeanObj); +pub struct LeanArray(LeanObject); impl Deref for LeanArray { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -169,35 +169,36 @@ impl LeanArray { /// # Safety /// The pointer must be a valid Lean `Array` object. 
pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(!obj.is_scalar() && obj.tag() == 246); Self(obj) } /// Allocate a new array with `size` elements (capacity = size). pub fn alloc(size: usize) -> Self { - let obj = unsafe { lean::lean_alloc_array(size, size) }; - Self(LeanObj(obj.cast())) + let obj = unsafe { lean_sys::lean_alloc_array(size, size) }; + Self(LeanObject(obj.cast())) } pub fn len(&self) -> usize { - unsafe { lean::lean_array_size(self.0.as_ptr() as *mut _) } + unsafe { lean_sys::lean_array_size(self.0.as_ptr() as *mut _) } } pub fn is_empty(&self) -> bool { self.len() == 0 } - pub fn get(&self, i: usize) -> LeanObj { - LeanObj( - unsafe { lean::lean_array_get_core(self.0.as_ptr() as *mut _, i) }.cast(), + pub fn get(&self, i: usize) -> LeanObject { + LeanObject( + unsafe { lean_sys::lean_array_get_core(self.0.as_ptr() as *mut _, i) } + .cast(), ) } - pub fn set(&self, i: usize, val: impl Into) { - let val: LeanObj = val.into(); + pub fn set(&self, i: usize, val: impl Into) { + let val: LeanObject = val.into(); unsafe { - lean::lean_array_set_core( + lean_sys::lean_array_set_core( self.0.as_ptr() as *mut _, i, val.as_ptr() as *mut _, @@ -206,20 +207,20 @@ impl LeanArray { } /// Return a slice over the array elements. - pub fn data(&self) -> &[LeanObj] { + pub fn data(&self) -> &[LeanObject] { unsafe { - let cptr = lean::lean_array_cptr(self.0.as_ptr() as *mut _); - // Safety: LeanObj is repr(transparent) over *const c_void, and + let cptr = lean_sys::lean_array_cptr(self.0.as_ptr() as *mut _); + // Safety: LeanObject is repr(transparent) over *const c_void, and // lean_array_cptr returns *mut *mut lean_object which has the same layout. 
std::slice::from_raw_parts(cptr.cast(), self.len()) } } - pub fn iter(&self) -> impl Iterator + '_ { + pub fn iter(&self) -> impl Iterator + '_ { self.data().iter().copied() } - pub fn map(&self, f: impl Fn(LeanObj) -> T) -> Vec { + pub fn map(&self, f: impl Fn(LeanObject) -> T) -> Vec { self.iter().map(f).collect() } } @@ -231,12 +232,12 @@ impl LeanArray { /// Typed wrapper for a Lean `ByteArray` object (tag 248). #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanByteArray(LeanObj); +pub struct LeanByteArray(LeanObject); impl Deref for LeanByteArray { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -247,29 +248,29 @@ impl LeanByteArray { /// # Safety /// The pointer must be a valid Lean `ByteArray` object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(!obj.is_scalar() && obj.tag() == 248); Self(obj) } /// Allocate a new byte array with `size` bytes (capacity = size). pub fn alloc(size: usize) -> Self { - let obj = unsafe { lean::lean_alloc_sarray(1, size, size) }; - Self(LeanObj(obj.cast())) + let obj = unsafe { lean_sys::lean_alloc_sarray(1, size, size) }; + Self(LeanObject(obj.cast())) } /// Allocate a new byte array and copy `data` into it. pub fn from_bytes(data: &[u8]) -> Self { let arr = Self::alloc(data.len()); unsafe { - let cptr = lean::lean_sarray_cptr(arr.0.as_ptr() as *mut _); + let cptr = lean_sys::lean_sarray_cptr(arr.0.as_ptr() as *mut _); std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); } arr } pub fn len(&self) -> usize { - unsafe { lean::lean_sarray_size(self.0.as_ptr() as *mut _) } + unsafe { lean_sys::lean_sarray_size(self.0.as_ptr() as *mut _) } } pub fn is_empty(&self) -> bool { @@ -279,7 +280,7 @@ impl LeanByteArray { /// Return the byte contents as a slice. 
pub fn as_bytes(&self) -> &[u8] { unsafe { - let cptr = lean::lean_sarray_cptr(self.0.as_ptr() as *mut _); + let cptr = lean_sys::lean_sarray_cptr(self.0.as_ptr() as *mut _); std::slice::from_raw_parts(cptr, self.len()) } } @@ -291,7 +292,7 @@ impl LeanByteArray { pub unsafe fn set_data(&self, data: &[u8]) { unsafe { let obj = self.0.as_mut_ptr(); - let cptr = lean::lean_sarray_cptr(obj as *mut _); + let cptr = lean_sys::lean_sarray_cptr(obj.cast()); std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); // Update m_size: at offset 8 (after lean_object header) *obj.cast::().add(8).cast::() = data.len(); @@ -306,12 +307,12 @@ impl LeanByteArray { /// Typed wrapper for a Lean `String` object (tag 249). #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanString(LeanObj); +pub struct LeanString(LeanObject); impl Deref for LeanString { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -322,7 +323,7 @@ impl LeanString { /// # Safety /// The pointer must be a valid Lean `String` object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(!obj.is_scalar() && obj.tag() == 249); Self(obj) } @@ -330,16 +331,16 @@ impl LeanString { /// Create a Lean string from a Rust `&str`. pub fn from_str(s: &str) -> Self { let c = safe_cstring(s); - let obj = unsafe { lean::lean_mk_string(c.as_ptr()) }; - Self(LeanObj(obj.cast())) + let obj = unsafe { lean_sys::lean_mk_string(c.as_ptr()) }; + Self(LeanObject(obj.cast())) } /// Decode the Lean string into a Rust `String`. 
pub fn to_string(&self) -> String { unsafe { let obj = self.0.as_ptr() as *mut _; - let len = lean::lean_string_size(obj) - 1; // m_size includes NUL - let data = lean::lean_string_cstr(obj); + let len = lean_sys::lean_string_size(obj) - 1; // m_size includes NUL + let data = lean_sys::lean_string_cstr(obj); let bytes = std::slice::from_raw_parts(data.cast::(), len); String::from_utf8_unchecked(bytes.to_vec()) } @@ -347,7 +348,7 @@ impl LeanString { /// Number of data bytes (excluding the trailing NUL). pub fn byte_len(&self) -> usize { - unsafe { lean::lean_string_size(self.0.as_ptr() as *mut _) - 1 } + unsafe { lean_sys::lean_string_size(self.0.as_ptr() as *mut _) - 1 } } } @@ -358,12 +359,12 @@ impl LeanString { /// Typed wrapper for a Lean constructor object (tag 0–243). #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanCtor(LeanObj); +pub struct LeanCtor(LeanObject); impl Deref for LeanCtor { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -374,7 +375,7 @@ impl LeanCtor { /// # Safety /// The pointer must be a valid Lean constructor object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(!obj.is_scalar() && obj.tag() <= 243); Self(obj) } @@ -383,9 +384,9 @@ impl LeanCtor { pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { #[allow(clippy::cast_possible_truncation)] let obj = unsafe { - lean::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) + lean_sys::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) }; - Self(LeanObj(obj.cast())) + Self(LeanObject(obj.cast())) } pub fn tag(&self) -> u8 { @@ -393,20 +394,20 @@ impl LeanCtor { } /// Get the `i`-th object field via `lean_ctor_get`. 
- pub fn get(&self, i: usize) -> LeanObj { + pub fn get(&self, i: usize) -> LeanObject { #[allow(clippy::cast_possible_truncation)] - LeanObj( - unsafe { lean::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } + LeanObject( + unsafe { lean_sys::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } .cast(), ) } /// Set the `i`-th object field via `lean_ctor_set`. - pub fn set(&self, i: usize, val: impl Into) { - let val: LeanObj = val.into(); + pub fn set(&self, i: usize, val: impl Into) { + let val: LeanObject = val.into(); #[allow(clippy::cast_possible_truncation)] unsafe { - lean::lean_ctor_set( + lean_sys::lean_ctor_set( self.0.as_ptr() as *mut _, i as u32, val.as_ptr() as *mut _, @@ -418,7 +419,11 @@ impl LeanCtor { pub fn set_u8(&self, offset: usize, val: u8) { #[allow(clippy::cast_possible_truncation)] unsafe { - lean::lean_ctor_set_uint8(self.0.as_ptr() as *mut _, offset as u32, val); + lean_sys::lean_ctor_set_uint8( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); } } @@ -426,7 +431,11 @@ impl LeanCtor { pub fn set_u64(&self, offset: usize, val: u64) { #[allow(clippy::cast_possible_truncation)] unsafe { - lean::lean_ctor_set_uint64(self.0.as_ptr() as *mut _, offset as u32, val); + lean_sys::lean_ctor_set_uint64( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); } } @@ -435,9 +444,9 @@ impl LeanCtor { /// This bypasses `lean_ctor_get`'s bounds check, which is necessary when /// reading past the declared object fields into the scalar area (e.g. for /// `Expr.Data`). - pub fn objs(&self) -> [LeanObj; N] { + pub fn objs(&self) -> [LeanObject; N] { let base = unsafe { self.0.as_ptr().cast::<*const c_void>().add(1) }; - std::array::from_fn(|i| LeanObj(unsafe { *base.add(i) })) + std::array::from_fn(|i| LeanObject(unsafe { *base.add(i) })) } /// Read a `u64` scalar at `offset` bytes past `num_objs` object fields. @@ -467,12 +476,12 @@ impl LeanCtor { /// Typed wrapper for a Lean external object (tag 254) holding a `T`. 
#[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanExternal(LeanObj, PhantomData); +pub struct LeanExternal(LeanObject, PhantomData); impl Deref for LeanExternal { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -484,7 +493,7 @@ impl LeanExternal { /// The pointer must be a valid Lean external object whose data pointer /// points to a valid `T`. pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(!obj.is_scalar() && obj.tag() == 254); Self(obj, PhantomData) } @@ -492,15 +501,16 @@ impl LeanExternal { /// Allocate a new external object holding `data`. pub fn alloc(class: &ExternalClass, data: T) -> Self { let data_ptr = Box::into_raw(Box::new(data)); - let obj = - unsafe { lean::lean_alloc_external(class.0 as *mut _, data_ptr.cast()) }; - Self(LeanObj(obj.cast()), PhantomData) + let obj = unsafe { + lean_sys::lean_alloc_external(class.0 as *mut _, data_ptr.cast()) + }; + Self(LeanObject(obj.cast()), PhantomData) } /// Get a reference to the wrapped data. pub fn get(&self) -> &T { unsafe { - &*lean::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() + &*lean_sys::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() } } } @@ -523,11 +533,12 @@ impl ExternalClass { /// The `finalizer` callback must correctly free the external data, and /// `foreach` must correctly visit any Lean object references held by the data. 
pub unsafe fn register( - finalizer: lean::lean_external_finalize_proc, - foreach: lean::lean_external_foreach_proc, + finalizer: lean_sys::lean_external_finalize_proc, + foreach: lean_sys::lean_external_foreach_proc, ) -> Self { Self( - unsafe { lean::lean_register_external_class(finalizer, foreach) }.cast(), + unsafe { lean_sys::lean_register_external_class(finalizer, foreach) } + .cast(), ) } @@ -552,12 +563,12 @@ impl ExternalClass { /// Typed wrapper for a Lean `List α` (nil = scalar `lean_box(0)`, cons = ctor tag 1). #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanList(LeanObj); +pub struct LeanList(LeanObject); impl Deref for LeanList { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -568,18 +579,18 @@ impl LeanList { /// # Safety /// The pointer must be a valid Lean `List` object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(obj.is_scalar() || obj.tag() == 1); Self(obj) } /// The empty list. pub fn nil() -> Self { - Self(LeanObj::box_usize(0)) + Self(LeanObject::box_usize(0)) } /// Prepend `head` to `tail`. - pub fn cons(head: impl Into, tail: LeanList) -> Self { + pub fn cons(head: impl Into, tail: LeanList) -> Self { let ctor = LeanCtor::alloc(1, 2, 0); ctor.set(0, head); ctor.set(1, tail); @@ -594,15 +605,15 @@ impl LeanList { LeanListIter(self.0) } - pub fn collect(&self, f: impl Fn(LeanObj) -> T) -> Vec { + pub fn collect(&self, f: impl Fn(LeanObject) -> T) -> Vec { self.iter().map(f).collect() } - /// Build a list from an iterator of values convertible to `LeanObj`. + /// Build a list from an iterator of values convertible to `LeanObject`. 
pub fn from_iter( - items: impl IntoIterator>, + items: impl IntoIterator>, ) -> Self { - let items: Vec = items.into_iter().map(Into::into).collect(); + let items: Vec = items.into_iter().map(Into::into).collect(); let mut list = Self::nil(); for item in items.into_iter().rev() { list = Self::cons(item, list); @@ -612,10 +623,10 @@ impl LeanList { } /// Iterator over the elements of a `LeanList`. -pub struct LeanListIter(LeanObj); +pub struct LeanListIter(LeanObject); impl Iterator for LeanListIter { - type Item = LeanObj; + type Item = LeanObject; fn next(&mut self) -> Option { if self.0.is_scalar() { return None; @@ -634,12 +645,12 @@ impl Iterator for LeanListIter { /// Typed wrapper for a Lean `Option α` (none = scalar, some = ctor tag 1). #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanOption(LeanObj); +pub struct LeanOption(LeanObject); impl Deref for LeanOption { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -650,16 +661,16 @@ impl LeanOption { /// # Safety /// The pointer must be a valid Lean `Option` object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(obj.is_scalar() || obj.tag() == 1); Self(obj) } pub fn none() -> Self { - Self(LeanObj::box_usize(0)) + Self(LeanObject::box_usize(0)) } - pub fn some(val: impl Into) -> Self { + pub fn some(val: impl Into) -> Self { let ctor = LeanCtor::alloc(1, 1, 0); ctor.set(0, val); Self(ctor.0) @@ -673,7 +684,7 @@ impl LeanOption { !self.is_none() } - pub fn to_option(&self) -> Option { + pub fn to_option(&self) -> Option { if self.is_none() { None } else { @@ -690,12 +701,12 @@ impl LeanOption { /// Typed wrapper for a Lean `Except ε α` (error = ctor tag 0, ok = ctor tag 1). 
#[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanExcept(LeanObj); +pub struct LeanExcept(LeanObject); impl Deref for LeanExcept { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } @@ -706,20 +717,20 @@ impl LeanExcept { /// # Safety /// The pointer must be a valid Lean `Except` object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObj(ptr); + let obj = LeanObject(ptr); debug_assert!(!obj.is_scalar() && (obj.tag() == 0 || obj.tag() == 1)); Self(obj) } /// Build `Except.ok val`. - pub fn ok(val: impl Into) -> Self { + pub fn ok(val: impl Into) -> Self { let ctor = LeanCtor::alloc(1, 1, 0); ctor.set(0, val); Self(ctor.0) } /// Build `Except.error msg`. - pub fn error(msg: impl Into) -> Self { + pub fn error(msg: impl Into) -> Self { let ctor = LeanCtor::alloc(0, 1, 0); ctor.set(0, msg); Self(ctor.0) @@ -738,66 +749,66 @@ impl LeanExcept { self.0.tag() == 0 } - pub fn into_result(self) -> Result { + pub fn into_result(self) -> Result { let ctor = self.0.as_ctor(); if self.is_ok() { Ok(ctor.get(0)) } else { Err(ctor.get(0)) } } } // ============================================================================= -// From for LeanObj — allow wrapper types to be passed to set() etc. +// From for LeanObject — allow wrapper types to be passed to set() etc. 
// ============================================================================= -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanArray) -> Self { x.0 } } -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanByteArray) -> Self { x.0 } } -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanString) -> Self { x.0 } } -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanCtor) -> Self { x.0 } } -impl From> for LeanObj { +impl From> for LeanObject { #[inline] fn from(x: LeanExternal) -> Self { x.0 } } -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanList) -> Self { x.0 } } -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanOption) -> Self { x.0 } } -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanExcept) -> Self { x.0 @@ -808,29 +819,34 @@ impl From for LeanObj { // Domain types — typed newtypes for specific Lean types // ============================================================================= -/// Generate a `#[repr(transparent)]` newtype over `LeanObj` for a specific +/// Generate a `#[repr(transparent)]` newtype over `LeanObject` for a specific /// Lean type, with `Deref`, `From`, and a `new` constructor. macro_rules! 
lean_domain_type { ($($(#[$meta:meta])* $name:ident;)*) => {$( $(#[$meta])* #[derive(Clone, Copy)] #[repr(transparent)] - pub struct $name(LeanObj); + pub struct $name(LeanObject); impl Deref for $name { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { &self.0 } + fn deref(&self) -> &LeanObject { &self.0 } } - impl From<$name> for LeanObj { + impl From<$name> for LeanObject { #[inline] fn from(x: $name) -> Self { x.0 } } + impl From for $name { + #[inline] + fn from(obj: LeanObject) -> Self { Self(obj) } + } + impl $name { #[inline] - pub fn new(obj: LeanObj) -> Self { Self(obj) } + pub fn new(obj: LeanObject) -> Self { Self(obj) } } )*}; } @@ -838,101 +854,101 @@ macro_rules! lean_domain_type { lean_domain_type! { // Ix core types /// Lean `Ix.Name` object. - IxName; + LeanIxName; /// Lean `Ix.Level` object. - IxLevel; + LeanIxLevel; /// Lean `Ix.Expr` object. - IxExpr; + LeanIxExpr; /// Lean `Ix.ConstantInfo` object. - IxConstantInfo; + LeanIxConstantInfo; /// Lean `Ix.RawEnvironment` object. - IxRawEnvironment; + LeanIxRawEnvironment; /// Lean `Ix.Environment` object. - IxEnvironment; + LeanIxEnvironment; /// Lean `Ix.RustCondensedBlocks` object. - IxCondensedBlocks; + LeanIxCondensedBlocks; /// Lean `Ix.CompileM.RustCompilePhases` object. - IxCompilePhases; + LeanIxCompilePhases; // Ix data types /// Lean `Ix.Int` object. - IxInt; + LeanIxInt; /// Lean `Ix.Substring` object. - IxSubstring; + LeanIxSubstring; /// Lean `Ix.SourceInfo` object. - IxSourceInfo; + LeanIxSourceInfo; /// Lean `Ix.SyntaxPreresolved` object. - IxSyntaxPreresolved; + LeanIxSyntaxPreresolved; /// Lean `Ix.Syntax` object. - IxSyntax; + LeanIxSyntax; /// Lean `Ix.DataValue` object. - IxDataValue; + LeanIxDataValue; // Ixon types /// Lean `Ixon.DefKind` object. - IxonDefKind; + LeanIxonDefKind; /// Lean `Ixon.DefinitionSafety` object. - IxonDefinitionSafety; + LeanIxonDefinitionSafety; /// Lean `Ixon.QuotKind` object. 
- IxonQuotKind; + LeanIxonQuotKind; /// Lean `Ixon.Univ` object. - IxonUniv; + LeanIxonUniv; /// Lean `Ixon.Expr` object. - IxonExpr; + LeanIxonExpr; /// Lean `Ixon.Definition` object. - IxonDefinition; + LeanIxonDefinition; /// Lean `Ixon.RecursorRule` object. - IxonRecursorRule; + LeanIxonRecursorRule; /// Lean `Ixon.Recursor` object. - IxonRecursor; + LeanIxonRecursor; /// Lean `Ixon.Axiom` object. - IxonAxiom; + LeanIxonAxiom; /// Lean `Ixon.Quotient` object. - IxonQuotient; + LeanIxonQuotient; /// Lean `Ixon.Constructor` object. - IxonConstructor; + LeanIxonConstructor; /// Lean `Ixon.Inductive` object. - IxonInductive; + LeanIxonInductive; /// Lean `Ixon.InductiveProj` object. - IxonInductiveProj; + LeanIxonInductiveProj; /// Lean `Ixon.ConstructorProj` object. - IxonConstructorProj; + LeanIxonConstructorProj; /// Lean `Ixon.RecursorProj` object. - IxonRecursorProj; + LeanIxonRecursorProj; /// Lean `Ixon.DefinitionProj` object. - IxonDefinitionProj; + LeanIxonDefinitionProj; /// Lean `Ixon.MutConst` object. - IxonMutConst; + LeanIxonMutConst; /// Lean `Ixon.ConstantInfo` object. - IxonConstantInfo; + LeanIxonConstantInfo; /// Lean `Ixon.Constant` object. - IxonConstant; + LeanIxonConstant; /// Lean `Ixon.DataValue` object. - IxonDataValue; + LeanIxonDataValue; /// Lean `Ixon.ExprMetaData` object. - IxonExprMetaData; + LeanIxonExprMetaData; /// Lean `Ixon.ExprMetaArena` object. - IxonExprMetaArena; + LeanIxonExprMetaArena; /// Lean `Ixon.ConstantMeta` object. - IxonConstantMeta; + LeanIxonConstantMeta; /// Lean `Ixon.Named` object. - IxonNamed; + LeanIxonNamed; /// Lean `Ixon.Comm` object. - IxonComm; + LeanIxonComm; /// Lean `Ixon.RawEnv` object. - IxonRawEnv; + LeanIxonRawEnv; // Error types /// Lean `Ixon.SerializeError` object. - IxSerializeError; + LeanIxSerializeError; /// Lean `Ix.DecompileM.DecompileError` object. - IxDecompileError; + LeanIxDecompileError; /// Lean `Ix.CompileM.CompileError` object. 
- IxCompileError; + LeanIxCompileError; /// Lean `BlockCompareResult` object. - IxBlockCompareResult; + LeanIxBlockCompareResult; /// Lean `BlockCompareDetail` object. - IxBlockCompareDetail; + LeanIxBlockCompareDetail; } // ============================================================================= @@ -942,17 +958,17 @@ lean_domain_type! { /// Typed wrapper for a Lean `Prod α β` (ctor tag 0, 2 object fields). #[derive(Clone, Copy)] #[repr(transparent)] -pub struct LeanProd(LeanObj); +pub struct LeanProd(LeanObject); impl Deref for LeanProd { - type Target = LeanObj; + type Target = LeanObject; #[inline] - fn deref(&self) -> &LeanObj { + fn deref(&self) -> &LeanObject { &self.0 } } -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: LeanProd) -> Self { x.0 @@ -961,7 +977,7 @@ impl From for LeanObj { impl LeanProd { /// Build a pair `(fst, snd)`. - pub fn new(fst: impl Into, snd: impl Into) -> Self { + pub fn new(fst: impl Into, snd: impl Into) -> Self { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, fst); ctor.set(1, snd); @@ -969,22 +985,22 @@ impl LeanProd { } /// Get the first element. - pub fn fst(&self) -> LeanObj { + pub fn fst(&self) -> LeanObject { let ctor = self.0.as_ctor(); ctor.get(0) } /// Get the second element. - pub fn snd(&self) -> LeanObj { + pub fn snd(&self) -> LeanObject { let ctor = self.0.as_ctor(); ctor.get(1) } } /// `Ix.Address = { hash : ByteArray }` — single-field struct, unboxed to `ByteArray`. 
-pub type IxAddress = LeanByteArray; +pub type LeanIxAddress = LeanByteArray; -impl From for LeanObj { +impl From for LeanObject { #[inline] fn from(x: u32) -> Self { Self::box_u32(x) diff --git a/src/sha256.rs b/src/sha256.rs index 6bf1b5f3..662d2b5c 100644 --- a/src/sha256.rs +++ b/src/sha256.rs @@ -1,6 +1,6 @@ use sha2::{Digest, Sha256}; -use crate::lean::obj::LeanByteArray; +use crate::lean::object::LeanByteArray; #[unsafe(no_mangle)] extern "C" fn rs_sha256(bytes: LeanByteArray) -> LeanByteArray { From da99816aa03dd403c23cd88aa5723c41b5fd4f14 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 11:47:07 -0500 Subject: [PATCH 12/27] Move `src/lean/ffi` to `src/ffi` --- src/{lean => }/ffi.rs | 0 src/{lean => }/ffi/aiur.rs | 0 src/{lean => }/ffi/aiur/protocol.rs | 0 src/{lean => }/ffi/aiur/toplevel.rs | 2 +- src/{lean => }/ffi/builder.rs | 0 src/{lean => }/ffi/byte_array.rs | 0 src/{lean => }/ffi/compile.rs | 20 ++++++++++---------- src/{lean => }/ffi/graph.rs | 8 ++++---- src/{lean => }/ffi/ix.rs | 0 src/{lean => }/ffi/ix/address.rs | 0 src/{lean => }/ffi/ix/constant.rs | 8 ++++---- src/{lean => }/ffi/ix/data.rs | 6 +++--- src/{lean => }/ffi/ix/env.rs | 6 +++--- src/{lean => }/ffi/ix/expr.rs | 12 ++++++------ src/{lean => }/ffi/ix/level.rs | 6 +++--- src/{lean => }/ffi/ix/name.rs | 6 +++--- src/{lean => }/ffi/ixon.rs | 0 src/{lean => }/ffi/ixon/compare.rs | 4 ++-- src/{lean => }/ffi/ixon/constant.rs | 4 ++-- src/{lean => }/ffi/ixon/enums.rs | 0 src/{lean => }/ffi/ixon/env.rs | 8 ++++---- src/{lean => }/ffi/ixon/expr.rs | 0 src/{lean => }/ffi/ixon/meta.rs | 6 +++--- src/{lean => }/ffi/ixon/serialize.rs | 4 ++-- src/{lean => }/ffi/ixon/sharing.rs | 4 ++-- src/{lean => }/ffi/ixon/univ.rs | 0 src/{lean => }/ffi/keccak.rs | 0 src/{lean => }/ffi/lean_env.rs | 0 src/{lean => }/ffi/primitives.rs | 0 src/{lean => }/ffi/unsigned.rs | 0 src/lean.rs | 1 - src/lib.rs | 1 + 32 files changed, 53 insertions(+), 53 
deletions(-) rename src/{lean => }/ffi.rs (100%) rename src/{lean => }/ffi/aiur.rs (100%) rename src/{lean => }/ffi/aiur/protocol.rs (100%) rename src/{lean => }/ffi/aiur/toplevel.rs (98%) rename src/{lean => }/ffi/builder.rs (100%) rename src/{lean => }/ffi/byte_array.rs (100%) rename src/{lean => }/ffi/compile.rs (98%) rename src/{lean => }/ffi/graph.rs (95%) rename src/{lean => }/ffi/ix.rs (100%) rename src/{lean => }/ffi/ix/address.rs (100%) rename src/{lean => }/ffi/ix/constant.rs (98%) rename src/{lean => }/ffi/ix/data.rs (98%) rename src/{lean => }/ffi/ix/env.rs (98%) rename src/{lean => }/ffi/ix/expr.rs (97%) rename src/{lean => }/ffi/ix/level.rs (95%) rename src/{lean => }/ffi/ix/name.rs (95%) rename src/{lean => }/ffi/ixon.rs (100%) rename src/{lean => }/ffi/ixon/compare.rs (98%) rename src/{lean => }/ffi/ixon/constant.rs (99%) rename src/{lean => }/ffi/ixon/enums.rs (100%) rename src/{lean => }/ffi/ixon/env.rs (98%) rename src/{lean => }/ffi/ixon/expr.rs (100%) rename src/{lean => }/ffi/ixon/meta.rs (99%) rename src/{lean => }/ffi/ixon/serialize.rs (99%) rename src/{lean => }/ffi/ixon/sharing.rs (97%) rename src/{lean => }/ffi/ixon/univ.rs (100%) rename src/{lean => }/ffi/keccak.rs (100%) rename src/{lean => }/ffi/lean_env.rs (100%) rename src/{lean => }/ffi/primitives.rs (100%) rename src/{lean => }/ffi/unsigned.rs (100%) diff --git a/src/lean/ffi.rs b/src/ffi.rs similarity index 100% rename from src/lean/ffi.rs rename to src/ffi.rs diff --git a/src/lean/ffi/aiur.rs b/src/ffi/aiur.rs similarity index 100% rename from src/lean/ffi/aiur.rs rename to src/ffi/aiur.rs diff --git a/src/lean/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs similarity index 100% rename from src/lean/ffi/aiur/protocol.rs rename to src/ffi/aiur/protocol.rs diff --git a/src/lean/ffi/aiur/toplevel.rs b/src/ffi/aiur/toplevel.rs similarity index 98% rename from src/lean/ffi/aiur/toplevel.rs rename to src/ffi/aiur/toplevel.rs index 622be7ee..92394a0d 100644 --- 
a/src/lean/ffi/aiur/toplevel.rs +++ b/src/ffi/aiur/toplevel.rs @@ -9,7 +9,7 @@ use crate::{ lean::object::LeanObject, }; -use crate::lean::ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}; +use crate::ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}; fn lean_ptr_to_vec_val_idx(obj: LeanObject) -> Vec { obj.as_array().map(lean_unbox_nat_as_usize) diff --git a/src/lean/ffi/builder.rs b/src/ffi/builder.rs similarity index 100% rename from src/lean/ffi/builder.rs rename to src/ffi/builder.rs diff --git a/src/lean/ffi/byte_array.rs b/src/ffi/byte_array.rs similarity index 100% rename from src/lean/ffi/byte_array.rs rename to src/ffi/byte_array.rs diff --git a/src/lean/ffi/compile.rs b/src/ffi/compile.rs similarity index 98% rename from src/lean/ffi/compile.rs rename to src/ffi/compile.rs index 6b20c5b7..206eaf57 100644 --- a/src/lean/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -20,7 +20,7 @@ use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; -use crate::lean::ffi::{ffi_io_guard, io_error, io_ok}; +use crate::ffi::{ffi_io_guard, io_error, io_ok}; use crate::lean::lean_sys::lean_uint64_to_nat; use crate::lean::nat::Nat; use crate::lean::object::{ @@ -33,19 +33,19 @@ use crate::lean::object::{ use dashmap::DashMap; use dashmap::DashSet; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::graph::build_condensed_blocks; -use crate::lean::ffi::ix::constant::build_constant_info; -use crate::lean::ffi::ix::env::build_raw_environment; -use crate::lean::ffi::ix::name::build_name; -use crate::lean::ffi::ixon::constant::{ +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::graph::build_condensed_blocks; +use crate::ffi::ix::constant::build_constant_info; +use crate::ffi::ix::env::build_raw_environment; +use crate::ffi::ix::name::build_name; +use crate::ffi::ixon::constant::{ build_address_from_ixon, 
build_ixon_constant, decode_ixon_address, }; -use crate::lean::ffi::ixon::env::{ +use crate::ffi::ixon::env::{ build_raw_env, build_raw_name_entry, decode_raw_env, decoded_to_ixon_env, }; -use crate::lean::ffi::ixon::meta::{build_constant_meta, build_ixon_comm}; -use crate::lean::ffi::lean_env::{ +use crate::ffi::ixon::meta::{build_constant_meta, build_ixon_comm}; +use crate::ffi::lean_env::{ GlobalCache, lean_ptr_to_env, lean_ptr_to_name, }; diff --git a/src/lean/ffi/graph.rs b/src/ffi/graph.rs similarity index 95% rename from src/lean/ffi/graph.rs rename to src/ffi/graph.rs index f4e5846e..b5196f23 100644 --- a/src/lean/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; -use crate::lean::ffi::{ffi_io_guard, io_ok}; +use crate::ffi::{ffi_io_guard, io_ok}; use crate::lean::object::{LeanArray, LeanCtor, LeanObject}; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::name::build_name; -use crate::lean::ffi::lean_env::lean_ptr_to_env; +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::name::build_name; +use crate::ffi::lean_env::lean_ptr_to_env; /// Build an Array (Ix.Name × Array Ix.Name) from a RefMap. 
pub fn build_ref_graph_array( diff --git a/src/lean/ffi/ix.rs b/src/ffi/ix.rs similarity index 100% rename from src/lean/ffi/ix.rs rename to src/ffi/ix.rs diff --git a/src/lean/ffi/ix/address.rs b/src/ffi/ix/address.rs similarity index 100% rename from src/lean/ffi/ix/address.rs rename to src/ffi/ix/address.rs diff --git a/src/lean/ffi/ix/constant.rs b/src/ffi/ix/constant.rs similarity index 98% rename from src/lean/ffi/ix/constant.rs rename to src/ffi/ix/constant.rs index 34a7b016..d2501c04 100644 --- a/src/lean/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -18,12 +18,12 @@ use crate::ix::env::{ use crate::lean::nat::Nat; use crate::lean::object::{LeanIxConstantInfo, LeanArray, LeanCtor, LeanObject}; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::expr::{build_expr, decode_ix_expr}; -use crate::lean::ffi::ix::name::{ +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::expr::{build_expr, decode_ix_expr}; +use crate::ffi::ix::name::{ build_name, build_name_array, decode_ix_name, decode_name_array, }; -use crate::lean::ffi::primitives::build_nat; +use crate::ffi::primitives::build_nat; /// Build a Ix.ConstantVal structure. pub fn build_constant_val( diff --git a/src/lean/ffi/ix/data.rs b/src/ffi/ix/data.rs similarity index 98% rename from src/lean/ffi/ix/data.rs rename to src/ffi/ix/data.rs index e6aab8ab..eda6ae4f 100644 --- a/src/lean/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -9,9 +9,9 @@ use crate::lean::object::{ LeanIxSyntaxPreresolved, LeanArray, LeanCtor, LeanObject, LeanString, }; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; -use crate::lean::ffi::primitives::build_nat; +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::name::{build_name, decode_ix_name}; +use crate::ffi::primitives::build_nat; /// Build a Ix.Int (ofNat or negSucc). 
pub fn build_int(int: &Int) -> LeanIxInt { diff --git a/src/lean/ffi/ix/env.rs b/src/ffi/ix/env.rs similarity index 98% rename from src/lean/ffi/ix/env.rs rename to src/ffi/ix/env.rs index be70c190..37606769 100644 --- a/src/lean/ffi/ix/env.rs +++ b/src/ffi/ix/env.rs @@ -7,11 +7,11 @@ use crate::lean::object::{ LeanIxEnvironment, LeanIxRawEnvironment, LeanArray, LeanCtor, LeanObject, }; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::constant::{ +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::constant::{ build_constant_info, decode_constant_info, }; -use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; +use crate::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= // HashMap Building diff --git a/src/lean/ffi/ix/expr.rs b/src/ffi/ix/expr.rs similarity index 97% rename from src/lean/ffi/ix/expr.rs rename to src/ffi/ix/expr.rs index cf4a6ed8..ee3b8475 100644 --- a/src/lean/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -20,14 +20,14 @@ use crate::ix::env::{ use crate::lean::nat::Nat; use crate::lean::object::{LeanIxExpr, LeanArray, LeanCtor, LeanObject, LeanString}; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::address::build_address; -use crate::lean::ffi::ix::data::{build_data_value, decode_data_value}; -use crate::lean::ffi::ix::level::{ +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::address::build_address; +use crate::ffi::ix::data::{build_data_value, decode_data_value}; +use crate::ffi::ix::level::{ build_level, build_level_array, decode_ix_level, }; -use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; -use crate::lean::ffi::primitives::build_nat; +use crate::ffi::ix::name::{build_name, decode_ix_name}; +use crate::ffi::primitives::build_nat; /// Build a Lean Ix.Expr with embedded hash. /// Uses caching to avoid rebuilding the same expression. 
diff --git a/src/lean/ffi/ix/level.rs b/src/ffi/ix/level.rs similarity index 95% rename from src/lean/ffi/ix/level.rs rename to src/ffi/ix/level.rs index c6ba3b8e..a23d0963 100644 --- a/src/lean/ffi/ix/level.rs +++ b/src/ffi/ix/level.rs @@ -11,9 +11,9 @@ use crate::ix::env::{Level, LevelData}; use crate::lean::object::{LeanIxLevel, LeanArray, LeanCtor, LeanObject}; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::address::build_address; -use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::address::build_address; +use crate::ffi::ix::name::{build_name, decode_ix_name}; /// Build a Lean Ix.Level with embedded hash. /// Uses caching to avoid rebuilding the same level. diff --git a/src/lean/ffi/ix/name.rs b/src/ffi/ix/name.rs similarity index 95% rename from src/lean/ffi/ix/name.rs rename to src/ffi/ix/name.rs index 46dfe234..413b4b2d 100644 --- a/src/lean/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -9,9 +9,9 @@ use crate::ix::env::{Name, NameData}; use crate::lean::nat::Nat; use crate::lean::object::{LeanIxName, LeanArray, LeanCtor, LeanObject, LeanString}; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::address::build_address; -use crate::lean::ffi::primitives::build_nat; +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::address::build_address; +use crate::ffi::primitives::build_nat; /// Build a Lean Ix.Name with embedded hash. /// Uses caching to avoid rebuilding the same name. 
diff --git a/src/lean/ffi/ixon.rs b/src/ffi/ixon.rs similarity index 100% rename from src/lean/ffi/ixon.rs rename to src/ffi/ixon.rs diff --git a/src/lean/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs similarity index 98% rename from src/lean/ffi/ixon/compare.rs rename to src/ffi/ixon/compare.rs index 62d800da..52a9d87c 100644 --- a/src/lean/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -10,7 +10,7 @@ use crate::lean::object::{ LeanIxBlockCompareDetail, LeanByteArray, LeanCtor, LeanObject, }; -use crate::lean::ffi::lean_env::{ +use crate::ffi::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, }; @@ -186,7 +186,7 @@ pub unsafe extern "C" fn rs_free_compiled_env(ptr: *mut RustBlockEnv) { pub extern "C" fn rs_build_compiled_env( env_consts_ptr: LeanObject, ) -> *mut RustBlockEnv { - use crate::lean::ffi::lean_env::lean_ptr_to_env; + use crate::ffi::lean_env::lean_ptr_to_env; // Decode Lean environment let rust_env = lean_ptr_to_env(env_consts_ptr); diff --git a/src/lean/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs similarity index 99% rename from src/lean/ffi/ixon/constant.rs rename to src/ffi/ixon/constant.rs index 3ab0d5b1..b5f9da29 100644 --- a/src/lean/ffi/ixon/constant.rs +++ b/src/ffi/ixon/constant.rs @@ -24,11 +24,11 @@ use crate::lean::object::{ LeanCtor, LeanObject, }; -use crate::lean::ffi::ixon::expr::{ +use crate::ffi::ixon::expr::{ build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, decode_ixon_expr_array, }; -use crate::lean::ffi::ixon::univ::{ +use crate::ffi::ixon::univ::{ build_ixon_univ_array, decode_ixon_univ_array, }; diff --git a/src/lean/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs similarity index 100% rename from src/lean/ffi/ixon/enums.rs rename to src/ffi/ixon/enums.rs diff --git a/src/lean/ffi/ixon/env.rs b/src/ffi/ixon/env.rs similarity index 98% rename from src/lean/ffi/ixon/env.rs rename to src/ffi/ixon/env.rs index efde1691..cc434e57 100644 --- a/src/lean/ffi/ixon/env.rs +++ 
b/src/ffi/ixon/env.rs @@ -13,13 +13,13 @@ use crate::lean::object::{ LeanIxonRawEnv, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, }; -use crate::lean::ffi::builder::LeanBuildCache; -use crate::lean::ffi::ix::name::{build_name, decode_ix_name}; -use crate::lean::ffi::ixon::constant::{ +use crate::ffi::builder::LeanBuildCache; +use crate::ffi::ix::name::{build_name, decode_ix_name}; +use crate::ffi::ixon::constant::{ build_address_from_ixon, build_ixon_constant, decode_ixon_address, decode_ixon_constant, }; -use crate::lean::ffi::ixon::meta::{build_constant_meta, decode_constant_meta}; +use crate::ffi::ixon::meta::{build_constant_meta, decode_constant_meta}; // ============================================================================= // Comm Type (secret: Address, payload: Address) diff --git a/src/lean/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs similarity index 100% rename from src/lean/ffi/ixon/expr.rs rename to src/ffi/ixon/expr.rs diff --git a/src/lean/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs similarity index 99% rename from src/lean/ffi/ixon/meta.rs rename to src/ffi/ixon/meta.rs index 75ea9f0f..75456d98 100644 --- a/src/lean/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -15,11 +15,11 @@ use crate::lean::object::{ LeanCtor, LeanObject, }; -use crate::lean::ffi::ix::constant::{ +use crate::ffi::ix::constant::{ build_reducibility_hints, decode_reducibility_hints, }; -use crate::lean::ffi::ix::expr::binder_info_to_u8; -use crate::lean::ffi::ixon::constant::{ +use crate::ffi::ix::expr::binder_info_to_u8; +use crate::ffi::ixon::constant::{ build_address_array, build_address_from_ixon, decode_ixon_address, decode_ixon_address_array, }; diff --git a/src/lean/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs similarity index 99% rename from src/lean/ffi/ixon/serialize.rs rename to src/ffi/ixon/serialize.rs index 564d1d9b..d9b34ec3 100644 --- a/src/lean/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -15,7 +15,7 @@ use crate::lean::object::{ 
LeanIxonUniv, LeanByteArray, LeanObject, }; -use crate::lean::ffi::ixon::constant::{ +use crate::ffi::ixon::constant::{ decode_ixon_address, decode_ixon_constant, }; @@ -180,7 +180,7 @@ pub extern "C" fn rs_eq_env_serialization( bytes_obj: LeanByteArray, ) -> bool { use crate::ix::ixon::env::Env; - use crate::lean::ffi::ixon::env::decode_raw_env; + use crate::ffi::ixon::env::decode_raw_env; let decoded = decode_raw_env(*raw_env_obj); let bytes_data = bytes_obj.as_bytes(); diff --git a/src/lean/ffi/ixon/sharing.rs b/src/ffi/ixon/sharing.rs similarity index 97% rename from src/lean/ffi/ixon/sharing.rs rename to src/ffi/ixon/sharing.rs index b4d074c4..6b66b613 100644 --- a/src/lean/ffi/ixon/sharing.rs +++ b/src/ffi/ixon/sharing.rs @@ -9,8 +9,8 @@ use crate::ix::ixon::sharing::{ }; use crate::lean::object::{LeanArray, LeanByteArray}; -use crate::lean::ffi::ixon::expr::decode_ixon_expr_array; -use crate::lean::ffi::ixon::serialize::lean_ptr_to_ixon_expr; +use crate::ffi::ixon::expr::decode_ixon_expr_array; +use crate::ffi::ixon::serialize::lean_ptr_to_ixon_expr; /// FFI: Debug sharing analysis - print usage counts for subterms with usage >= 2. /// This helps diagnose why Lean and Rust make different sharing decisions. 
diff --git a/src/lean/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs similarity index 100% rename from src/lean/ffi/ixon/univ.rs rename to src/ffi/ixon/univ.rs diff --git a/src/lean/ffi/keccak.rs b/src/ffi/keccak.rs similarity index 100% rename from src/lean/ffi/keccak.rs rename to src/ffi/keccak.rs diff --git a/src/lean/ffi/lean_env.rs b/src/ffi/lean_env.rs similarity index 100% rename from src/lean/ffi/lean_env.rs rename to src/ffi/lean_env.rs diff --git a/src/lean/ffi/primitives.rs b/src/ffi/primitives.rs similarity index 100% rename from src/lean/ffi/primitives.rs rename to src/ffi/primitives.rs diff --git a/src/lean/ffi/unsigned.rs b/src/ffi/unsigned.rs similarity index 100% rename from src/lean/ffi/unsigned.rs rename to src/ffi/unsigned.rs diff --git a/src/lean.rs b/src/lean.rs index bdfaa183..357b4ff6 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -22,7 +22,6 @@ pub mod lean_sys { include!(concat!(env!("OUT_DIR"), "/lean.rs")); } -pub mod ffi; pub mod nat; pub mod object; diff --git a/src/lib.rs b/src/lib.rs index c2b2d7de..a4bca1e1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,6 +13,7 @@ use indexmap::{IndexMap, IndexSet}; use rustc_hash::FxBuildHasher; pub mod aiur; +pub mod ffi; pub mod iroh; pub mod ix; pub mod lean; From 89559f7eea899a3b68ce8b2e3c2e43729ac3417f Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 12:10:43 -0500 Subject: [PATCH 13/27] Clippy --- src/ffi.rs | 8 ++--- src/ffi/aiur/protocol.rs | 14 ++++---- src/ffi/compile.rs | 17 +++++----- src/ffi/ix/constant.rs | 9 ++--- src/ffi/ix/data.rs | 8 ++--- src/ffi/ix/expr.rs | 2 +- src/ffi/ix/name.rs | 2 +- src/ffi/primitives.rs | 27 +++++---------- src/iroh/client.rs | 8 ++--- src/lean/nat.rs | 7 ++-- src/lean/object.rs | 73 +++++++++++++++++++++++++++++++--------- 11 files changed, 100 insertions(+), 75 deletions(-) diff --git a/src/ffi.rs b/src/ffi.rs index 718cc26b..e22aad79 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -42,17 +42,15 
@@ where /// Wrap a Lean value in an IO success result. pub(crate) fn io_ok(val: impl Into) -> LeanObject { let val: LeanObject = val.into(); - unsafe { - LeanObject::from_raw(lean_io_result_mk_ok(val.as_mut_ptr().cast()).cast()) - } + unsafe { LeanObject::from_lean_ptr(lean_io_result_mk_ok(val.as_mut_ptr().cast())) } } /// Create a Lean IO error result from a Rust error message. pub(crate) fn io_error(msg: &str) -> LeanObject { - let lean_msg = LeanString::from_str(msg); + let lean_msg = LeanString::new(msg); unsafe { let lean_err = lean_mk_io_user_error(lean_msg.as_mut_ptr().cast()); - LeanObject::from_raw(lean_io_result_mk_error(lean_err).cast()) + LeanObject::from_lean_ptr(lean_io_result_mk_error(lean_err)) } } diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index d4f29485..7dfe8300 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -12,14 +12,12 @@ use crate::{ execute::{IOBuffer, IOKeyInfo}, synthesis::AiurSystem, }, - lean::{ - ffi::aiur::{ - lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, - }, - object::{ - ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, - LeanExternal, LeanObject, - }, + ffi::aiur::{ + lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, + }, + lean::object::{ + ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, + LeanExternal, LeanObject, }, }; diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 206eaf57..3486c6ae 100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -21,7 +21,6 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; use crate::ffi::{ffi_io_guard, io_error, io_ok}; -use crate::lean::lean_sys::lean_uint64_to_nat; use crate::lean::nat::Nat; use crate::lean::object::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanIxBlockCompareDetail, @@ -55,12 +54,12 @@ use crate::ffi::lean_env::{ /// Build a Lean String from a Rust &str. 
fn build_lean_string(s: &str) -> LeanObject { - LeanString::from_str(s).into() + LeanString::new(s).into() } /// Build a Lean Nat from a usize. fn build_lean_nat_usize(n: usize) -> LeanObject { - unsafe { LeanObject::from_raw(lean_uint64_to_nat(n as u64).cast()) } + LeanObject::from_nat_u64(n as u64) } // ============================================================================= @@ -1136,7 +1135,7 @@ pub fn decode_serialize_error(obj: LeanObject) -> SerializeError { }, 5 => SerializeError::AddressError, 6 => { - let max = Nat::from_ptr(ctor.get(0).as_ptr()) + let max = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1230,7 +1229,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { - let refs_len = Nat::from_ptr(ctor.get(0).as_ptr()) + let refs_len = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1239,7 +1238,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidRefIndex { idx, refs_len, constant } }, 1 => { - let univs_len = Nat::from_ptr(ctor.get(0).as_ptr()) + let univs_len = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1248,7 +1247,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidUnivIndex { idx, univs_len, constant } }, 2 => { - let max = Nat::from_ptr(ctor.get(0).as_ptr()) + let max = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1257,7 +1256,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidShareIndex { idx, max, constant } }, 3 => { - let ctx_size = Nat::from_ptr(ctor.get(0).as_ptr()) + let ctx_size = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } .to_u64() .and_then(|x| 
usize::try_from(x).ok()) .unwrap_or(0); @@ -1266,7 +1265,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidRecIndex { idx, ctx_size, constant } }, 4 => { - let max = Nat::from_ptr(ctor.get(0).as_ptr()) + let max = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index d2501c04..d330168f 100644 --- a/src/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -55,11 +55,7 @@ pub fn build_reducibility_hints(hints: &ReducibilityHints) -> LeanObject { ReducibilityHints::Regular(h) => { // UInt32 is a scalar, stored inline let obj = LeanCtor::alloc(2, 0, 4); - // Set the uint32 at offset 0 in the scalar area - unsafe { - let ptr = obj.as_ptr().cast::(); - *(ptr.add(8).cast::().cast_mut()) = *h; - } + obj.set_u32(0, *h); *obj }, } @@ -295,8 +291,7 @@ pub fn decode_reducibility_hints(obj: LeanObject) -> ReducibilityHints { 1 => ReducibilityHints::Abbrev, 2 => { // regular: 0 obj fields, 4 scalar bytes (UInt32) - let h = unsafe { *(obj.as_ptr().cast::().add(8).cast::()) }; - ReducibilityHints::Regular(h) + ReducibilityHints::Regular(ctor.scalar_u32(0, 0)) }, _ => panic!("Invalid ReducibilityHints tag: {}", ctor.tag()), } diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index eda6ae4f..019cdc5e 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -32,7 +32,7 @@ pub fn build_int(int: &Int) -> LeanIxInt { /// Build a Ix.Substring. 
pub fn build_substring(ss: &Substring) -> LeanIxSubstring { let obj = LeanCtor::alloc(0, 3, 0); - obj.set(0, LeanString::from_str(ss.str.as_str())); + obj.set(0, LeanString::new(ss.str.as_str())); obj.set(1, build_nat(&ss.start_pos)); obj.set(2, build_nat(&ss.stop_pos)); LeanIxSubstring::new(*obj) @@ -91,7 +91,7 @@ pub fn build_syntax_preresolved( pub fn build_string_array(strings: &[String]) -> LeanArray { let arr = LeanArray::alloc(strings.len()); for (i, s) in strings.iter().enumerate() { - arr.set(i, LeanString::from_str(s.as_str())); + arr.set(i, LeanString::new(s.as_str())); } arr } @@ -117,7 +117,7 @@ pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> LeanIxSyntax { let info_obj = build_source_info(info); let obj = LeanCtor::alloc(2, 2, 0); obj.set(0, info_obj); - obj.set(1, LeanString::from_str(val.as_str())); + obj.set(1, LeanString::new(val.as_str())); LeanIxSyntax::new(*obj) }, // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 @@ -168,7 +168,7 @@ pub fn build_data_value( match dv { DataValue::OfString(s) => { let obj = LeanCtor::alloc(0, 1, 0); - obj.set(0, LeanString::from_str(s.as_str())); + obj.set(0, LeanString::new(s.as_str())); LeanIxDataValue::new(*obj) }, DataValue::OfBool(b) => { diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index ee3b8475..201e1e2a 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -194,7 +194,7 @@ pub fn build_literal(lit: &Literal) -> LeanObject { }, Literal::StrVal(s) => { let obj = LeanCtor::alloc(1, 1, 0); - obj.set(0, LeanString::from_str(s.as_str())); + obj.set(0, LeanString::new(s.as_str())); *obj }, } diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index 413b4b2d..d3337f99 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -30,7 +30,7 @@ pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> LeanIxName { }, NameData::Str(parent, s, h) => { let parent_obj = build_name(cache, parent); - let s_obj = 
LeanString::from_str(s.as_str()); + let s_obj = LeanString::new(s.as_str()); let ctor = LeanCtor::alloc(1, 3, 0); ctor.set(0, parent_obj); ctor.set(1, s_obj); diff --git a/src/ffi/primitives.rs b/src/ffi/primitives.rs index 3db23afb..ca779acf 100644 --- a/src/ffi/primitives.rs +++ b/src/ffi/primitives.rs @@ -6,7 +6,6 @@ //! - List, Array, ByteArray //! - AssocList, HashMap -use crate::lean::lean_sys::lean_uint64_to_nat; use crate::lean::nat::Nat; use crate::lean::object::{ LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObject, LeanString, @@ -25,8 +24,7 @@ pub fn build_nat(n: &Nat) -> LeanObject { #[allow(clippy::cast_possible_truncation)] return LeanObject::box_usize(val as usize); } - // For larger u64 values, use lean_uint64_to_nat - return unsafe { LeanObject::from_raw(lean_uint64_to_nat(val).cast()) }; + return LeanObject::from_nat_u64(val); } // For values larger than u64, convert to limbs and use GMP let bytes = n.to_le_bytes(); @@ -59,24 +57,20 @@ pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanObject) -> LeanObject { #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_string(s_ptr: LeanString) -> LeanString { let s = s_ptr.to_string(); - LeanString::from_str(&s) + LeanString::new(&s) } /// Round-trip a List Nat: decode from Lean, re-encode to Lean. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanList) -> LeanObject { - // Decode list to Vec +pub extern "C" fn rs_roundtrip_list_nat(list_ptr: LeanList) -> LeanList { let nats: Vec = list_ptr.collect(Nat::from_obj); - // Re-encode as Lean List build_list_nat(&nats) } /// Round-trip an Array Nat: decode from Lean, re-encode to Lean. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_array_nat(arr_ptr: LeanArray) -> LeanObject { - // Decode array +pub extern "C" fn rs_roundtrip_array_nat(arr_ptr: LeanArray) -> LeanArray { let nats: Vec = arr_ptr.map(Nat::from_obj); - // Re-encode as Lean Array build_array_nat(&nats) } @@ -99,18 +93,18 @@ pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanObject) -> LeanObject { // ============================================================================= /// Build a Lean List Nat from a Vec. -fn build_list_nat(nats: &[Nat]) -> LeanObject { +fn build_list_nat(nats: &[Nat]) -> LeanList { let items: Vec = nats.iter().map(build_nat).collect(); - *LeanList::from_iter(items) + items.into_iter().collect() } /// Build a Lean Array Nat from a Vec. -fn build_array_nat(nats: &[Nat]) -> LeanObject { +fn build_array_nat(nats: &[Nat]) -> LeanArray { let arr = LeanArray::alloc(nats.len()); for (i, nat) in nats.iter().enumerate() { arr.set(i, build_nat(nat)); } - *arr + arr } // ============================================================================= @@ -360,8 +354,5 @@ pub extern "C" fn rs_bytearray_to_u64_le(ba: LeanByteArray) -> u64 { if data.len() < 8 { return 0; } - unsafe { - let cptr = crate::lean::lean_sys::lean_sarray_cptr(ba.as_ptr() as *mut _); - std::ptr::read_unaligned(cptr as *const u64) - } + u64::from_le_bytes(data[..8].try_into().unwrap()) } diff --git a/src/iroh/client.rs b/src/iroh/client.rs index daf04dea..ab38da49 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -25,8 +25,8 @@ const READ_SIZE_LIMIT: usize = 100_000_000; /// ``` fn mk_put_response(message: &str, hash: &str) -> LeanCtor { let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, LeanString::from_str(message)); - ctor.set(1, LeanString::from_str(hash)); + ctor.set(0, LeanString::new(message)); + ctor.set(1, LeanString::new(hash)); ctor } @@ -40,8 +40,8 @@ fn mk_put_response(message: &str, hash: &str) -> LeanCtor { fn mk_get_response(message: &str, hash: &str, bytes: &[u8]) -> 
LeanCtor { let byte_array = LeanByteArray::from_bytes(bytes); let ctor = LeanCtor::alloc(0, 3, 0); - ctor.set(0, LeanString::from_str(message)); - ctor.set(1, LeanString::from_str(hash)); + ctor.set(0, LeanString::new(message)); + ctor.set(1, LeanString::new(hash)); ctor.set(2, byte_array); ctor } diff --git a/src/lean/nat.rs b/src/lean/nat.rs index 3a61261e..66a6f496 100644 --- a/src/lean/nat.rs +++ b/src/lean/nat.rs @@ -38,7 +38,10 @@ impl Nat { /// Decode a `Nat` from a Lean object pointer. Handles both scalar (unboxed) /// and heap-allocated (GMP `mpz_object`) representations. - pub fn from_ptr(ptr: *const c_void) -> Nat { + /// + /// # Safety + /// The pointer must be a valid Lean `Nat` object (scalar or mpz). + pub unsafe fn from_ptr(ptr: *const c_void) -> Nat { let obj = unsafe { LeanObject::from_raw(ptr) }; if obj.is_scalar() { let u = obj.unbox_usize(); @@ -52,7 +55,7 @@ impl Nat { /// Decode a `Nat` from a `LeanObject`. Convenience wrapper over `from_ptr`. pub fn from_obj(obj: LeanObject) -> Nat { - Self::from_ptr(obj.as_ptr()) + unsafe { Self::from_ptr(obj.as_ptr()) } } #[inline] diff --git a/src/lean/object.rs b/src/lean/object.rs index 4fcecb22..7bfc2621 100644 --- a/src/lean/object.rs +++ b/src/lean/object.rs @@ -30,6 +30,24 @@ impl LeanObject { Self(ptr) } + /// Wrap a `*mut lean_object` returned from a `lean_sys` function. + /// + /// # Safety + /// The pointer must be a valid Lean object (or tagged scalar). + #[inline] + pub unsafe fn from_lean_ptr(ptr: *mut lean_sys::lean_object) -> Self { + Self(ptr.cast()) + } + + /// Create a Lean `Nat` from a `u64` value. + /// + /// Small values are stored as tagged scalars; larger ones are heap-allocated + /// via the Lean runtime. + #[inline] + pub fn from_nat_u64(n: u64) -> Self { + unsafe { Self::from_lean_ptr(lean_sys::lean_uint64_to_nat(n)) } + } + #[inline] pub fn as_ptr(self) -> *const c_void { self.0 @@ -329,27 +347,29 @@ impl LeanString { } /// Create a Lean string from a Rust `&str`. 
- pub fn from_str(s: &str) -> Self { + pub fn new(s: &str) -> Self { let c = safe_cstring(s); let obj = unsafe { lean_sys::lean_mk_string(c.as_ptr()) }; Self(LeanObject(obj.cast())) } - /// Decode the Lean string into a Rust `String`. - pub fn to_string(&self) -> String { + /// Number of data bytes (excluding the trailing NUL). + pub fn byte_len(&self) -> usize { + unsafe { lean_sys::lean_string_size(self.0.as_ptr() as *mut _) - 1 } + } +} + +impl std::fmt::Display for LeanString { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { unsafe { let obj = self.0.as_ptr() as *mut _; let len = lean_sys::lean_string_size(obj) - 1; // m_size includes NUL let data = lean_sys::lean_string_cstr(obj); let bytes = std::slice::from_raw_parts(data.cast::(), len); - String::from_utf8_unchecked(bytes.to_vec()) + let s = std::str::from_utf8_unchecked(bytes); + f.write_str(s) } } - - /// Number of data bytes (excluding the trailing NUL). - pub fn byte_len(&self) -> usize { - unsafe { lean_sys::lean_string_size(self.0.as_ptr() as *mut _) - 1 } - } } // ============================================================================= @@ -427,6 +447,18 @@ impl LeanCtor { } } + /// Set a `u32` scalar field at the given byte offset (past all object fields). + pub fn set_u32(&self, offset: usize, val: u32) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + lean_sys::lean_ctor_set_uint32( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); + } + } + /// Set a `u64` scalar field at the given byte offset (past all object fields). pub fn set_u64(&self, offset: usize, val: u64) { #[allow(clippy::cast_possible_truncation)] @@ -458,6 +490,15 @@ impl LeanCtor { } } + /// Read a `u32` scalar at `offset` bytes past `num_objs` object fields. 
+ pub fn scalar_u32(&self, num_objs: usize, offset: usize) -> u32 { + unsafe { + std::ptr::read_unaligned( + self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), + ) + } + } + /// Read a `u8` scalar at `offset` bytes past `num_objs` object fields. pub fn scalar_u8(&self, num_objs: usize, offset: usize) -> u8 { unsafe { *self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset) } @@ -502,7 +543,7 @@ impl LeanExternal { pub fn alloc(class: &ExternalClass, data: T) -> Self { let data_ptr = Box::into_raw(Box::new(data)); let obj = unsafe { - lean_sys::lean_alloc_external(class.0 as *mut _, data_ptr.cast()) + lean_sys::lean_alloc_external(class.0.cast(), data_ptr.cast()) }; Self(LeanObject(obj.cast()), PhantomData) } @@ -609,11 +650,11 @@ impl LeanList { self.iter().map(f).collect() } - /// Build a list from an iterator of values convertible to `LeanObject`. - pub fn from_iter( - items: impl IntoIterator>, - ) -> Self { - let items: Vec = items.into_iter().map(Into::into).collect(); +} + +impl> FromIterator for LeanList { + fn from_iter>(iter: I) -> Self { + let items: Vec = iter.into_iter().map(Into::into).collect(); let mut list = Self::nil(); for item in items.into_iter().rev() { list = Self::cons(item, list); @@ -738,7 +779,7 @@ impl LeanExcept { /// Build `Except.error (String.mk msg)` from a Rust string. 
pub fn error_string(msg: &str) -> Self { - Self::error(LeanString::from_str(msg)) + Self::error(LeanString::new(msg)) } pub fn is_ok(&self) -> bool { From b39bdfc745abada18b247e47d9c6aa0555305b9c Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 12:11:04 -0500 Subject: [PATCH 14/27] ci: Add Valgrind memcheck test --- .github/workflows/valgrind.yml | 36 ++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .github/workflows/valgrind.yml diff --git a/.github/workflows/valgrind.yml b/.github/workflows/valgrind.yml new file mode 100644 index 00000000..4b78ac97 --- /dev/null +++ b/.github/workflows/valgrind.yml @@ -0,0 +1,36 @@ +name: Valgrind + +on: + push: + branches: main + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + valgrind: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions-rust-lang/setup-rust-toolchain@v1 + - uses: leanprover/lean-action@v1 + with: + auto-config: false + build: true + build-args: "IxTests" + - name: Install valgrind + run: sudo apt-get update && sudo apt-get install -y valgrind + - name: Run tests under valgrind + run: | + valgrind \ + --leak-check=full \ + --show-leak-kinds=definite,possible \ + --errors-for-leak-kinds=definite \ + --track-origins=yes \ + --error-exitcode=1 \ + .lake/build/bin/IxTests From a4a10765116c037c8178516920ce6c2bad71ce90 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 12:15:56 -0500 Subject: [PATCH 15/27] Fmt --- src/ffi.rs | 4 +++- src/ffi/compile.rs | 6 ++---- src/ffi/graph.rs | 2 +- src/ffi/ix/address.rs | 6 ++++-- src/ffi/ix/constant.rs | 4 +++- src/ffi/ix/data.rs | 9 ++++++--- src/ffi/ix/env.rs | 6 ++---- src/ffi/ix/expr.rs | 8 ++++---- src/ffi/ix/level.rs | 2 +- src/ffi/ix/name.rs | 4 
+++- src/ffi/ixon/compare.rs | 2 +- src/ffi/ixon/constant.rs | 15 ++++++--------- src/ffi/ixon/enums.rs | 12 +++++++++--- src/ffi/ixon/env.rs | 2 +- src/ffi/ixon/expr.rs | 2 +- src/ffi/ixon/meta.rs | 5 ++--- src/ffi/ixon/serialize.rs | 10 ++++------ src/ffi/ixon/univ.rs | 2 +- src/ffi/keccak.rs | 8 ++++++-- src/lean/object.rs | 6 ++---- 20 files changed, 62 insertions(+), 53 deletions(-) diff --git a/src/ffi.rs b/src/ffi.rs index e22aad79..32b9f497 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -42,7 +42,9 @@ where /// Wrap a Lean value in an IO success result. pub(crate) fn io_ok(val: impl Into) -> LeanObject { let val: LeanObject = val.into(); - unsafe { LeanObject::from_lean_ptr(lean_io_result_mk_ok(val.as_mut_ptr().cast())) } + unsafe { + LeanObject::from_lean_ptr(lean_io_result_mk_ok(val.as_mut_ptr().cast())) + } } /// Create a Lean IO error result from a Rust error message. diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 3486c6ae..9034d6ef 100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -10,6 +10,7 @@ use std::collections::HashMap; use std::sync::Arc; +use crate::ffi::{ffi_io_guard, io_error, io_ok}; use crate::ix::address::Address; use crate::ix::compile::{CompileState, compile_env}; use crate::ix::condense::compute_sccs; @@ -20,7 +21,6 @@ use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; -use crate::ffi::{ffi_io_guard, io_error, io_ok}; use crate::lean::nat::Nat; use crate::lean::object::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanIxBlockCompareDetail, @@ -44,9 +44,7 @@ use crate::ffi::ixon::env::{ build_raw_env, build_raw_name_entry, decode_raw_env, decoded_to_ixon_env, }; use crate::ffi::ixon::meta::{build_constant_meta, build_ixon_comm}; -use crate::ffi::lean_env::{ - GlobalCache, lean_ptr_to_env, lean_ptr_to_name, -}; +use crate::ffi::lean_env::{GlobalCache, 
lean_ptr_to_env, lean_ptr_to_name}; // ============================================================================= // Helper builders diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index b5196f23..78f78d14 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -2,9 +2,9 @@ use std::sync::Arc; +use crate::ffi::{ffi_io_guard, io_ok}; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; -use crate::ffi::{ffi_io_guard, io_ok}; use crate::lean::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; diff --git a/src/ffi/ix/address.rs b/src/ffi/ix/address.rs index e885aed4..0c10d569 100644 --- a/src/ffi/ix/address.rs +++ b/src/ffi/ix/address.rs @@ -2,7 +2,7 @@ //! //! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash -use crate::lean::object::{LeanIxAddress, LeanByteArray}; +use crate::lean::object::{LeanByteArray, LeanIxAddress}; /// Build a Ix.Address from a blake3::Hash. /// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray @@ -13,7 +13,9 @@ pub fn build_address(hash: &blake3::Hash) -> LeanIxAddress { /// Round-trip an Ix.Address: decode ByteArray, re-encode. 
/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray directly #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_address(addr: LeanIxAddress) -> LeanIxAddress { +pub extern "C" fn rs_roundtrip_ix_address( + addr: LeanIxAddress, +) -> LeanIxAddress { // Address is a single-field struct { hash : ByteArray } // Due to unboxing, addr IS the ByteArray directly LeanByteArray::from_bytes(addr.as_bytes()) diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index d330168f..f2495849 100644 --- a/src/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -16,7 +16,9 @@ use crate::ix::env::{ RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; use crate::lean::nat::Nat; -use crate::lean::object::{LeanIxConstantInfo, LeanArray, LeanCtor, LeanObject}; +use crate::lean::object::{ + LeanArray, LeanCtor, LeanIxConstantInfo, LeanObject, +}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::expr::{build_expr, decode_ix_expr}; diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index 019cdc5e..013de803 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -5,8 +5,9 @@ use crate::ix::env::{ }; use crate::lean::nat::Nat; use crate::lean::object::{ - LeanIxDataValue, LeanIxInt, LeanIxSourceInfo, LeanIxSubstring, LeanIxSyntax, - LeanIxSyntaxPreresolved, LeanArray, LeanCtor, LeanObject, LeanString, + LeanArray, LeanCtor, LeanIxDataValue, LeanIxInt, LeanIxSourceInfo, + LeanIxSubstring, LeanIxSyntax, LeanIxSyntaxPreresolved, LeanObject, + LeanString, }; use crate::ffi::builder::LeanBuildCache; @@ -411,7 +412,9 @@ pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( /// Round-trip an Ix.Syntax: decode from Lean, re-encode. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ix_syntax(syn_ptr: LeanIxSyntax) -> LeanIxSyntax { +pub extern "C" fn rs_roundtrip_ix_syntax( + syn_ptr: LeanIxSyntax, +) -> LeanIxSyntax { let syn = decode_ix_syntax(*syn_ptr); let mut cache = LeanBuildCache::new(); build_syntax(&mut cache, &syn) diff --git a/src/ffi/ix/env.rs b/src/ffi/ix/env.rs index 37606769..4db9fde9 100644 --- a/src/ffi/ix/env.rs +++ b/src/ffi/ix/env.rs @@ -4,13 +4,11 @@ use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; use crate::lean::object::{ - LeanIxEnvironment, LeanIxRawEnvironment, LeanArray, LeanCtor, LeanObject, + LeanArray, LeanCtor, LeanIxEnvironment, LeanIxRawEnvironment, LeanObject, }; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::constant::{ - build_constant_info, decode_constant_info, -}; +use crate::ffi::ix::constant::{build_constant_info, decode_constant_info}; use crate::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index 201e1e2a..8b968877 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -18,14 +18,14 @@ use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; use crate::lean::nat::Nat; -use crate::lean::object::{LeanIxExpr, LeanArray, LeanCtor, LeanObject, LeanString}; +use crate::lean::object::{ + LeanArray, LeanCtor, LeanIxExpr, LeanObject, LeanString, +}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; use crate::ffi::ix::data::{build_data_value, decode_data_value}; -use crate::ffi::ix::level::{ - build_level, build_level_array, decode_ix_level, -}; +use crate::ffi::ix::level::{build_level, build_level_array, decode_ix_level}; use crate::ffi::ix::name::{build_name, decode_ix_name}; use crate::ffi::primitives::build_nat; diff --git a/src/ffi/ix/level.rs b/src/ffi/ix/level.rs index a23d0963..462e97be 100644 --- 
a/src/ffi/ix/level.rs +++ b/src/ffi/ix/level.rs @@ -9,7 +9,7 @@ //! - Tag 5: mvar (n : Name) (hash : Address) use crate::ix::env::{Level, LevelData}; -use crate::lean::object::{LeanIxLevel, LeanArray, LeanCtor, LeanObject}; +use crate::lean::object::{LeanArray, LeanCtor, LeanIxLevel, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index d3337f99..c41f42b0 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -7,7 +7,9 @@ use crate::ix::env::{Name, NameData}; use crate::lean::nat::Nat; -use crate::lean::object::{LeanIxName, LeanArray, LeanCtor, LeanObject, LeanString}; +use crate::lean::object::{ + LeanArray, LeanCtor, LeanIxName, LeanObject, LeanString, +}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs index 52a9d87c..3942f8f9 100644 --- a/src/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -7,7 +7,7 @@ use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; use crate::lean::object::{ - LeanIxBlockCompareDetail, LeanByteArray, LeanCtor, LeanObject, + LeanByteArray, LeanCtor, LeanIxBlockCompareDetail, LeanObject, }; use crate::ffi::lean_env::{ diff --git a/src/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs index b5f9da29..0efc4e6e 100644 --- a/src/ffi/ixon/constant.rs +++ b/src/ffi/ixon/constant.rs @@ -16,21 +16,18 @@ use crate::ix::ixon::constant::{ RecursorRule as IxonRecursorRule, }; use crate::lean::object::{ - LeanIxAddress, LeanIxonAxiom, LeanIxonConstant, LeanIxonConstantInfo, - LeanIxonConstructor, LeanIxonConstructorProj, LeanIxonDefinition, - LeanIxonDefinitionProj, LeanIxonInductive, LeanIxonInductiveProj, - LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, - LeanIxonRecursorProj, LeanIxonRecursorRule, LeanArray, LeanByteArray, - LeanCtor, LeanObject, + LeanArray, LeanByteArray, LeanCtor, 
LeanIxAddress, LeanIxonAxiom, + LeanIxonConstant, LeanIxonConstantInfo, LeanIxonConstructor, + LeanIxonConstructorProj, LeanIxonDefinition, LeanIxonDefinitionProj, + LeanIxonInductive, LeanIxonInductiveProj, LeanIxonMutConst, LeanIxonQuotient, + LeanIxonRecursor, LeanIxonRecursorProj, LeanIxonRecursorRule, LeanObject, }; use crate::ffi::ixon::expr::{ build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, decode_ixon_expr_array, }; -use crate::ffi::ixon::univ::{ - build_ixon_univ_array, decode_ixon_univ_array, -}; +use crate::ffi::ixon::univ::{build_ixon_univ_array, decode_ixon_univ_array}; /// Build Address from Ixon Address type (which is just a [u8; 32]). pub fn build_address_from_ixon(addr: &Address) -> LeanIxAddress { diff --git a/src/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs index d2402bf2..ec25d5e3 100644 --- a/src/ffi/ixon/enums.rs +++ b/src/ffi/ixon/enums.rs @@ -4,7 +4,9 @@ use std::ffi::c_void; use crate::ix::env::{DefinitionSafety, QuotKind}; use crate::ix::ixon::constant::DefKind; -use crate::lean::object::{LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, LeanObject}; +use crate::lean::object::{ + LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, LeanObject, +}; /// Build Ixon.DefKind /// | defn -- tag 0 @@ -92,7 +94,9 @@ pub fn decode_ixon_quot_kind(obj: LeanObject) -> QuotKind { /// Round-trip Ixon.DefKind. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_def_kind(obj: LeanIxonDefKind) -> LeanIxonDefKind { +pub extern "C" fn rs_roundtrip_ixon_def_kind( + obj: LeanIxonDefKind, +) -> LeanIxonDefKind { let kind = decode_ixon_def_kind(*obj); build_def_kind(&kind).into() } @@ -108,7 +112,9 @@ pub extern "C" fn rs_roundtrip_ixon_definition_safety( /// Round-trip Ixon.QuotKind. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_ixon_quot_kind(obj: LeanIxonQuotKind) -> LeanIxonQuotKind { +pub extern "C" fn rs_roundtrip_ixon_quot_kind( + obj: LeanIxonQuotKind, +) -> LeanIxonQuotKind { let kind = decode_ixon_quot_kind(*obj); build_ixon_quot_kind(&kind).into() } diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs index cc434e57..d9bf3e68 100644 --- a/src/ffi/ixon/env.rs +++ b/src/ffi/ixon/env.rs @@ -10,7 +10,7 @@ use crate::ix::ixon::constant::Constant as IxonConstant; use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; use crate::lean::object::{ - LeanIxonRawEnv, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanIxonRawEnv, LeanObject, }; use crate::ffi::builder::LeanBuildCache; diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs index cb109423..524801da 100644 --- a/src/ffi/ixon/expr.rs +++ b/src/ffi/ixon/expr.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::lean::object::{LeanIxonExpr, LeanArray, LeanCtor, LeanObject}; +use crate::lean::object::{LeanArray, LeanCtor, LeanIxonExpr, LeanObject}; /// Build Ixon.Expr (12 constructors). 
pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObject { diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs index 75456d98..3810d542 100644 --- a/src/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -10,9 +10,8 @@ use crate::ix::ixon::metadata::{ ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, }; use crate::lean::object::{ - LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, - LeanIxonExprMetaArena, LeanIxonExprMetaData, LeanIxonNamed, LeanArray, - LeanCtor, LeanObject, + LeanArray, LeanCtor, LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, + LeanIxonExprMetaArena, LeanIxonExprMetaData, LeanIxonNamed, LeanObject, }; use crate::ffi::ix::constant::{ diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs index d9b34ec3..1eb18b96 100644 --- a/src/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -11,13 +11,11 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; use crate::lean::object::{ - LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, - LeanIxonUniv, LeanByteArray, LeanObject, + LeanByteArray, LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, + LeanIxonUniv, LeanObject, }; -use crate::ffi::ixon::constant::{ - decode_ixon_address, decode_ixon_constant, -}; +use crate::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; /// Unbox a Lean UInt64, handling both scalar and boxed representations. 
fn lean_ptr_to_u64(obj: LeanObject) -> u64 { @@ -179,8 +177,8 @@ pub extern "C" fn rs_eq_env_serialization( raw_env_obj: LeanIxonRawEnv, bytes_obj: LeanByteArray, ) -> bool { - use crate::ix::ixon::env::Env; use crate::ffi::ixon::env::decode_raw_env; + use crate::ix::ixon::env::Env; let decoded = decode_raw_env(*raw_env_obj); let bytes_data = bytes_obj.as_bytes(); diff --git a/src/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs index a6e6ae04..7cb4e944 100644 --- a/src/ffi/ixon/univ.rs +++ b/src/ffi/ixon/univ.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::ix::ixon::univ::Univ; -use crate::lean::object::{LeanIxonUniv, LeanArray, LeanCtor, LeanObject}; +use crate::lean::object::{LeanArray, LeanCtor, LeanIxonUniv, LeanObject}; impl LeanIxonUniv { /// Build Ixon.Univ diff --git a/src/ffi/keccak.rs b/src/ffi/keccak.rs index 7ae4cf0c..d14189fb 100644 --- a/src/ffi/keccak.rs +++ b/src/ffi/keccak.rs @@ -2,7 +2,9 @@ use std::sync::OnceLock; use tiny_keccak::{Hasher, Keccak}; -use crate::lean::object::{ExternalClass, LeanByteArray, LeanExternal, LeanObject}; +use crate::lean::object::{ + ExternalClass, LeanByteArray, LeanExternal, LeanObject, +}; static KECCAK_CLASS: OnceLock = OnceLock::new(); @@ -12,7 +14,9 @@ fn keccak_class() -> &'static ExternalClass { /// `Keccak.Hasher.init : Unit → Hasher` #[unsafe(no_mangle)] -extern "C" fn rs_keccak256_hasher_init(_unit: LeanObject) -> LeanExternal { +extern "C" fn rs_keccak256_hasher_init( + _unit: LeanObject, +) -> LeanExternal { LeanExternal::alloc(keccak_class(), Keccak::v256()) } diff --git a/src/lean/object.rs b/src/lean/object.rs index 7bfc2621..42938b13 100644 --- a/src/lean/object.rs +++ b/src/lean/object.rs @@ -542,9 +542,8 @@ impl LeanExternal { /// Allocate a new external object holding `data`. 
pub fn alloc(class: &ExternalClass, data: T) -> Self { let data_ptr = Box::into_raw(Box::new(data)); - let obj = unsafe { - lean_sys::lean_alloc_external(class.0.cast(), data_ptr.cast()) - }; + let obj = + unsafe { lean_sys::lean_alloc_external(class.0.cast(), data_ptr.cast()) }; Self(LeanObject(obj.cast()), PhantomData) } @@ -649,7 +648,6 @@ impl LeanList { pub fn collect(&self, f: impl Fn(LeanObject) -> T) -> Vec { self.iter().map(f).collect() } - } impl> FromIterator for LeanList { From 495e6d9d505f44c997d121979370d19933057267 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 12:54:51 -0500 Subject: [PATCH 16/27] Add LeanIOResult type and `--include-ignored` test flag --- .github/workflows/valgrind.yml | 4 +-- README.md | 10 ++++--- Tests/Main.lean | 17 +++++++----- flake.nix | 2 ++ src/ffi.rs | 28 +++---------------- src/ffi/compile.rs | 33 +++++++++++----------- src/ffi/graph.rs | 11 ++++---- src/lean.rs | 2 +- src/lean/object.rs | 51 ++++++++++++++++++++++++++++++++++ 9 files changed, 99 insertions(+), 59 deletions(-) diff --git a/.github/workflows/valgrind.yml b/.github/workflows/valgrind.yml index 4b78ac97..3879f166 100644 --- a/.github/workflows/valgrind.yml +++ b/.github/workflows/valgrind.yml @@ -14,7 +14,7 @@ concurrency: jobs: valgrind: - runs-on: ubuntu-latest + runs-on: warp-ubuntu-latest-x64-16x steps: - uses: actions/checkout@v6 - uses: actions-rust-lang/setup-rust-toolchain@v1 @@ -33,4 +33,4 @@ jobs: --errors-for-leak-kinds=definite \ --track-origins=yes \ --error-exitcode=1 \ - .lake/build/bin/IxTests + .lake/build/bin/IxTests -- --include-ignored aiur aiur-hashes ixvm diff --git a/README.md b/README.md index 70c90fa6..91c5efed 100644 --- a/README.md +++ b/README.md @@ -191,15 +191,17 @@ Compiler performance benchmarks are tracked at https://bencher.dev/console/proje **Lean tests:** `lake test` -- `lake test -- ` runs a specific test suite. 
Primary suites: `ffi`, `byte-array`, `ixon`, `claim`, `commit`, `canon`, `keccak`, `sharing`, `graph-unit`, `condense-unit` -- `lake test -- --ignored` runs expensive test suites: `shard-map`, `rust-canon-roundtrip`, `serial-canon-roundtrip`, `parallel-canon-roundtrip`, `graph-cross`, `condense-cross`, `compile`, `decompile`, `rust-serialize`, `rust-decompile`, `commit-io`, `aiur`, `aiur-hashes`, `ixvm` +- `lake test -- ` runs one or multiple primary test suites. Primary suites: `ffi`, `byte-array`, `ixon`, `claim`, `commit`, `canon`, `keccak`, `sharing`, `graph-unit`, `condense-unit` +- `lake test -- --ignored` runs only the expensive test suites: `shard-map`, `rust-canon-roundtrip`, `serial-canon-roundtrip`, `parallel-canon-roundtrip`, `graph-cross`, `condense-cross`, `compile`, `decompile`, `rust-serialize`, `rust-decompile`, `commit-io`, `aiur`, `aiur-hashes`, `ixvm` - Any `canon` or `compile` test will require significant RAM, beware of OOM - `aiur` and `aiur-hashes` generate ZK proofs and use significant CPU -- `lake test -- --ignored ` runs a specific expensive suite by name +- `lake test -- --ignored ` runs one or multiple expensive suites by name +- `lake test -- --include-ignored` runs both primary and expensive test suites +- `lake test -- --include-ignored ` runs all primary suites plus one or multiple expensive suites - `lake test -- cli` runs CLI integration tests - `lake test -- rust-compile` runs the Rust cross-compilation diagnostic -**Rust tests:** `cargo test` +**Rust tests:** `cargo test` or `cargo nextest run` ### Nix diff --git a/Tests/Main.lean b/Tests/Main.lean index e80952e5..e57732a2 100644 --- a/Tests/Main.lean +++ b/Tests/Main.lean @@ -86,17 +86,20 @@ def main (args : List String) : IO UInt32 := do return ← Tests.Cli.suite let runIgnored := args.contains "--ignored" - let filterArgs := args.filter (· != "--ignored") + let includeIgnored := args.contains "--include-ignored" + let filterArgs := args.filter fun a => a != "--ignored" && a != 
"--include-ignored" - -- Run primary tests - let primaryResult ← LSpec.lspecIO primarySuites filterArgs - if primaryResult != 0 then return primaryResult + -- Run primary tests unless --ignored (without --include-ignored) is specified + if !runIgnored || includeIgnored then + let primaryArgs := if runIgnored || includeIgnored then [] else filterArgs + let primaryResult ← LSpec.lspecIO primarySuites primaryArgs + if primaryResult != 0 then return primaryResult - -- Run ignored tests only when --ignored is specified - if runIgnored then + -- Run ignored tests when --ignored or --include-ignored is specified + if runIgnored || includeIgnored then let mut result ← LSpec.lspecIO ignoredSuites filterArgs let filtered := if filterArgs.isEmpty then ignoredRunners - else ignoredRunners.filter fun (key, _) => filterArgs.any fun arg => key == arg + else filterArgs.filterMap fun arg => ignoredRunners.find? fun (key, _) => key == arg for (_, action) in filtered do let r ← action if r != 0 then result := r diff --git a/flake.nix b/flake.nix index 834aa8e0..62258918 100644 --- a/flake.nix +++ b/flake.nix @@ -143,6 +143,8 @@ # Provide a unified dev shell with Lean + Rust devShells.default = pkgs.mkShell { + # Disable fortify hardening as it causes warnings with cargo debug builds + hardeningDisable = ["fortify"]; # Add libclang for FFI with rust-bindgen LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib"; packages = with pkgs; [ diff --git a/src/ffi.rs b/src/ffi.rs index 32b9f497..3587b387 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -12,17 +12,14 @@ pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. 
-use crate::lean::lean_sys::{ - lean_io_result_mk_error, lean_io_result_mk_ok, lean_mk_io_user_error, -}; -use crate::lean::object::{LeanArray, LeanByteArray, LeanObject, LeanString}; +use crate::lean::object::{LeanArray, LeanByteArray, LeanIOResult}; /// Guard an FFI function that returns a Lean IO result against panics. /// On panic, returns a Lean IO error with the panic message instead of /// unwinding across the `extern "C"` boundary (which is undefined behavior). -pub(crate) fn ffi_io_guard(f: F) -> LeanObject +pub(crate) fn ffi_io_guard(f: F) -> LeanIOResult where - F: FnOnce() -> LeanObject + std::panic::UnwindSafe, + F: FnOnce() -> LeanIOResult + std::panic::UnwindSafe, { match std::panic::catch_unwind(f) { Ok(result) => result, @@ -34,28 +31,11 @@ where } else { "FFI panic: unknown".to_string() }; - io_error(&msg) + LeanIOResult::error_string(&msg) }, } } -/// Wrap a Lean value in an IO success result. -pub(crate) fn io_ok(val: impl Into) -> LeanObject { - let val: LeanObject = val.into(); - unsafe { - LeanObject::from_lean_ptr(lean_io_result_mk_ok(val.as_mut_ptr().cast())) - } -} - -/// Create a Lean IO error result from a Rust error message. 
-pub(crate) fn io_error(msg: &str) -> LeanObject { - let lean_msg = LeanString::new(msg); - unsafe { - let lean_err = lean_mk_io_user_error(lean_msg.as_mut_ptr().cast()); - LeanObject::from_lean_ptr(lean_io_result_mk_error(lean_err)) - } -} - #[unsafe(no_mangle)] extern "C" fn rs_boxed_u32s_are_equivalent_to_bytes( u32s: LeanArray, diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 9034d6ef..dd276a4c 100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -10,7 +10,8 @@ use std::collections::HashMap; use std::sync::Arc; -use crate::ffi::{ffi_io_guard, io_error, io_ok}; +use crate::ffi::ffi_io_guard; +use crate::lean::object::LeanIOResult; use crate::ix::address::Address; use crate::ix::compile::{CompileState, compile_env}; use crate::ix::condense::compute_sccs; @@ -206,7 +207,7 @@ pub extern "C" fn rs_roundtrip_block_compare_detail( #[unsafe(no_mangle)] pub extern "C" fn rs_compile_env_full( env_consts_ptr: LeanObject, -) -> LeanObject { +) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { // Phase 1: Decode Lean environment let rust_env = lean_ptr_to_env(env_consts_ptr); @@ -223,7 +224,7 @@ pub extern "C" fn rs_compile_env_full( Err(e) => { let msg = format!("rs_compile_env_full: Rust compilation failed: {:?}", e); - return io_error(&msg); + return LeanIOResult::error_string(&msg); }, }; @@ -298,13 +299,13 @@ pub extern "C" fn rs_compile_env_full( result.set(1, condensed_obj); result.set(2, *compiled_obj); - io_ok(*result) + LeanIOResult::ok(*result) })) } /// FFI function to compile a Lean environment to serialized Ixon.Env bytes. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanObject { +pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); @@ -313,7 +314,7 @@ pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanObject { Ok(stt) => stt, Err(e) => { let msg = format!("rs_compile_env: Rust compilation failed: {:?}", e); - return io_error(&msg); + return LeanIOResult::error_string(&msg); }, }; @@ -321,12 +322,12 @@ pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanObject { let mut buf = Vec::new(); if let Err(e) = compile_stt.env.put(&mut buf) { let msg = format!("rs_compile_env: Env serialization failed: {}", e); - return io_error(&msg); + return LeanIOResult::error_string(&msg); } // Build Lean ByteArray let ba = LeanByteArray::from_bytes(&buf); - io_ok(ba) + LeanIOResult::ok(ba) })) } @@ -342,7 +343,7 @@ pub extern "C" fn rs_roundtrip_raw_env( /// FFI function to run all compilation phases and return combined results. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObject) -> LeanObject { +pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObject) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let env_len = rust_env.len(); @@ -361,7 +362,7 @@ pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObject) -> LeanObject { Ok(stt) => stt, Err(e) => { let msg = format!("rs_compile_phases: compilation failed: {:?}", e); - return io_error(&msg); + return LeanIOResult::error_string(&msg); }, }; @@ -434,7 +435,7 @@ pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObject) -> LeanObject { result.set(1, condensed_obj); result.set(2, *raw_ixon_env); - io_ok(*result) + LeanIOResult::ok(*result) })) } @@ -442,7 +443,7 @@ pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObject) -> LeanObject { #[unsafe(no_mangle)] pub extern "C" fn rs_compile_env_to_ixon( env_consts_ptr: LeanObject, -) -> LeanObject { +) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); @@ -452,7 +453,7 @@ pub extern "C" fn rs_compile_env_to_ixon( Err(e) => { let msg = format!("rs_compile_env_to_ixon: compilation failed: {:?}", e); - return io_error(&msg); + return LeanIOResult::error_string(&msg); }, }; @@ -520,7 +521,7 @@ pub extern "C" fn rs_compile_env_to_ixon( result.set(2, *blobs_arr); result.set(3, *comms_arr); result.set(4, *names_arr); - io_ok(*result) + LeanIOResult::ok(*result) })) } @@ -528,12 +529,12 @@ pub extern "C" fn rs_compile_env_to_ixon( #[unsafe(no_mangle)] pub extern "C" fn rs_canonicalize_env_to_ix( env_consts_ptr: LeanObject, -) -> LeanObject { +) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let raw_env = build_raw_environment(&mut cache, &rust_env); - 
io_ok(raw_env) + LeanIOResult::ok(raw_env) })) } diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index 78f78d14..4a74200b 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -2,7 +2,8 @@ use std::sync::Arc; -use crate::ffi::{ffi_io_guard, io_ok}; +use crate::ffi::ffi_io_guard; +use crate::lean::object::LeanIOResult; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; use crate::lean::object::{LeanArray, LeanCtor, LeanObject}; @@ -94,20 +95,20 @@ pub fn build_condensed_blocks( /// FFI function to build a reference graph from a Lean environment. #[unsafe(no_mangle)] -pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObject) -> LeanObject { +pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObject) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_ref_graph_array(&mut cache, &ref_graph.out_refs); - io_ok(result) + LeanIOResult::ok(result) })) } /// FFI function to compute SCCs from a Lean environment. #[unsafe(no_mangle)] -pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObject) -> LeanObject { +pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObject) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); @@ -115,6 +116,6 @@ pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObject) -> LeanObject { let condensed = compute_sccs(&ref_graph.out_refs); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let result = build_condensed_blocks(&mut cache, &condensed); - io_ok(result) + LeanIOResult::ok(result) })) } diff --git a/src/lean.rs b/src/lean.rs index 357b4ff6..41481dfb 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -1,7 +1,7 @@ //! 
Rust bindings for Lean, implemented by mimicking the memory layout of Lean's //! low-level C objects. //! -//! The `lean` submodule contains auto-generated bindings from `lean.h` via +//! The `lean_sys` submodule contains auto-generated bindings from `lean.h` via //! bindgen. Higher-level helpers and custom `#[repr(C)]` types are defined //! alongside it in sibling modules. diff --git a/src/lean/object.rs b/src/lean/object.rs index 42938b13..2c3f6872 100644 --- a/src/lean/object.rs +++ b/src/lean/object.rs @@ -794,6 +794,50 @@ impl LeanExcept { } } +// ============================================================================= +// LeanIOResult — EStateM.Result (BaseIO.Result) +// ============================================================================= + +/// Typed wrapper for a Lean `BaseIO.Result α` (`EStateM.Result`). +/// ok = ctor tag 0 (value, world), error = ctor tag 1 (error, world). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanIOResult(LeanObject); + +impl Deref for LeanIOResult { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanIOResult { + /// Build a successful IO result (tag 0, fields: [val, box(0)]). + pub fn ok(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, val); + ctor.set(1, LeanObject::box_usize(0)); // world token + Self(ctor.0) + } + + /// Build an IO error result (tag 1, fields: [err, box(0)]). + pub fn error(err: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, err); + ctor.set(1, LeanObject::box_usize(0)); // world token + Self(ctor.0) + } + + /// Build an IO error from a Rust string via `IO.Error.userError` (tag 7, 1 field). 
+ pub fn error_string(msg: &str) -> Self { + // IO.Error.userError is tag 7 with 1 object field (the String) + let user_error = LeanCtor::alloc(7, 1, 0); + user_error.set(0, LeanString::new(msg)); + Self::error(*user_error) + } +} + // ============================================================================= // From for LeanObject — allow wrapper types to be passed to set() etc. // ============================================================================= @@ -854,6 +898,13 @@ impl From for LeanObject { } } +impl From for LeanObject { + #[inline] + fn from(x: LeanIOResult) -> Self { + x.0 + } +} + // ============================================================================= // Domain types — typed newtypes for specific Lean types // ============================================================================= From cdec7da485125017298606a6692f7244f16b8efc Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 14:51:25 -0500 Subject: [PATCH 17/27] Move `lean.h` Rust bindings to separate subcrate --- Cargo.lock | 12 +- Cargo.toml | 9 +- lean-sys/Cargo.toml | 11 + build.rs => lean-sys/build.rs | 0 lean-sys/src/lib.rs | 80 +++++++ {src/lean => lean-sys/src}/nat.rs | 4 +- {src/lean => lean-sys/src}/object.rs | 305 ++++++++------------------- src/ffi.rs | 2 +- src/ffi/aiur.rs | 3 +- src/ffi/aiur/protocol.rs | 9 +- src/ffi/aiur/toplevel.rs | 3 +- src/ffi/builder.rs | 2 +- src/ffi/byte_array.rs | 2 +- src/ffi/compile.rs | 16 +- src/ffi/graph.rs | 3 +- src/ffi/ix/address.rs | 3 +- src/ffi/ix/constant.rs | 7 +- src/ffi/ix/data.rs | 10 +- src/ffi/ix/env.rs | 5 +- src/ffi/ix/expr.rs | 7 +- src/ffi/ix/level.rs | 3 +- src/ffi/ix/name.rs | 7 +- src/ffi/ixon/compare.rs | 5 +- src/ffi/ixon/constant.rs | 13 +- src/ffi/ixon/enums.rs | 5 +- src/ffi/ixon/env.rs | 5 +- src/ffi/ixon/expr.rs | 3 +- src/ffi/ixon/meta.rs | 7 +- src/ffi/ixon/serialize.rs | 6 +- src/ffi/ixon/sharing.rs | 2 +- src/ffi/ixon/univ.rs | 3 +- 
src/ffi/keccak.rs | 2 +- src/ffi/lean_env.rs | 4 +- src/ffi/primitives.rs | 6 +- src/ffi/unsigned.rs | 2 +- src/iroh/_client.rs | 2 +- src/iroh/_server.rs | 2 +- src/iroh/client.rs | 2 +- src/iroh/server.rs | 2 +- src/ix/compile.rs | 3 +- src/ix/decompile.rs | 3 +- src/ix/env.rs | 2 +- src/ix/graph.rs | 2 +- src/ix/ground.rs | 3 +- src/ix/ixon/serialize.rs | 2 +- src/ix/mutual.rs | 3 +- src/lean.rs | 142 +++++++++---- src/sha256.rs | 2 +- 48 files changed, 381 insertions(+), 355 deletions(-) create mode 100644 lean-sys/Cargo.toml rename build.rs => lean-sys/build.rs (100%) create mode 100644 lean-sys/src/lib.rs rename {src/lean => lean-sys/src}/nat.rs (98%) rename {src/lean => lean-sys/src}/object.rs (79%) diff --git a/Cargo.lock b/Cargo.lock index ca961f54..dc4f824a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1775,15 +1775,14 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "bindgen", "blake3", "bytes", - "cc", "dashmap", "indexmap", "iroh", "iroh-base", "itertools 0.14.0", + "lean-sys", "multi-stark", "n0-snafu", "n0-watcher", @@ -1817,6 +1816,15 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "lean-sys" +version = "0.1.0" +dependencies = [ + "bindgen", + "cc", + "num-bigint", +] + [[package]] name = "libc" version = "0.2.180" diff --git a/Cargo.toml b/Cargo.toml index 6e5a29e4..4e9c9910 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,6 @@ +[workspace] +members = ["lean-sys"] + [package] name = "ix_rs" version = "0.1.0" @@ -11,6 +14,7 @@ anyhow = "1" blake3 = "1.8.2" itertools = "0.14.0" indexmap = { version = "2", features = ["rayon"] } +lean-sys = { path = "lean-sys" } multi-stark = { git = "https://github.com/argumentcomputer/multi-stark.git", rev = "14b70601317e4500c7246c32a13ad08b3f560f2e" } num-bigint = "0.4.6" rayon = "1" @@ -31,11 +35,6 @@ tracing-subscriber = { version = "0.3", features = 
["env-filter"], optional = tr bincode = { version = "2.0.1", optional = true } serde = { version = "1.0.219", features = ["derive"], optional = true } - -[build-dependencies] -bindgen = "0.71" -cc = "1" - [dev-dependencies] quickcheck = "1.0.3" rand = "0.8.5" diff --git a/lean-sys/Cargo.toml b/lean-sys/Cargo.toml new file mode 100644 index 00000000..0a9b6340 --- /dev/null +++ b/lean-sys/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "lean-sys" +version = "0.1.0" +edition = "2024" + +[dependencies] +num-bigint = "0.4.6" + +[build-dependencies] +bindgen = "0.71" +cc = "1" diff --git a/build.rs b/lean-sys/build.rs similarity index 100% rename from build.rs rename to lean-sys/build.rs diff --git a/lean-sys/src/lib.rs b/lean-sys/src/lib.rs new file mode 100644 index 00000000..da9ff1b8 --- /dev/null +++ b/lean-sys/src/lib.rs @@ -0,0 +1,80 @@ +//! Low-level Lean FFI bindings and type-safe wrappers. +//! +//! The `include` submodule contains auto-generated bindings from `lean.h` via +//! bindgen. Higher-level helpers are in `object` and `nat`. + +#[allow( + non_upper_case_globals, + non_camel_case_types, + non_snake_case, + dead_code, + unsafe_op_in_unsafe_fn, + unused_qualifications, + clippy::all, + clippy::ptr_as_ptr, + clippy::cast_possible_wrap, + clippy::cast_possible_truncation, + clippy::derive_partial_eq_without_eq +)] +pub mod include { + include!(concat!(env!("OUT_DIR"), "/lean.rs")); +} + +pub mod nat; +pub mod object; + +use std::ffi::{CString, c_void}; + +/// Create a CString from a str, stripping any interior null bytes. +/// Lean strings are length-prefixed and can contain null bytes, but the +/// `lean_mk_string` FFI requires a null-terminated C string. This function +/// ensures conversion always succeeds by filtering out interior nulls. 
+pub fn safe_cstring(s: &str) -> CString { + CString::new(s).unwrap_or_else(|_| { + let bytes: Vec = s.bytes().filter(|&b| b != 0).collect(); + CString::new(bytes).expect("filtered string should have no nulls") + }) +} + +/// No-op foreach callback for external classes that hold no Lean references. +/// +/// # Safety +/// Must only be used as a `lean_external_foreach_fn` callback. +pub unsafe extern "C" fn noop_foreach( + _: *mut c_void, + _: *mut include::lean_object, +) { +} + +/// Generate a `#[repr(transparent)]` newtype over `LeanObject` for a specific +/// Lean type, with `Deref`, `From`, and a `new` constructor. +#[macro_export] +macro_rules! lean_domain_type { + ($($(#[$meta:meta])* $name:ident;)*) => {$( + $(#[$meta])* + #[derive(Clone, Copy)] + #[repr(transparent)] + pub struct $name($crate::object::LeanObject); + + impl std::ops::Deref for $name { + type Target = $crate::object::LeanObject; + #[inline] + fn deref(&self) -> &$crate::object::LeanObject { &self.0 } + } + + impl From<$name> for $crate::object::LeanObject { + #[inline] + fn from(x: $name) -> Self { x.0 } + } + + impl From<$crate::object::LeanObject> for $name { + #[inline] + fn from(obj: $crate::object::LeanObject) -> Self { Self(obj) } + } + + impl $name { + #[inline] + pub fn new(obj: $crate::object::LeanObject) -> Self { Self(obj) } + } + )*}; +} diff --git a/src/lean/nat.rs b/lean-sys/src/nat.rs similarity index 98% rename from src/lean/nat.rs rename to lean-sys/src/nat.rs index 66a6f496..b5ded9c5 100644 --- a/src/lean/nat.rs +++ b/lean-sys/src/nat.rs @@ -9,7 +9,7 @@ use std::mem::MaybeUninit; use num_bigint::BigUint; -use crate::lean::object::LeanObject; +use crate::object::LeanObject; /// Arbitrary-precision natural number, wrapping `BigUint`. 
#[derive(Hash, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)] @@ -108,7 +108,7 @@ impl Mpz { // GMP interop for building Lean Nat objects from limbs // ============================================================================= -use crate::lean::lean_sys::lean_uint64_to_nat; +use crate::include::lean_uint64_to_nat; /// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1 const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64; diff --git a/src/lean/object.rs b/lean-sys/src/object.rs similarity index 79% rename from src/lean/object.rs rename to lean-sys/src/object.rs index 2c3f6872..0a34fdbc 100644 --- a/src/lean/object.rs +++ b/lean-sys/src/object.rs @@ -8,8 +8,8 @@ use std::ffi::c_void; use std::marker::PhantomData; use std::ops::Deref; -use crate::lean::lean_sys; -use crate::lean::safe_cstring; +use crate::include; +use crate::safe_cstring; // ============================================================================= // LeanObject — Untyped base wrapper @@ -35,7 +35,7 @@ impl LeanObject { /// # Safety /// The pointer must be a valid Lean object (or tagged scalar). #[inline] - pub unsafe fn from_lean_ptr(ptr: *mut lean_sys::lean_object) -> Self { + pub unsafe fn from_lean_ptr(ptr: *mut include::lean_object) -> Self { Self(ptr.cast()) } @@ -45,7 +45,7 @@ impl LeanObject { /// via the Lean runtime. 
#[inline] pub fn from_nat_u64(n: u64) -> Self { - unsafe { Self::from_lean_ptr(lean_sys::lean_uint64_to_nat(n)) } + unsafe { Self::from_lean_ptr(include::lean_uint64_to_nat(n)) } } #[inline] @@ -70,21 +70,21 @@ impl LeanObject { assert!(!self.is_scalar(), "tag() called on scalar"); #[allow(clippy::cast_possible_truncation)] unsafe { - lean_sys::lean_obj_tag(self.0 as *mut _) as u8 + include::lean_obj_tag(self.0 as *mut _) as u8 } } #[inline] pub fn inc_ref(self) { if !self.is_scalar() { - unsafe { lean_sys::lean_inc_ref(self.0 as *mut _) } + unsafe { include::lean_inc_ref(self.0 as *mut _) } } } #[inline] pub fn dec_ref(self) { if !self.is_scalar() { - unsafe { lean_sys::lean_dec_ref(self.0 as *mut _) } + unsafe { include::lean_dec_ref(self.0 as *mut _) } } } @@ -102,12 +102,12 @@ impl LeanObject { #[inline] pub fn box_u64(n: u64) -> Self { - Self(unsafe { lean_sys::lean_box_uint64(n) }.cast()) + Self(unsafe { include::lean_box_uint64(n) }.cast()) } #[inline] pub fn unbox_u64(self) -> u64 { - unsafe { lean_sys::lean_unbox_uint64(self.0 as *mut _) } + unsafe { include::lean_unbox_uint64(self.0 as *mut _) } } /// Interpret as a constructor object (tag 0–243). @@ -155,12 +155,12 @@ impl LeanObject { #[inline] pub fn box_u32(n: u32) -> Self { - Self(unsafe { lean_sys::lean_box_uint32(n) }.cast()) + Self(unsafe { include::lean_box_uint32(n) }.cast()) } #[inline] pub fn unbox_u32(self) -> u32 { - unsafe { lean_sys::lean_unbox_uint32(self.0 as *mut _) } + unsafe { include::lean_unbox_uint32(self.0 as *mut _) } } } @@ -194,12 +194,12 @@ impl LeanArray { /// Allocate a new array with `size` elements (capacity = size). 
pub fn alloc(size: usize) -> Self { - let obj = unsafe { lean_sys::lean_alloc_array(size, size) }; + let obj = unsafe { include::lean_alloc_array(size, size) }; Self(LeanObject(obj.cast())) } pub fn len(&self) -> usize { - unsafe { lean_sys::lean_array_size(self.0.as_ptr() as *mut _) } + unsafe { include::lean_array_size(self.0.as_ptr() as *mut _) } } pub fn is_empty(&self) -> bool { @@ -208,7 +208,7 @@ impl LeanArray { pub fn get(&self, i: usize) -> LeanObject { LeanObject( - unsafe { lean_sys::lean_array_get_core(self.0.as_ptr() as *mut _, i) } + unsafe { include::lean_array_get_core(self.0.as_ptr() as *mut _, i) } .cast(), ) } @@ -216,7 +216,7 @@ impl LeanArray { pub fn set(&self, i: usize, val: impl Into) { let val: LeanObject = val.into(); unsafe { - lean_sys::lean_array_set_core( + include::lean_array_set_core( self.0.as_ptr() as *mut _, i, val.as_ptr() as *mut _, @@ -227,7 +227,7 @@ impl LeanArray { /// Return a slice over the array elements. pub fn data(&self) -> &[LeanObject] { unsafe { - let cptr = lean_sys::lean_array_cptr(self.0.as_ptr() as *mut _); + let cptr = include::lean_array_cptr(self.0.as_ptr() as *mut _); // Safety: LeanObject is repr(transparent) over *const c_void, and // lean_array_cptr returns *mut *mut lean_object which has the same layout. std::slice::from_raw_parts(cptr.cast(), self.len()) @@ -273,7 +273,7 @@ impl LeanByteArray { /// Allocate a new byte array with `size` bytes (capacity = size). 
pub fn alloc(size: usize) -> Self { - let obj = unsafe { lean_sys::lean_alloc_sarray(1, size, size) }; + let obj = unsafe { include::lean_alloc_sarray(1, size, size) }; Self(LeanObject(obj.cast())) } @@ -281,14 +281,14 @@ impl LeanByteArray { pub fn from_bytes(data: &[u8]) -> Self { let arr = Self::alloc(data.len()); unsafe { - let cptr = lean_sys::lean_sarray_cptr(arr.0.as_ptr() as *mut _); + let cptr = include::lean_sarray_cptr(arr.0.as_ptr() as *mut _); std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); } arr } pub fn len(&self) -> usize { - unsafe { lean_sys::lean_sarray_size(self.0.as_ptr() as *mut _) } + unsafe { include::lean_sarray_size(self.0.as_ptr() as *mut _) } } pub fn is_empty(&self) -> bool { @@ -298,7 +298,7 @@ impl LeanByteArray { /// Return the byte contents as a slice. pub fn as_bytes(&self) -> &[u8] { unsafe { - let cptr = lean_sys::lean_sarray_cptr(self.0.as_ptr() as *mut _); + let cptr = include::lean_sarray_cptr(self.0.as_ptr() as *mut _); std::slice::from_raw_parts(cptr, self.len()) } } @@ -310,7 +310,7 @@ impl LeanByteArray { pub unsafe fn set_data(&self, data: &[u8]) { unsafe { let obj = self.0.as_mut_ptr(); - let cptr = lean_sys::lean_sarray_cptr(obj.cast()); + let cptr = include::lean_sarray_cptr(obj.cast()); std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); // Update m_size: at offset 8 (after lean_object header) *obj.cast::().add(8).cast::() = data.len(); @@ -349,13 +349,13 @@ impl LeanString { /// Create a Lean string from a Rust `&str`. pub fn new(s: &str) -> Self { let c = safe_cstring(s); - let obj = unsafe { lean_sys::lean_mk_string(c.as_ptr()) }; + let obj = unsafe { include::lean_mk_string(c.as_ptr()) }; Self(LeanObject(obj.cast())) } /// Number of data bytes (excluding the trailing NUL). 
pub fn byte_len(&self) -> usize { - unsafe { lean_sys::lean_string_size(self.0.as_ptr() as *mut _) - 1 } + unsafe { include::lean_string_size(self.0.as_ptr() as *mut _) - 1 } } } @@ -363,8 +363,8 @@ impl std::fmt::Display for LeanString { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { unsafe { let obj = self.0.as_ptr() as *mut _; - let len = lean_sys::lean_string_size(obj) - 1; // m_size includes NUL - let data = lean_sys::lean_string_cstr(obj); + let len = include::lean_string_size(obj) - 1; // m_size includes NUL + let data = include::lean_string_cstr(obj); let bytes = std::slice::from_raw_parts(data.cast::(), len); let s = std::str::from_utf8_unchecked(bytes); f.write_str(s) @@ -404,7 +404,7 @@ impl LeanCtor { pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { #[allow(clippy::cast_possible_truncation)] let obj = unsafe { - lean_sys::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) + include::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) }; Self(LeanObject(obj.cast())) } @@ -417,7 +417,7 @@ impl LeanCtor { pub fn get(&self, i: usize) -> LeanObject { #[allow(clippy::cast_possible_truncation)] LeanObject( - unsafe { lean_sys::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } + unsafe { include::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } .cast(), ) } @@ -427,7 +427,7 @@ impl LeanCtor { let val: LeanObject = val.into(); #[allow(clippy::cast_possible_truncation)] unsafe { - lean_sys::lean_ctor_set( + include::lean_ctor_set( self.0.as_ptr() as *mut _, i as u32, val.as_ptr() as *mut _, @@ -439,7 +439,7 @@ impl LeanCtor { pub fn set_u8(&self, offset: usize, val: u8) { #[allow(clippy::cast_possible_truncation)] unsafe { - lean_sys::lean_ctor_set_uint8( + include::lean_ctor_set_uint8( self.0.as_ptr() as *mut _, offset as u32, val, @@ -451,7 +451,7 @@ impl LeanCtor { pub fn set_u32(&self, offset: usize, val: u32) { #[allow(clippy::cast_possible_truncation)] unsafe { - 
lean_sys::lean_ctor_set_uint32( + include::lean_ctor_set_uint32( self.0.as_ptr() as *mut _, offset as u32, val, @@ -463,7 +463,7 @@ impl LeanCtor { pub fn set_u64(&self, offset: usize, val: u64) { #[allow(clippy::cast_possible_truncation)] unsafe { - lean_sys::lean_ctor_set_uint64( + include::lean_ctor_set_uint64( self.0.as_ptr() as *mut _, offset as u32, val, @@ -543,14 +543,14 @@ impl LeanExternal { pub fn alloc(class: &ExternalClass, data: T) -> Self { let data_ptr = Box::into_raw(Box::new(data)); let obj = - unsafe { lean_sys::lean_alloc_external(class.0.cast(), data_ptr.cast()) }; + unsafe { include::lean_alloc_external(class.0.cast(), data_ptr.cast()) }; Self(LeanObject(obj.cast()), PhantomData) } /// Get a reference to the wrapped data. pub fn get(&self) -> &T { unsafe { - &*lean_sys::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() + &*include::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() } } } @@ -573,11 +573,11 @@ impl ExternalClass { /// The `finalizer` callback must correctly free the external data, and /// `foreach` must correctly visit any Lean object references held by the data. 
pub unsafe fn register( - finalizer: lean_sys::lean_external_finalize_proc, - foreach: lean_sys::lean_external_foreach_proc, + finalizer: include::lean_external_finalize_proc, + foreach: include::lean_external_foreach_proc, ) -> Self { Self( - unsafe { lean_sys::lean_register_external_class(finalizer, foreach) } + unsafe { include::lean_register_external_class(finalizer, foreach) } .cast(), ) } @@ -591,7 +591,7 @@ impl ExternalClass { } } unsafe { - Self::register(Some(drop_finalizer::), Some(super::noop_foreach)) + Self::register(Some(drop_finalizer::), Some(crate::noop_foreach)) } } } @@ -838,6 +838,52 @@ impl LeanIOResult { } } +// ============================================================================= +// LeanProd — Prod α β (pair) +// ============================================================================= + +/// Typed wrapper for a Lean `Prod α β` (ctor tag 0, 2 object fields). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanProd(LeanObject); + +impl Deref for LeanProd { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanProd) -> Self { + x.0 + } +} + +impl LeanProd { + /// Build a pair `(fst, snd)`. + pub fn new(fst: impl Into, snd: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, fst); + ctor.set(1, snd); + Self(*ctor) + } + + /// Get the first element. + pub fn fst(&self) -> LeanObject { + let ctor = self.0.as_ctor(); + ctor.get(0) + } + + /// Get the second element. + pub fn snd(&self) -> LeanObject { + let ctor = self.0.as_ctor(); + ctor.get(1) + } +} + // ============================================================================= // From for LeanObject — allow wrapper types to be passed to set() etc. 
// ============================================================================= @@ -905,191 +951,6 @@ impl From for LeanObject { } } -// ============================================================================= -// Domain types — typed newtypes for specific Lean types -// ============================================================================= - -/// Generate a `#[repr(transparent)]` newtype over `LeanObject` for a specific -/// Lean type, with `Deref`, `From`, and a `new` constructor. -macro_rules! lean_domain_type { - ($($(#[$meta:meta])* $name:ident;)*) => {$( - $(#[$meta])* - #[derive(Clone, Copy)] - #[repr(transparent)] - pub struct $name(LeanObject); - - impl Deref for $name { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { &self.0 } - } - - impl From<$name> for LeanObject { - #[inline] - fn from(x: $name) -> Self { x.0 } - } - - impl From for $name { - #[inline] - fn from(obj: LeanObject) -> Self { Self(obj) } - } - - impl $name { - #[inline] - pub fn new(obj: LeanObject) -> Self { Self(obj) } - } - )*}; -} - -lean_domain_type! { - // Ix core types - /// Lean `Ix.Name` object. - LeanIxName; - /// Lean `Ix.Level` object. - LeanIxLevel; - /// Lean `Ix.Expr` object. - LeanIxExpr; - /// Lean `Ix.ConstantInfo` object. - LeanIxConstantInfo; - /// Lean `Ix.RawEnvironment` object. - LeanIxRawEnvironment; - /// Lean `Ix.Environment` object. - LeanIxEnvironment; - /// Lean `Ix.RustCondensedBlocks` object. - LeanIxCondensedBlocks; - /// Lean `Ix.CompileM.RustCompilePhases` object. - LeanIxCompilePhases; - - // Ix data types - /// Lean `Ix.Int` object. - LeanIxInt; - /// Lean `Ix.Substring` object. - LeanIxSubstring; - /// Lean `Ix.SourceInfo` object. - LeanIxSourceInfo; - /// Lean `Ix.SyntaxPreresolved` object. - LeanIxSyntaxPreresolved; - /// Lean `Ix.Syntax` object. - LeanIxSyntax; - /// Lean `Ix.DataValue` object. - LeanIxDataValue; - - // Ixon types - /// Lean `Ixon.DefKind` object. 
- LeanIxonDefKind; - /// Lean `Ixon.DefinitionSafety` object. - LeanIxonDefinitionSafety; - /// Lean `Ixon.QuotKind` object. - LeanIxonQuotKind; - /// Lean `Ixon.Univ` object. - LeanIxonUniv; - /// Lean `Ixon.Expr` object. - LeanIxonExpr; - /// Lean `Ixon.Definition` object. - LeanIxonDefinition; - /// Lean `Ixon.RecursorRule` object. - LeanIxonRecursorRule; - /// Lean `Ixon.Recursor` object. - LeanIxonRecursor; - /// Lean `Ixon.Axiom` object. - LeanIxonAxiom; - /// Lean `Ixon.Quotient` object. - LeanIxonQuotient; - /// Lean `Ixon.Constructor` object. - LeanIxonConstructor; - /// Lean `Ixon.Inductive` object. - LeanIxonInductive; - /// Lean `Ixon.InductiveProj` object. - LeanIxonInductiveProj; - /// Lean `Ixon.ConstructorProj` object. - LeanIxonConstructorProj; - /// Lean `Ixon.RecursorProj` object. - LeanIxonRecursorProj; - /// Lean `Ixon.DefinitionProj` object. - LeanIxonDefinitionProj; - /// Lean `Ixon.MutConst` object. - LeanIxonMutConst; - /// Lean `Ixon.ConstantInfo` object. - LeanIxonConstantInfo; - /// Lean `Ixon.Constant` object. - LeanIxonConstant; - /// Lean `Ixon.DataValue` object. - LeanIxonDataValue; - /// Lean `Ixon.ExprMetaData` object. - LeanIxonExprMetaData; - /// Lean `Ixon.ExprMetaArena` object. - LeanIxonExprMetaArena; - /// Lean `Ixon.ConstantMeta` object. - LeanIxonConstantMeta; - /// Lean `Ixon.Named` object. - LeanIxonNamed; - /// Lean `Ixon.Comm` object. - LeanIxonComm; - /// Lean `Ixon.RawEnv` object. - LeanIxonRawEnv; - - // Error types - /// Lean `Ixon.SerializeError` object. - LeanIxSerializeError; - /// Lean `Ix.DecompileM.DecompileError` object. - LeanIxDecompileError; - /// Lean `Ix.CompileM.CompileError` object. - LeanIxCompileError; - /// Lean `BlockCompareResult` object. - LeanIxBlockCompareResult; - /// Lean `BlockCompareDetail` object. 
- LeanIxBlockCompareDetail; -} - -// ============================================================================= -// LeanProd — Prod α β (pair) -// ============================================================================= - -/// Typed wrapper for a Lean `Prod α β` (ctor tag 0, 2 object fields). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanProd(LeanObject); - -impl Deref for LeanProd { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanProd) -> Self { - x.0 - } -} - -impl LeanProd { - /// Build a pair `(fst, snd)`. - pub fn new(fst: impl Into, snd: impl Into) -> Self { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, fst); - ctor.set(1, snd); - Self(*ctor) - } - - /// Get the first element. - pub fn fst(&self) -> LeanObject { - let ctor = self.0.as_ctor(); - ctor.get(0) - } - - /// Get the second element. - pub fn snd(&self) -> LeanObject { - let ctor = self.0.as_ctor(); - ctor.get(1) - } -} - -/// `Ix.Address = { hash : ByteArray }` — single-field struct, unboxed to `ByteArray`. -pub type LeanIxAddress = LeanByteArray; - impl From for LeanObject { #[inline] fn from(x: u32) -> Self { diff --git a/src/ffi.rs b/src/ffi.rs index 3587b387..13d362bd 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -12,7 +12,7 @@ pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. -use crate::lean::object::{LeanArray, LeanByteArray, LeanIOResult}; +use lean_sys::object::{LeanArray, LeanByteArray, LeanIOResult}; /// Guard an FFI function that returns a Lean IO result against panics. 
/// On panic, returns a Lean IO error with the panic message instead of diff --git a/src/ffi/aiur.rs b/src/ffi/aiur.rs index 42b8423a..0ba537bb 100644 --- a/src/ffi/aiur.rs +++ b/src/ffi/aiur.rs @@ -3,7 +3,8 @@ use multi_stark::p3_field::integers::QuotientMap; pub mod protocol; pub mod toplevel; -use crate::{aiur::G, lean::object::LeanObject}; +use crate::aiur::G; +use lean_sys::object::LeanObject; #[inline] pub(super) fn lean_unbox_nat_as_usize(obj: LeanObject) -> usize { diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index 7dfe8300..d6122bfa 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -6,6 +6,11 @@ use multi_stark::{ use rustc_hash::{FxBuildHasher, FxHashMap}; use std::sync::OnceLock; +use lean_sys::object::{ + ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, + LeanExternal, LeanObject, +}; + use crate::{ aiur::{ G, @@ -15,10 +20,6 @@ use crate::{ ffi::aiur::{ lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, }, - lean::object::{ - ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, - LeanExternal, LeanObject, - }, }; // ============================================================================= diff --git a/src/ffi/aiur/toplevel.rs b/src/ffi/aiur/toplevel.rs index 92394a0d..c836c30a 100644 --- a/src/ffi/aiur/toplevel.rs +++ b/src/ffi/aiur/toplevel.rs @@ -1,12 +1,13 @@ use multi_stark::p3_field::PrimeCharacteristicRing; +use lean_sys::object::LeanObject; + use crate::{ FxIndexMap, aiur::{ G, bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, }, - lean::object::LeanObject, }; use crate::ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}; diff --git a/src/ffi/builder.rs b/src/ffi/builder.rs index 8e85a25b..e4fe8655 100644 --- a/src/ffi/builder.rs +++ b/src/ffi/builder.rs @@ -3,7 +3,7 @@ use blake3::Hash; use rustc_hash::FxHashMap; -use crate::lean::object::{LeanIxExpr, LeanIxLevel, LeanIxName}; +use crate::lean::{LeanIxExpr, LeanIxLevel, LeanIxName}; 
/// Cache for constructing Lean Ix types with deduplication. /// diff --git a/src/ffi/byte_array.rs b/src/ffi/byte_array.rs index 7e247e07..9dfb31b6 100644 --- a/src/ffi/byte_array.rs +++ b/src/ffi/byte_array.rs @@ -1,4 +1,4 @@ -use crate::lean::object::LeanByteArray; +use lean_sys::object::LeanByteArray; /// `@& ByteArray → @& ByteArray → Bool` /// Efficient implementation for `BEq ByteArray` diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index dd276a4c..3ae19bf8 100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -11,7 +11,7 @@ use std::collections::HashMap; use std::sync::Arc; use crate::ffi::ffi_io_guard; -use crate::lean::object::LeanIOResult; +use lean_sys::object::LeanIOResult; use crate::ix::address::Address; use crate::ix::compile::{CompileState, compile_env}; use crate::ix::condense::compute_sccs; @@ -22,12 +22,14 @@ use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; -use crate::lean::nat::Nat; -use crate::lean::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanIxBlockCompareDetail, - LeanIxBlockCompareResult, LeanIxCompileError, LeanIxCompilePhases, - LeanIxCondensedBlocks, LeanIxDecompileError, LeanIxSerializeError, - LeanIxonRawEnv, LeanObject, LeanString, +use lean_sys::nat::Nat; +use lean_sys::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, LeanString, +}; +use crate::lean::{ + LeanIxBlockCompareDetail, LeanIxBlockCompareResult, LeanIxCompileError, + LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxDecompileError, + LeanIxSerializeError, LeanIxonRawEnv, }; use dashmap::DashMap; diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index 4a74200b..c2626404 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -3,10 +3,9 @@ use std::sync::Arc; use crate::ffi::ffi_io_guard; -use crate::lean::object::LeanIOResult; use crate::ix::condense::compute_sccs; use 
crate::ix::graph::build_ref_graph; -use crate::lean::object::{LeanArray, LeanCtor, LeanObject}; +use lean_sys::object::{LeanArray, LeanCtor, LeanIOResult, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::build_name; diff --git a/src/ffi/ix/address.rs b/src/ffi/ix/address.rs index 0c10d569..396b4f4f 100644 --- a/src/ffi/ix/address.rs +++ b/src/ffi/ix/address.rs @@ -2,7 +2,8 @@ //! //! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash -use crate::lean::object::{LeanByteArray, LeanIxAddress}; +use lean_sys::object::LeanByteArray; +use crate::lean::LeanIxAddress; /// Build a Ix.Address from a blake3::Hash. /// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index f2495849..419f47e8 100644 --- a/src/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -15,10 +15,9 @@ use crate::ix::env::{ DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; -use crate::lean::nat::Nat; -use crate::lean::object::{ - LeanArray, LeanCtor, LeanIxConstantInfo, LeanObject, -}; +use lean_sys::nat::Nat; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use crate::lean::LeanIxConstantInfo; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::expr::{build_expr, decode_ix_expr}; diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index 013de803..a82586c0 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -3,11 +3,11 @@ use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, }; -use crate::lean::nat::Nat; -use crate::lean::object::{ - LeanArray, LeanCtor, LeanIxDataValue, LeanIxInt, LeanIxSourceInfo, - LeanIxSubstring, LeanIxSyntax, LeanIxSyntaxPreresolved, LeanObject, - LeanString, +use lean_sys::nat::Nat; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use crate::lean::{ + LeanIxDataValue, LeanIxInt, 
LeanIxSourceInfo, LeanIxSubstring, LeanIxSyntax, + LeanIxSyntaxPreresolved, }; use crate::ffi::builder::LeanBuildCache; diff --git a/src/ffi/ix/env.rs b/src/ffi/ix/env.rs index 4db9fde9..abd9909b 100644 --- a/src/ffi/ix/env.rs +++ b/src/ffi/ix/env.rs @@ -3,9 +3,8 @@ use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; -use crate::lean::object::{ - LeanArray, LeanCtor, LeanIxEnvironment, LeanIxRawEnvironment, LeanObject, -}; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use crate::lean::{LeanIxEnvironment, LeanIxRawEnvironment}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::constant::{build_constant_info, decode_constant_info}; diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index 8b968877..e855cfb7 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -17,10 +17,9 @@ use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; -use crate::lean::nat::Nat; -use crate::lean::object::{ - LeanArray, LeanCtor, LeanIxExpr, LeanObject, LeanString, -}; +use lean_sys::nat::Nat; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use crate::lean::LeanIxExpr; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ix/level.rs b/src/ffi/ix/level.rs index 462e97be..68cebde0 100644 --- a/src/ffi/ix/level.rs +++ b/src/ffi/ix/level.rs @@ -9,7 +9,8 @@ //! - Tag 5: mvar (n : Name) (hash : Address) use crate::ix::env::{Level, LevelData}; -use crate::lean::object::{LeanArray, LeanCtor, LeanIxLevel, LeanObject}; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use crate::lean::LeanIxLevel; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index c41f42b0..2e697b41 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -6,10 +6,9 @@ //! 
- Tag 2: num (parent : Name) (i : Nat) (hash : Address) use crate::ix::env::{Name, NameData}; -use crate::lean::nat::Nat; -use crate::lean::object::{ - LeanArray, LeanCtor, LeanIxName, LeanObject, LeanString, -}; +use lean_sys::nat::Nat; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use crate::lean::LeanIxName; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs index 3942f8f9..356cd127 100644 --- a/src/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -6,9 +6,8 @@ use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use crate::lean::object::{ - LeanByteArray, LeanCtor, LeanIxBlockCompareDetail, LeanObject, -}; +use lean_sys::object::{LeanByteArray, LeanCtor, LeanObject}; +use crate::lean::LeanIxBlockCompareDetail; use crate::ffi::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, diff --git a/src/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs index 0efc4e6e..b29b3508 100644 --- a/src/ffi/ixon/constant.rs +++ b/src/ffi/ixon/constant.rs @@ -15,12 +15,13 @@ use crate::ix::ixon::constant::{ Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, RecursorRule as IxonRecursorRule, }; -use crate::lean::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanIxAddress, LeanIxonAxiom, - LeanIxonConstant, LeanIxonConstantInfo, LeanIxonConstructor, - LeanIxonConstructorProj, LeanIxonDefinition, LeanIxonDefinitionProj, - LeanIxonInductive, LeanIxonInductiveProj, LeanIxonMutConst, LeanIxonQuotient, - LeanIxonRecursor, LeanIxonRecursorProj, LeanIxonRecursorRule, LeanObject, +use lean_sys::object::{LeanArray, LeanByteArray, LeanCtor, LeanObject}; +use crate::lean::{ + LeanIxAddress, LeanIxonAxiom, LeanIxonConstant, LeanIxonConstantInfo, + LeanIxonConstructor, LeanIxonConstructorProj, 
LeanIxonDefinition, + LeanIxonDefinitionProj, LeanIxonInductive, LeanIxonInductiveProj, + LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, LeanIxonRecursorProj, + LeanIxonRecursorRule, }; use crate::ffi::ixon::expr::{ diff --git a/src/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs index ec25d5e3..4e5e96f8 100644 --- a/src/ffi/ixon/enums.rs +++ b/src/ffi/ixon/enums.rs @@ -4,9 +4,8 @@ use std::ffi::c_void; use crate::ix::env::{DefinitionSafety, QuotKind}; use crate::ix::ixon::constant::DefKind; -use crate::lean::object::{ - LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, LeanObject, -}; +use lean_sys::object::LeanObject; +use crate::lean::{LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind}; /// Build Ixon.DefKind /// | defn -- tag 0 diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs index d9bf3e68..a5d8edf6 100644 --- a/src/ffi/ixon/env.rs +++ b/src/ffi/ixon/env.rs @@ -9,9 +9,10 @@ use crate::ix::ixon::comm::Comm; use crate::ix::ixon::constant::Constant as IxonConstant; use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; -use crate::lean::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanIxonRawEnv, LeanObject, +use lean_sys::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, }; +use crate::lean::LeanIxonRawEnv; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::{build_name, decode_ix_name}; diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs index 524801da..915449fd 100644 --- a/src/ffi/ixon/expr.rs +++ b/src/ffi/ixon/expr.rs @@ -3,7 +3,8 @@ use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; -use crate::lean::object::{LeanArray, LeanCtor, LeanIxonExpr, LeanObject}; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use crate::lean::LeanIxonExpr; /// Build Ixon.Expr (12 constructors). 
pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObject { diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs index 3810d542..26dc8114 100644 --- a/src/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -9,9 +9,10 @@ use crate::ix::ixon::env::Named; use crate::ix::ixon::metadata::{ ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, }; -use crate::lean::object::{ - LeanArray, LeanCtor, LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, - LeanIxonExprMetaArena, LeanIxonExprMetaData, LeanIxonNamed, LeanObject, +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use crate::lean::{ + LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, + LeanIxonExprMetaArena, LeanIxonExprMetaData, LeanIxonNamed, }; use crate::ffi::ix::constant::{ diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs index 1eb18b96..17c23a09 100644 --- a/src/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -10,9 +10,9 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; -use crate::lean::object::{ - LeanByteArray, LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, - LeanIxonUniv, LeanObject, +use lean_sys::object::{LeanByteArray, LeanObject}; +use crate::lean::{ + LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, LeanIxonUniv, }; use crate::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; diff --git a/src/ffi/ixon/sharing.rs b/src/ffi/ixon/sharing.rs index 6b66b613..797311fe 100644 --- a/src/ffi/ixon/sharing.rs +++ b/src/ffi/ixon/sharing.rs @@ -7,7 +7,7 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; -use crate::lean::object::{LeanArray, LeanByteArray}; +use lean_sys::object::{LeanArray, LeanByteArray}; use crate::ffi::ixon::expr::decode_ixon_expr_array; use 
crate::ffi::ixon::serialize::lean_ptr_to_ixon_expr; diff --git a/src/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs index 7cb4e944..357392d3 100644 --- a/src/ffi/ixon/univ.rs +++ b/src/ffi/ixon/univ.rs @@ -3,7 +3,8 @@ use std::sync::Arc; use crate::ix::ixon::univ::Univ; -use crate::lean::object::{LeanArray, LeanCtor, LeanIxonUniv, LeanObject}; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use crate::lean::LeanIxonUniv; impl LeanIxonUniv { /// Build Ixon.Univ diff --git a/src/ffi/keccak.rs b/src/ffi/keccak.rs index d14189fb..0284e9f6 100644 --- a/src/ffi/keccak.rs +++ b/src/ffi/keccak.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use tiny_keccak::{Hasher, Keccak}; -use crate::lean::object::{ +use lean_sys::object::{ ExternalClass, LeanByteArray, LeanExternal, LeanObject, }; diff --git a/src/ffi/lean_env.rs b/src/ffi/lean_env.rs index 6a52f3b7..8d13fbab 100644 --- a/src/ffi/lean_env.rs +++ b/src/ffi/lean_env.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use rustc_hash::FxHashMap; -use crate::lean::nat::Nat; -use crate::lean::object::LeanObject; +use lean_sys::nat::Nat; +use lean_sys::object::LeanObject; use crate::{ ix::compile::compile_env, diff --git a/src/ffi/primitives.rs b/src/ffi/primitives.rs index ca779acf..80a6e0e6 100644 --- a/src/ffi/primitives.rs +++ b/src/ffi/primitives.rs @@ -6,8 +6,8 @@ //! - List, Array, ByteArray //! 
- AssocList, HashMap -use crate::lean::nat::Nat; -use crate::lean::object::{ +use lean_sys::nat::Nat; +use lean_sys::object::{ LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObject, LeanString, }; @@ -35,7 +35,7 @@ pub fn build_nat(n: &Nat) -> LeanObject { limbs.push(u64::from_le_bytes(arr)); } unsafe { - LeanObject::from_raw(crate::lean::nat::lean_nat_from_limbs( + LeanObject::from_raw(lean_sys::nat::lean_nat_from_limbs( limbs.len(), limbs.as_ptr(), )) diff --git a/src/ffi/unsigned.rs b/src/ffi/unsigned.rs index 226fe0ab..396b4954 100644 --- a/src/ffi/unsigned.rs +++ b/src/ffi/unsigned.rs @@ -1,4 +1,4 @@ -use crate::lean::object::LeanByteArray; +use lean_sys::object::LeanByteArray; #[unsafe(no_mangle)] extern "C" fn c_u16_to_le_bytes(v: u16) -> LeanByteArray { diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs index aac86d51..09358ca6 100644 --- a/src/iroh/_client.rs +++ b/src/iroh/_client.rs @@ -1,4 +1,4 @@ -use crate::lean::object::{LeanExcept, LeanObject}; +use lean_sys::object::{LeanExcept, LeanObject}; const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS aarch64-darwin"; diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs index 228f0d4e..c7cec03a 100644 --- a/src/iroh/_server.rs +++ b/src/iroh/_server.rs @@ -1,4 +1,4 @@ -use crate::lean::object::LeanExcept; +use lean_sys::object::LeanExcept; /// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] diff --git a/src/iroh/client.rs b/src/iroh/client.rs index ab38da49..dd8119f2 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -8,7 +8,7 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; -use crate::lean::object::{ +use lean_sys::object::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanString, }; diff --git a/src/iroh/server.rs b/src/iroh/server.rs index 07ce3fd6..789d04c4 100644 --- a/src/iroh/server.rs +++ 
b/src/iroh/server.rs @@ -11,7 +11,7 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetResponse, PutResponse, Request, Response}; -use crate::lean::object::LeanExcept; +use lean_sys::object::LeanExcept; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; diff --git a/src/ix/compile.rs b/src/ix/compile.rs index 5c2a8269..03809f32 100644 --- a/src/ix/compile.rs +++ b/src/ix/compile.rs @@ -17,6 +17,8 @@ use std::{ thread, }; +use lean_sys::nat::Nat; + use crate::{ ix::address::Address, ix::condense::compute_sccs, @@ -44,7 +46,6 @@ use crate::{ }, ix::mutual::{Def, Ind, MutConst, MutCtx, Rec, ctx_to_all}, ix::strong_ordering::SOrd, - lean::nat::Nat, }; /// Whether to track hash-consed sizes during compilation. diff --git a/src/ix/decompile.rs b/src/ix/decompile.rs index 88082135..afa932e2 100644 --- a/src/ix/decompile.rs +++ b/src/ix/decompile.rs @@ -9,6 +9,8 @@ #![allow(clippy::map_err_ignore)] #![allow(clippy::match_same_arms)] +use lean_sys::nat::Nat; + use crate::{ ix::address::Address, ix::compile::CompileState, @@ -32,7 +34,6 @@ use crate::{ univ::Univ, }, ix::mutual::{MutCtx, all_to_ctx}, - lean::nat::Nat, }; use dashmap::DashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; diff --git a/src/ix/env.rs b/src/ix/env.rs index 73749f98..bc77e11b 100644 --- a/src/ix/env.rs +++ b/src/ix/env.rs @@ -14,7 +14,7 @@ use std::{ sync::Arc, }; -use crate::lean::nat::Nat; +use lean_sys::nat::Nat; use rustc_hash::FxHashMap; // -- Name tags ---------------------------------------------------------------- diff --git a/src/ix/graph.rs b/src/ix/graph.rs index 86d211fc..56cd3ae7 100644 --- a/src/ix/graph.rs +++ b/src/ix/graph.rs @@ -177,7 +177,7 @@ fn get_expr_references<'a>( mod tests { use super::*; use crate::ix::env::*; - use crate::lean::nat::Nat; + use lean_sys::nat::Nat; fn n(s: &str) -> Name { 
Name::str(Name::anon(), s.to_string()) diff --git a/src/ix/ground.rs b/src/ix/ground.rs index 008d00fd..0c963409 100644 --- a/src/ix/ground.rs +++ b/src/ix/ground.rs @@ -9,12 +9,13 @@ use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use rustc_hash::{FxHashMap, FxHashSet}; use std::collections::hash_map::Entry; +use lean_sys::nat::Nat; + use crate::{ ix::env::{ ConstantInfo, Env, Expr, ExprData, InductiveVal, Level, LevelData, Name, }, ix::graph::RefMap, - lean::nat::Nat, }; /// Reason a constant failed groundedness checking. diff --git a/src/ix/ixon/serialize.rs b/src/ix/ixon/serialize.rs index c0572160..7671c57b 100644 --- a/src/ix/ixon/serialize.rs +++ b/src/ix/ixon/serialize.rs @@ -871,7 +871,7 @@ impl Constant { // ============================================================================ use crate::ix::env::{Name, NameData}; -use crate::lean::nat::Nat; +use lean_sys::nat::Nat; use rustc_hash::FxHashMap; /// Serialize a Name to bytes (full recursive serialization, for standalone use). diff --git a/src/ix/mutual.rs b/src/ix/mutual.rs index 3e0e5dde..194a0db5 100644 --- a/src/ix/mutual.rs +++ b/src/ix/mutual.rs @@ -5,13 +5,14 @@ //! [`ctx_to_all`] / [`all_to_ctx`] functions convert between ordered name //! vectors and index maps. +use lean_sys::nat::Nat; + use crate::{ ix::env::{ ConstructorVal, DefinitionSafety, DefinitionVal, Expr, InductiveVal, Name, OpaqueVal, RecursorVal, ReducibilityHints, TheoremVal, }, ix::ixon::constant::DefKind, - lean::nat::Nat, }; use rustc_hash::FxHashMap; diff --git a/src/lean.rs b/src/lean.rs index 41481dfb..14bd93d6 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -1,49 +1,107 @@ -//! Rust bindings for Lean, implemented by mimicking the memory layout of Lean's -//! low-level C objects. +//! Ix-specific Lean domain type definitions. //! -//! The `lean_sys` submodule contains auto-generated bindings from `lean.h` via -//! bindgen. Higher-level helpers and custom `#[repr(C)]` types are defined -//! 
alongside it in sibling modules. +//! Generic Lean FFI wrappers live in the `lean_sys` crate. This module defines +//! typed newtypes for ix-specific Lean types using `lean_sys::lean_domain_type!`. -#[allow( - non_upper_case_globals, - non_camel_case_types, - non_snake_case, - dead_code, - unsafe_op_in_unsafe_fn, - unused_qualifications, - clippy::all, - clippy::ptr_as_ptr, - clippy::cast_possible_wrap, - clippy::cast_possible_truncation, - clippy::derive_partial_eq_without_eq -)] -pub mod lean_sys { - include!(concat!(env!("OUT_DIR"), "/lean.rs")); -} +lean_sys::lean_domain_type! { + // Ix core types + /// Lean `Ix.Name` object. + LeanIxName; + /// Lean `Ix.Level` object. + LeanIxLevel; + /// Lean `Ix.Expr` object. + LeanIxExpr; + /// Lean `Ix.ConstantInfo` object. + LeanIxConstantInfo; + /// Lean `Ix.RawEnvironment` object. + LeanIxRawEnvironment; + /// Lean `Ix.Environment` object. + LeanIxEnvironment; + /// Lean `Ix.RustCondensedBlocks` object. + LeanIxCondensedBlocks; + /// Lean `Ix.CompileM.RustCompilePhases` object. + LeanIxCompilePhases; -pub mod nat; -pub mod object; + // Ix data types + /// Lean `Ix.Int` object. + LeanIxInt; + /// Lean `Ix.Substring` object. + LeanIxSubstring; + /// Lean `Ix.SourceInfo` object. + LeanIxSourceInfo; + /// Lean `Ix.SyntaxPreresolved` object. + LeanIxSyntaxPreresolved; + /// Lean `Ix.Syntax` object. + LeanIxSyntax; + /// Lean `Ix.DataValue` object. + LeanIxDataValue; -use std::ffi::{CString, c_void}; + // Ixon types + /// Lean `Ixon.DefKind` object. + LeanIxonDefKind; + /// Lean `Ixon.DefinitionSafety` object. + LeanIxonDefinitionSafety; + /// Lean `Ixon.QuotKind` object. + LeanIxonQuotKind; + /// Lean `Ixon.Univ` object. + LeanIxonUniv; + /// Lean `Ixon.Expr` object. + LeanIxonExpr; + /// Lean `Ixon.Definition` object. + LeanIxonDefinition; + /// Lean `Ixon.RecursorRule` object. + LeanIxonRecursorRule; + /// Lean `Ixon.Recursor` object. + LeanIxonRecursor; + /// Lean `Ixon.Axiom` object. 
+ LeanIxonAxiom; + /// Lean `Ixon.Quotient` object. + LeanIxonQuotient; + /// Lean `Ixon.Constructor` object. + LeanIxonConstructor; + /// Lean `Ixon.Inductive` object. + LeanIxonInductive; + /// Lean `Ixon.InductiveProj` object. + LeanIxonInductiveProj; + /// Lean `Ixon.ConstructorProj` object. + LeanIxonConstructorProj; + /// Lean `Ixon.RecursorProj` object. + LeanIxonRecursorProj; + /// Lean `Ixon.DefinitionProj` object. + LeanIxonDefinitionProj; + /// Lean `Ixon.MutConst` object. + LeanIxonMutConst; + /// Lean `Ixon.ConstantInfo` object. + LeanIxonConstantInfo; + /// Lean `Ixon.Constant` object. + LeanIxonConstant; + /// Lean `Ixon.DataValue` object. + LeanIxonDataValue; + /// Lean `Ixon.ExprMetaData` object. + LeanIxonExprMetaData; + /// Lean `Ixon.ExprMetaArena` object. + LeanIxonExprMetaArena; + /// Lean `Ixon.ConstantMeta` object. + LeanIxonConstantMeta; + /// Lean `Ixon.Named` object. + LeanIxonNamed; + /// Lean `Ixon.Comm` object. + LeanIxonComm; + /// Lean `Ixon.RawEnv` object. + LeanIxonRawEnv; -/// Create a CString from a str, stripping any interior null bytes. -/// Lean strings are length-prefixed and can contain null bytes, but the -/// `lean_mk_string` FFI requires a null-terminated C string. This function -/// ensures conversion always succeeds by filtering out interior nulls. -pub fn safe_cstring(s: &str) -> CString { - CString::new(s).unwrap_or_else(|_| { - let bytes: Vec = s.bytes().filter(|&b| b != 0).collect(); - CString::new(bytes).expect("filtered string should have no nulls") - }) + // Error types + /// Lean `Ixon.SerializeError` object. + LeanIxSerializeError; + /// Lean `Ix.DecompileM.DecompileError` object. + LeanIxDecompileError; + /// Lean `Ix.CompileM.CompileError` object. + LeanIxCompileError; + /// Lean `BlockCompareResult` object. + LeanIxBlockCompareResult; + /// Lean `BlockCompareDetail` object. + LeanIxBlockCompareDetail; } -/// No-op foreach callback for external classes that hold no Lean references. 
-/// -/// # Safety -/// Must only be used as a `lean_external_foreach_fn` callback. -pub unsafe extern "C" fn noop_foreach( - _: *mut c_void, - _: *mut lean_sys::lean_object, -) { -} +/// `Ix.Address = { hash : ByteArray }` — single-field struct, unboxed to `ByteArray`. +pub type LeanIxAddress = lean_sys::object::LeanByteArray; diff --git a/src/sha256.rs b/src/sha256.rs index 662d2b5c..9bc854ed 100644 --- a/src/sha256.rs +++ b/src/sha256.rs @@ -1,6 +1,6 @@ use sha2::{Digest, Sha256}; -use crate::lean::object::LeanByteArray; +use lean_sys::object::LeanByteArray; #[unsafe(no_mangle)] extern "C" fn rs_sha256(bytes: LeanByteArray) -> LeanByteArray { From 5028eb294709e45306a6108fbcdaf1205febe5e7 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:04:30 -0500 Subject: [PATCH 18/27] Fmt --- src/ffi/aiur/protocol.rs | 4 ++-- src/ffi/compile.rs | 14 ++++++++------ src/ffi/graph.rs | 4 +++- src/ffi/ix/address.rs | 2 +- src/ffi/ix/constant.rs | 2 +- src/ffi/ix/data.rs | 4 ++-- src/ffi/ix/env.rs | 2 +- src/ffi/ix/expr.rs | 2 +- src/ffi/ix/level.rs | 2 +- src/ffi/ix/name.rs | 2 +- src/ffi/ixon/compare.rs | 2 +- src/ffi/ixon/constant.rs | 2 +- src/ffi/ixon/enums.rs | 4 +++- src/ffi/ixon/env.rs | 2 +- src/ffi/ixon/expr.rs | 2 +- src/ffi/ixon/meta.rs | 6 +++--- src/ffi/ixon/serialize.rs | 2 +- src/ffi/ixon/univ.rs | 2 +- 18 files changed, 33 insertions(+), 27 deletions(-) diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index d6122bfa..2df913cc 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -7,8 +7,8 @@ use rustc_hash::{FxBuildHasher, FxHashMap}; use std::sync::OnceLock; use lean_sys::object::{ - ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, - LeanExternal, LeanObject, + ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanExternal, + LeanObject, }; use crate::{ diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 3ae19bf8..98560339 
100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -11,7 +11,6 @@ use std::collections::HashMap; use std::sync::Arc; use crate::ffi::ffi_io_guard; -use lean_sys::object::LeanIOResult; use crate::ix::address::Address; use crate::ix::compile::{CompileState, compile_env}; use crate::ix::condense::compute_sccs; @@ -22,15 +21,16 @@ use crate::ix::ixon::constant::{Constant as IxonConstant, ConstantInfo}; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; -use lean_sys::nat::Nat; -use lean_sys::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, LeanString, -}; use crate::lean::{ LeanIxBlockCompareDetail, LeanIxBlockCompareResult, LeanIxCompileError, LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxDecompileError, LeanIxSerializeError, LeanIxonRawEnv, }; +use lean_sys::nat::Nat; +use lean_sys::object::LeanIOResult; +use lean_sys::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, LeanString, +}; use dashmap::DashMap; use dashmap::DashSet; @@ -345,7 +345,9 @@ pub extern "C" fn rs_roundtrip_raw_env( /// FFI function to run all compilation phases and return combined results. #[unsafe(no_mangle)] -pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanObject) -> LeanIOResult { +pub extern "C" fn rs_compile_phases( + env_consts_ptr: LeanObject, +) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let env_len = rust_env.len(); diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index c2626404..fd6904d7 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -94,7 +94,9 @@ pub fn build_condensed_blocks( /// FFI function to build a reference graph from a Lean environment. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanObject) -> LeanIOResult { +pub extern "C" fn rs_build_ref_graph( + env_consts_ptr: LeanObject, +) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = lean_ptr_to_env(env_consts_ptr); let rust_env = Arc::new(rust_env); diff --git a/src/ffi/ix/address.rs b/src/ffi/ix/address.rs index 396b4f4f..599eccbd 100644 --- a/src/ffi/ix/address.rs +++ b/src/ffi/ix/address.rs @@ -2,8 +2,8 @@ //! //! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash -use lean_sys::object::LeanByteArray; use crate::lean::LeanIxAddress; +use lean_sys::object::LeanByteArray; /// Build a Ix.Address from a blake3::Hash. /// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index 419f47e8..c16685df 100644 --- a/src/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -15,9 +15,9 @@ use crate::ix::env::{ DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; +use crate::lean::LeanIxConstantInfo; use lean_sys::nat::Nat; use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; -use crate::lean::LeanIxConstantInfo; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::expr::{build_expr, decode_ix_expr}; diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index a82586c0..c5819519 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -3,12 +3,12 @@ use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, }; -use lean_sys::nat::Nat; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; use crate::lean::{ LeanIxDataValue, LeanIxInt, LeanIxSourceInfo, LeanIxSubstring, LeanIxSyntax, LeanIxSyntaxPreresolved, }; +use lean_sys::nat::Nat; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; use crate::ffi::builder::LeanBuildCache; use 
crate::ffi::ix::name::{build_name, decode_ix_name}; diff --git a/src/ffi/ix/env.rs b/src/ffi/ix/env.rs index abd9909b..a959f554 100644 --- a/src/ffi/ix/env.rs +++ b/src/ffi/ix/env.rs @@ -3,8 +3,8 @@ use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::lean::{LeanIxEnvironment, LeanIxRawEnvironment}; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::constant::{build_constant_info, decode_constant_info}; diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index e855cfb7..cc8d1264 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -17,9 +17,9 @@ use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; +use crate::lean::LeanIxExpr; use lean_sys::nat::Nat; use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; -use crate::lean::LeanIxExpr; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ix/level.rs b/src/ffi/ix/level.rs index 68cebde0..d4972881 100644 --- a/src/ffi/ix/level.rs +++ b/src/ffi/ix/level.rs @@ -9,8 +9,8 @@ //! - Tag 5: mvar (n : Name) (hash : Address) use crate::ix::env::{Level, LevelData}; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::lean::LeanIxLevel; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index 2e697b41..f2f55569 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -6,9 +6,9 @@ //! 
- Tag 2: num (parent : Name) (i : Nat) (hash : Address) use crate::ix::env::{Name, NameData}; +use crate::lean::LeanIxName; use lean_sys::nat::Nat; use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; -use crate::lean::LeanIxName; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs index 356cd127..bc7b7576 100644 --- a/src/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -6,8 +6,8 @@ use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use lean_sys::object::{LeanByteArray, LeanCtor, LeanObject}; use crate::lean::LeanIxBlockCompareDetail; +use lean_sys::object::{LeanByteArray, LeanCtor, LeanObject}; use crate::ffi::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, diff --git a/src/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs index b29b3508..9bd4ce9b 100644 --- a/src/ffi/ixon/constant.rs +++ b/src/ffi/ixon/constant.rs @@ -15,7 +15,6 @@ use crate::ix::ixon::constant::{ Quotient as IxonQuotient, Recursor as IxonRecursor, RecursorProj, RecursorRule as IxonRecursorRule, }; -use lean_sys::object::{LeanArray, LeanByteArray, LeanCtor, LeanObject}; use crate::lean::{ LeanIxAddress, LeanIxonAxiom, LeanIxonConstant, LeanIxonConstantInfo, LeanIxonConstructor, LeanIxonConstructorProj, LeanIxonDefinition, @@ -23,6 +22,7 @@ use crate::lean::{ LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, LeanIxonRecursorProj, LeanIxonRecursorRule, }; +use lean_sys::object::{LeanArray, LeanByteArray, LeanCtor, LeanObject}; use crate::ffi::ixon::expr::{ build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, diff --git a/src/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs index 4e5e96f8..e4f1cc78 100644 --- a/src/ffi/ixon/enums.rs +++ b/src/ffi/ixon/enums.rs @@ -4,8 +4,10 @@ use std::ffi::c_void; use 
crate::ix::env::{DefinitionSafety, QuotKind}; use crate::ix::ixon::constant::DefKind; +use crate::lean::{ + LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, +}; use lean_sys::object::LeanObject; -use crate::lean::{LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind}; /// Build Ixon.DefKind /// | defn -- tag 0 diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs index a5d8edf6..b40218f3 100644 --- a/src/ffi/ixon/env.rs +++ b/src/ffi/ixon/env.rs @@ -9,10 +9,10 @@ use crate::ix::ixon::comm::Comm; use crate::ix::ixon::constant::Constant as IxonConstant; use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; +use crate::lean::LeanIxonRawEnv; use lean_sys::object::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, }; -use crate::lean::LeanIxonRawEnv; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::{build_name, decode_ix_name}; diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs index 915449fd..074fa03f 100644 --- a/src/ffi/ixon/expr.rs +++ b/src/ffi/ixon/expr.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::lean::LeanIxonExpr; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; /// Build Ixon.Expr (12 constructors). 
pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObject { diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs index 26dc8114..64bbcabc 100644 --- a/src/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -9,11 +9,11 @@ use crate::ix::ixon::env::Named; use crate::ix::ixon::metadata::{ ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, }; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::lean::{ - LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, - LeanIxonExprMetaArena, LeanIxonExprMetaData, LeanIxonNamed, + LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, LeanIxonExprMetaArena, + LeanIxonExprMetaData, LeanIxonNamed, }; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::ix::constant::{ build_reducibility_hints, decode_reducibility_hints, diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs index 17c23a09..86149792 100644 --- a/src/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -10,10 +10,10 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; -use lean_sys::object::{LeanByteArray, LeanObject}; use crate::lean::{ LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, LeanIxonUniv, }; +use lean_sys::object::{LeanByteArray, LeanObject}; use crate::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; diff --git a/src/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs index 357392d3..321773de 100644 --- a/src/ffi/ixon/univ.rs +++ b/src/ffi/ixon/univ.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use crate::ix::ixon::univ::Univ; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; use crate::lean::LeanIxonUniv; +use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; impl LeanIxonUniv { /// Build Ixon.Univ From dc7a2e13b0dbefddf4f83880cb3910bc76355eba Mon Sep 17 00:00:00 2001 From: samuelburnham 
<45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 17:05:25 -0500 Subject: [PATCH 19/27] Refactor Iroh FFI --- src/ffi.rs | 5 ++ src/ffi/iroh.rs | 120 +++++++++++++++++++++++++++++++++++++++++++++ src/iroh/client.rs | 97 +----------------------------------- src/iroh/server.rs | 17 +------ 4 files changed, 128 insertions(+), 111 deletions(-) create mode 100644 src/ffi/iroh.rs diff --git a/src/ffi.rs b/src/ffi.rs index 13d362bd..a6d3c0e2 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -1,5 +1,10 @@ pub mod aiur; pub mod byte_array; +#[cfg(all( + feature = "net", + not(all(target_os = "macos", target_arch = "aarch64")) +))] +pub mod iroh; pub mod keccak; pub mod lean_env; pub mod unsigned; diff --git a/src/ffi/iroh.rs b/src/ffi/iroh.rs new file mode 100644 index 00000000..bfe32689 --- /dev/null +++ b/src/ffi/iroh.rs @@ -0,0 +1,120 @@ +use lean_sys::object::{ + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanString, +}; + +use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; +use crate::iroh::{client, server}; + +lean_sys::lean_domain_type! { + /// Lean `Iroh.Connect.PutResponse` object. + LeanPutResponse; + /// Lean `Iroh.Connect.GetResponse` object. + LeanGetResponse; +} + +impl LeanPutResponse { + /// Build from `message` and `hash` strings. + /// + /// ```lean + /// structure PutResponse where + /// message : String + /// hash : String + /// ``` + pub fn mk(message: &str, hash: &str) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanString::new(message)); + ctor.set(1, LeanString::new(hash)); + Self::new((*ctor).into()) + } +} + +impl LeanGetResponse { + /// Build from `message`, `hash`, and raw `bytes`. 
+ /// + /// ```lean + /// structure GetResponse where + /// message : String + /// hash : String + /// bytes : ByteArray + /// ``` + pub fn mk(message: &str, hash: &str, bytes: &[u8]) -> Self { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, LeanString::new(message)); + ctor.set(1, LeanString::new(hash)); + ctor.set(2, LeanByteArray::from_bytes(bytes)); + Self::new((*ctor).into()) + } +} + +/// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_put( + node_id: LeanString, + addrs: LeanArray, + relay_url: LeanString, + input: LeanString, +) -> LeanExcept { + let node_id = node_id.to_string(); + let addrs: Vec = addrs.map(|x| x.as_string().to_string()); + let relay_url = relay_url.to_string(); + let input_str = input.to_string(); + + let request = + Request::Put(PutRequest { bytes: input_str.as_bytes().to_vec() }); + let rt = + tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + + match rt.block_on(client::connect(&node_id, &addrs, &relay_url, request)) { + Ok(response) => match response { + Response::Put(put_response) => LeanExcept::ok(LeanPutResponse::mk( + &put_response.message, + &put_response.hash, + )), + _ => LeanExcept::error_string("error: incorrect server response"), + }, + Err(err) => LeanExcept::error_string(&err.to_string()), + } +} + +/// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_get( + node_id: LeanString, + addrs: LeanArray, + relay_url: LeanString, + hash: LeanString, +) -> LeanExcept { + let node_id = node_id.to_string(); + let addrs: Vec = addrs.map(|x| x.as_string().to_string()); + let relay_url = relay_url.to_string(); + let hash_str = hash.to_string(); + + let request = Request::Get(GetRequest { hash: hash_str.clone() }); + + let rt = + tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + 
+ match rt.block_on(client::connect(&node_id, &addrs, &relay_url, request)) { + Ok(response) => match response { + Response::Get(get_response) => LeanExcept::ok(LeanGetResponse::mk( + &get_response.message, + &get_response.hash, + &get_response.bytes, + )), + _ => LeanExcept::error_string("error: incorrect server response"), + }, + Err(err) => LeanExcept::error_string(&err.to_string()), + } +} + +/// `Iroh.Serve.serve' : Unit → Except String Unit` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_serve() -> LeanExcept { + let rt = + tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); + + match rt.block_on(server::serve()) { + Ok(()) => LeanExcept::ok(0), + Err(err) => LeanExcept::error_string(&err.to_string()), + } +} diff --git a/src/iroh/client.rs b/src/iroh/client.rs index dd8119f2..1828ea10 100644 --- a/src/iroh/client.rs +++ b/src/iroh/client.rs @@ -7,108 +7,15 @@ use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; -use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; -use lean_sys::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanString, -}; +use crate::iroh::common::{Request, Response}; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; // Maximum number of characters to read from the server. 
Connection automatically closed if this is exceeded const READ_SIZE_LIMIT: usize = 100_000_000; -/// Build a Lean `PutResponse` structure: -/// ``` -/// structure PutResponse where -/// message: String -/// hash: String -/// ``` -fn mk_put_response(message: &str, hash: &str) -> LeanCtor { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, LeanString::new(message)); - ctor.set(1, LeanString::new(hash)); - ctor -} - -/// Build a Lean `GetResponse` structure: -/// ``` -/// structure GetResponse where -/// message: String -/// hash: String -/// bytes: ByteArray -/// ``` -fn mk_get_response(message: &str, hash: &str, bytes: &[u8]) -> LeanCtor { - let byte_array = LeanByteArray::from_bytes(bytes); - let ctor = LeanCtor::alloc(0, 3, 0); - ctor.set(0, LeanString::new(message)); - ctor.set(1, LeanString::new(hash)); - ctor.set(2, byte_array); - ctor -} - -/// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_put( - node_id: LeanString, - addrs: LeanArray, - relay_url: LeanString, - input: LeanString, -) -> LeanExcept { - let node_id = node_id.to_string(); - let addrs: Vec = addrs.map(|x| x.as_string().to_string()); - let relay_url = relay_url.to_string(); - let input_str = input.to_string(); - - let request = - Request::Put(PutRequest { bytes: input_str.as_bytes().to_vec() }); - let rt = - tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - - match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { - Ok(response) => match response { - Response::Put(put_response) => LeanExcept::ok(mk_put_response( - &put_response.message, - &put_response.hash, - )), - _ => LeanExcept::error_string("error: incorrect server response"), - }, - Err(err) => LeanExcept::error_string(&err.to_string()), - } -} - -/// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` -#[unsafe(no_mangle)] -extern "C" fn 
rs_iroh_get( - node_id: LeanString, - addrs: LeanArray, - relay_url: LeanString, - hash: LeanString, -) -> LeanExcept { - let node_id = node_id.to_string(); - let addrs: Vec = addrs.map(|x| x.as_string().to_string()); - let relay_url = relay_url.to_string(); - let hash_str = hash.to_string(); - - let request = Request::Get(GetRequest { hash: hash_str.clone() }); - - let rt = - tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - - match rt.block_on(connect(&node_id, &addrs, &relay_url, request)) { - Ok(response) => match response { - Response::Get(get_response) => LeanExcept::ok(mk_get_response( - &get_response.message, - &get_response.hash, - &get_response.bytes, - )), - _ => LeanExcept::error_string("error: incorrect server response"), - }, - Err(err) => LeanExcept::error_string(&err.to_string()), - } -} - // Largely taken from https://github.com/n0-computer/iroh/blob/main/iroh/examples/connect.rs -async fn connect( +pub async fn connect( node_id: &str, addrs: &[String], relay_url: &str, diff --git a/src/iroh/server.rs b/src/iroh/server.rs index 789d04c4..a40c5c3f 100644 --- a/src/iroh/server.rs +++ b/src/iroh/server.rs @@ -11,29 +11,14 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, fmt}; use crate::iroh::common::{GetResponse, PutResponse, Request, Response}; -use lean_sys::object::LeanExcept; // An example ALPN that we are using to communicate over the `Endpoint` const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/magic/0"; // Maximum number of characters to read from the client. 
Connection automatically closed if this is exceeded const READ_SIZE_LIMIT: usize = 100_000_000; -/// `Iroh.Serve.serve' : Unit → Except String Unit` -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> LeanExcept { - // Create a Tokio runtime to block on the async function - let rt = - tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime"); - - // Run the async function and block until we get the result - match rt.block_on(serve()) { - Ok(()) => LeanExcept::ok(0), - Err(err) => LeanExcept::error_string(&err.to_string()), - } -} - // Largely taken from https://github.com/n0-computer/iroh/blob/main/iroh/examples/listen.rs -async fn serve() -> n0_snafu::Result<()> { +pub async fn serve() -> n0_snafu::Result<()> { // Initialize the subscriber with `RUST_LOG=info` to preserve some server logging tracing_subscriber::registry() .with(fmt::layer()) From 7104fd17fa45f356fb5e0ceb7e7bf609532a2f90 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Tue, 3 Mar 2026 17:21:26 -0500 Subject: [PATCH 20/27] Docs and address review --- c/common.h | 4 + c/linear.h | 265 ++++++++++++++++++++++++++++++++++++ docs/ffi.md | 287 ++++++++++++--------------------------- lean-sys/LICENSE-APACHE | 201 +++++++++++++++++++++++++++ lean-sys/LICENSE-MIT | 21 +++ lean-sys/src/object.rs | 69 ++++++---- src/ffi/aiur/protocol.rs | 4 +- src/ffi/aiur/toplevel.rs | 2 +- src/ffi/iroh.rs | 4 +- 9 files changed, 625 insertions(+), 232 deletions(-) create mode 100644 c/common.h create mode 100644 c/linear.h create mode 100644 lean-sys/LICENSE-APACHE create mode 100644 lean-sys/LICENSE-MIT diff --git a/c/common.h b/c/common.h new file mode 100644 index 00000000..28617d5a --- /dev/null +++ b/c/common.h @@ -0,0 +1,4 @@ +#pragma once +#include "lean/lean.h" + +static void noop_foreach(void *mod, b_lean_obj_arg fn) {} diff --git a/c/linear.h b/c/linear.h new file mode 100644 index 00000000..9febff87 --- /dev/null +++ b/c/linear.h @@ -0,0 +1,265 
@@
+/*
+
+NOTE: This file and the linear API in general are currently unused, as we have decided to not pass mutable objects to and from Rust in order to keep the FFI boundary simple.
+
+However, we may revisit the `linear.h` API in the future, at which point this file would be ported to Rust in the `lean_sys` crate.
+
+For now, the `linear.h` documentation is provided below as a docstring.
+
+*/
+
+/*
+
+## Dealing with mutable objects
+
+As a functional language, Lean primarily uses purely functional data structures,
+whereas Rust functions often mutate objects. This fundamental difference in
+computational paradigms requires special care; otherwise, we risk introducing
+Lean code with unintended or incorrect behavior.
+
+Let's consider a type `T` and a Rust function `f(&mut T)`. In Lean, we would
+like to have the corresponding `f : T → T`, which returns a modified `T` but
+leaves the input `T` intact. How can we use Rust's `f` as the implementation of
+Lean's `f`?
+
+One approach is to use a Rust function `g(&T) -> T`, implemented as follows:
+
+```rust
+fn g(t: &T) -> T {
+ let mut clone = t.clone();
+ f(&mut clone);
+ clone
+}
+```
+
+Already we can see two problems. First, `g` requires `T` to implement `Clone`.
+Second, even when `T: Clone`, cloning might be expensive. The fact is that the
+implementation provided, Rust's `f`, was designed to mutate `T` and we shouldn't
+be fighting against that.
+
+So Ix goes with the flow and mutates `T` with Rust's `f`. Consequently, Lean's
+`f : T → T` will, in fact, mutate the input, which will be returned as the
+output. A direct sin against the purity of the functional world.
+
+At this point, the best we can do is to create guardrails to protect us against
+ourselves and force us to use terms of `T` linearly when `f` is involved. That
+is, after applying `f (t : T)`, reusing `t` should be prohibited.
+ +### The birth of `linear.h` + +We've explored the motivation for the API provided by `linear.h`, in which a +`linear_object` wraps the reference to the raw Rust object and has a +`bool outdated` attribute telling whether the linear object can be used or not. +Then, instead of `lean_external_object` pointing directly to the Rust object, it +points to a `linear_object`. When we ought to use the Rust object, we must +always "assert linearity", which panics if `outdated` is `true`. + +To illustrate it, let's use "E" for `lean_external_object`, "L" for +`linear_object` and "R" for potentially mutating Rust objects. Right after +initialization, we have: + +``` +E0 ──> L0 (outdated = false) ──> R +``` + +Now suppose we need to mutate `R`. We do it and then we perform a "linear bump", +which copies `L` and sets it as outdated. Then we wrap it as another external +object: + +``` +E1 ──> L1 (outdated = false) ─┐ +E0 ──> L0 (outdated = true) ──┴> R +``` + +And after `N` linear bumps: + +``` +EN ──> LN (outdated = false) ─┐ +... ┆ +E2 ──> L2 (outdated = true) ──┤ +E1 ──> L1 (outdated = true) ──┤ +E0 ──> L0 (outdated = true) ──┴> R +``` + +Great. Now imagine Lean wants to free these external objects. The function that +frees a linear object should only free the Rust object when `outdated == false`. +Following up with the image above, let's free `E1`. + +``` +EN ──> LN (outdated = false) ─┐ +... ┆ +E2 ──> L2 (outdated = true) ──┤ + │ +E0 ──> L0 (outdated = true) ──┴> R +``` + +When freeing `EN`, the Rust object will be deallocated: + +``` +... ┆ +E2 ──> L2 (outdated = true) ──┤ + │ +E0 ──> L0 (outdated = true) ──┴> X +``` + +All remaining external objects are outdated so their respective linear objects +won't try to free the (already dropped) Rust object. + +## What if a Rust function takes ownership of the object? + +When ownership is required, we mutate the Rust object by "taking" or "replacing" +it with a dummy object. 
Concretely, `std::mem::take` or `std::mem::replace` are
+used, returning the actual `T` from a `&mut T`. And with `T` at hand, the target
+function can be called.
+
+The latest linear object is marked as outdated and the chain of linear objects
+is broken. But then, how will the residual Rust object be dropped once Lean
+wants to drop all external objects?
+
+It turns out we also need a `bool finalize_even_if_outdated` attribute on the
+`linear_object` struct, which becomes `true` in these scenarios. By doing this,
+we're "ditching" the linear object. And the logic to free linear objects needs
+one small adjustment: the Rust object must be dropped when either the linear
+object is not outdated or when `finalize_even_if_outdated` is set to `true`.
+
+The invariant that needs to be maintained is that *only one* linear object can
+free the shared Rust object.
+
+## Preventing unintentional Lean optimizations
+
+We've done our lower level homework and now we have an `f : T → T` in Lean that
+should panic at runtime when its input is reused. So we do:
+
+```lean4
+ ...
+ let a := f t
+ let b := f t -- reuses `t`!
+ ...
+```
+
+We run the code and it executes smoothly. Why!?
+
+The Lean compilation process detects that both `a` and `b` are equal to `f t` so
+instead of calling `f` a second time it just sets `b` with the value of `a`. It
+appears to be harmless but in fact we want to discourage this kind of source code
+at all costs.
+
+Lean provides the tag `never_extract` precisely for this. It's used internally
+when some function performs side-effects and should never be optimized away.
+
+And to conclude, there are cases in which this optimization is truly harmful.
+Consider an initialization function `T.init : Unit → T` in the following code:
+
+```lean4
+ ...
+ let t1 := T.init ()
+ let t2 := T.init ()
+ let a := f t1
+ let b := f t2
+ ...
+``` + +If `T.init` is not tagged with `never_extract`, `t2` and `t1` will point to the +same object, the first call to `f` will mark it as outdated and thus the second +call will panic! + +So the `never_extract` tag must be applied to functions that: + +* Mutate their input or +* Return objects that work on the basis of mutation + +*/ + +#pragma once +#include "lean/lean.h" +#include "common.h" + +/* +This file provides a framework for enforcing linear usage of mutating objects by +Lean's runtime. It's particularly useful when making use of Rust objects that +don't implement `Clone` and work on the basis of mutation. +*/ + +typedef struct { + /* A reference to the underlying mutable object */ + void *object_ref; + /* A pointer to a function that can free `object_ref` */ + void (*finalizer)(void *); + /* If set to `true`, the resource pointed by `object_ref` cannot be used */ + bool outdated; + /* If set to `true`, allow the finalizer to be called on outdated objects */ + bool finalize_even_if_outdated; +} linear_object; + +static inline linear_object *linear_object_init(void *object_ref, void (*finalizer)(void *)) { + linear_object *linear = malloc(sizeof(linear_object)); + linear->object_ref = object_ref; + linear->finalizer = finalizer; + linear->outdated = false; + linear->finalize_even_if_outdated = false; + return linear; +} + +static inline linear_object *to_linear_object(void *ptr) { + return (linear_object*)ptr; +} + +static inline void *get_object_ref(linear_object *linear) { + return linear->object_ref; +} + +static inline linear_object *linear_bump(linear_object *linear) { + linear_object *copy = malloc(sizeof(linear_object)); + *copy = *linear; + linear->outdated = true; + return copy; +} + +static inline void ditch_linear(linear_object *linear) { + linear->outdated = true; + linear->finalize_even_if_outdated = true; +} + +static inline void assert_linearity(linear_object *linear) { + if (LEAN_UNLIKELY(linear->outdated)) { + 
lean_internal_panic("Non-linear usage of linear object"); + } +} + +static inline void free_linear_object(linear_object *linear) { + // Only finalize `object_ref` if `linear` is the latest linear object reference + // or if the finalizer was forcibly set as allowed. By doing this, we avoid + // double-free attempts. + if (LEAN_UNLIKELY(!linear->outdated || linear->finalize_even_if_outdated)) { + linear->finalizer(linear->object_ref); + } + free(linear); +} + +/* --- API to implement Lean objects --- */ + +static void linear_object_finalizer(void *ptr) { + free_linear_object(to_linear_object(ptr)); +} + +static lean_external_class *g_linear_object_class = NULL; + +static lean_external_class *get_linear_object_class() { + if (g_linear_object_class == NULL) { + g_linear_object_class = lean_register_external_class( + &linear_object_finalizer, + &noop_foreach + ); + } + return g_linear_object_class; +} + +static inline lean_object *alloc_lean_linear_object(linear_object *linear) { + return lean_alloc_external(get_linear_object_class(), linear); +} + +static inline linear_object *validated_linear(lean_object *obj) { + linear_object *linear = to_linear_object(lean_get_external_data(obj)); + assert_linearity(linear); + return linear; +} diff --git a/docs/ffi.md b/docs/ffi.md index eb8f9e02..dcecdb8a 100644 --- a/docs/ffi.md +++ b/docs/ffi.md @@ -1,224 +1,117 @@ # Ix FFI framework Ix extensively utilizes Lean's FFI capabilities to interface with Rust -implementations while minimizing overhead. This document consolidates the -principles for doing so responsibly. 
- -We follow a strict dependency order: - -* Lean can interface with C -* C can interface with Rust -* Lean can interface with Rust - -Hence we use the following naming conventions: - -* Names of external C functions start with "c_" -* Names of external Rust functions start with "rs_" -* Names of external C functions that depend on Rust functions start with "c_rs_" +implementations for performance benefits while minimizing overhead. This document +describes the approach used in Ix and best practices for writing Lean->Rust FFI. Interfacing with C is a well-established and well-supported case in Lean. After all, Lean's runtime is implemented in C and the API for reading, allocating and populating Lean objects is rich enough to support this interaction. Interfacing -with Rust, however, introduces a new set of challenges. - -## Reading data from Lean - -Making sense of data that's produced by Lean already poses an initial challenge. -One possible approach is as follows: - -1. Serialize the data in Lean as a `ByteArray` and provide it to a C function -2. Get the reference to the slice of bytes and pass it to the Rust function -3. Deserialize the data and use it as needed - -While that's possible (and plausible!) it adds a recurring serde cost overhead. -So the approach taken in Ix is different. - -The Ix's Rust static lib mimics the memory layout of Lean runtime objects and -uses `unsafe` code to turn `*const c_void` pointers into appropriate `&T` -references. Though, when possible, raw data extraction of Lean objects is -preferably done in C with the API provided by the Lean toolchain (via `lean.h`). - -For example, when targeting a Rust function that consumes a string, we don't -need to pass a reference to the whole `lean_string_object`. Instead, we make use -of the fact that Lean strings are `\0`-terminated and only pass a `char const *` -from C to Rust, which receives it as a `*const c_char` and then (unsafely) turns -it into a `&str`. 
- -Extra care must be taken when dealing with -[inductive types](https://github.com/leanprover/lean4/blob/master/doc/dev/ffi.md#inductive-types), -as the order of arguments in the Lean objects may not match the same order from -the higher level type definition in Lean. - -## Producing data for Lean - -Since we can mimic the memory layout of Lean objects in Rust, we should allocate -and populate them in Rust, right? Well, the answer is "no". - -Lean employs different allocation methods depending on compilation flags, making -it impractical to track them in Rust. Instead, we allocate the inner data on the -heap and return a raw pointer to C, which then wraps it using the appropriate -API. +with Rust, however, is not trivial because of Rust's distinct +ownership-based memory management system. + +## Bindgen Rust bindings to `lean.h` + +In order to avoid this complexity and keep Lean in control of memory +management for objects created via FFI to Rust, we use +[rust-bindgen](https://github.com/rust-lang/rust-bindgen) to automatically +generate Rust bindings to +[`lean.h`](https://github.com/leanprover/lean4/blob/master/src/include/lean/lean.h). +This allows us to create and manage Lean objects in Rust without taking +control of the underlying memory, needing to implement `Drop`, or having to +know about the state of Lean's reference counting mechanism. Bindgen runs in +`build.rs` and generates unsafe Rust functions that link to the `lean.h` +library. This external module can then be found at +`target/release/lean-sys-/out/lean.rs`. + +## `LeanObject` API + +To facilitate working with Lean objects in Rust, we also designed an +ergonomic API in the `lean-sys` crate to wrap raw C pointers in Rust types, +with methods to abstract the low-level binding function calls from `lean.h`. +The fundamental building block is `LeanObject`, a wrapper around an opaque +Lean value represented in Rust as `*const c_void`. 
This value is either a +pointer to a heap-allocated object or a tagged scalar (a raw value that fits +into one pointer's width, e.g. a `Bool` or small `Nat`). `LeanObject` is +then itself wrapped into Lean types such as `LeanCtor` for inductives, +`LeanArray` for arrays, etc. + +A `lean_domain_type!` macro is also defined to allow for easy construction +of arbitrary Lean object types, which can then be used directly in FFI +functions to disambiguate between other `LeanObject`s. In Ix these are +defined in `src/lean.rs`. To construct custom data in Rust, the user can +define their own constructor methods using `LeanCtor` (e.g. +[`LeanPutResponse`](src/ffi/iroh.rs)). It is possible to use `LeanObject` +or `*const c_void` directly in an `extern "C" fn`, but this is generally +not recommended as internal Rust functions may pass in the wrong object +more easily, and any low-level constructors would not be hidden behind the +API boundary. A key concept in this design is that ownership of the data is transferred to Lean, making it responsible for deallocation. If the data type is intended to be -used as a black box by Lean, `lean_external_object` is an useful abstraction. It -requires a function pointer for deallocation, meaning the Rust code must provide -a function that properly frees the object's memory by dropping it. - -## Dealing with mutable objects - -As a functional language, Lean primarily uses purely functional data structures, -whereas Rust functions often mutate objects. This fundamental difference in -computational paradigms requires special care; otherwise, we risk introducing -Lean code with unintended or incorrect behavior. - -Let's consider a type `T` and a Rust function `f(&mut T)`. In Lean, we would -like to have the corresponding `f : T → T`, which returns a modified `T` but -leaves the input `T` intact. How can we use Rust's `f` as the implementation of -Lean's `f`? 
- -One approach is to use a Rust function `g(&T) -> T`, implemented as follows: - -```rust -fn g(t: &T) -> T { - let mut clone = t.clone(); - f(&mut clone); - clone -} -``` - -Already we can see two problems. First, `g` requires `T` to implement `Clone`. -Second, even when `T: Clone`, cloning might be expensive. The fact is that the -implementation provided, Rust's `f`, was designed to mutate `T` and we shouldn't -be fighting against that. - -So Ix goes with the flow and mutates `T` with Rust's `f`. Consequently, Lean's -`f : T → T` will, in fact, mutate the input, which will be returned as the -output. A direct sin against the purity of the functional world. - -At this point, the best we can do is to create guardrails to protect us against -ourselves and force us to use terms of `T` linearly when `f` is involved. That -is, after applying `f (t : T)`, reusing `t` should be prohibitive. - -### The birth of `linear.h` - -We've explored the motivation for the API provided by `linear.h`, in which a -`linear_object` wraps the reference to the raw Rust object and has a -`bool outdated` attribute telling whether the linear object can be used or not. -Then, instead of `lean_external_object` pointing directly to the Rust object, it -points to a `linear_object`. When we ought to use the Rust object, we must -always "assert linearity", which panics if `outdated` is `true`. - -To illustrate it, let's use "E" for `lean_external_object`, "L" for -`linear_object` and "R" for potentially mutating Rust objects. Right after -initialization, we have: +used as a black box by Lean, `ExternalClass` is a useful abstraction. It +requires a function pointer for deallocation, meaning the Rust code must +provide a function that properly frees the object's memory by dropping it. +See [`KECCAK_CLASS`](src/ffi/keccak.rs) for an example. -``` -E0 ──> L0 (outdated = false) ──> R -``` +## Notes -Now suppose we need to mutate `R`. 
We do it and then we perform a "linear bump", -which copies `L` and sets it as outdated. Then we wrap it as another external -object: +By convention, names of external Rust functions start with `rs_`. -``` -E1 ──> L1 (outdated = false) ─┐ -E0 ──> L0 (outdated = true) ──┴> R -``` +### Inductive Types -And after `N` linear bumps: +Extra care must be taken when dealing with [inductive +types](https://lean-lang.org/doc/reference/latest/The-Type-System/Inductive-Types/#run-time-inductives) +as the runtime memory layout of constructor fields may not match the +declaration order in Lean. Fields are reordered into three groups: -``` -EN ──> LN (outdated = false) ─┐ -... ┆ -E2 ──> L2 (outdated = true) ──┤ -E1 ──> L1 (outdated = true) ──┤ -E0 ──> L0 (outdated = true) ──┴> R -``` +1. Non-scalar fields (lean_object *), in declaration order +2. `USize` fields, in declaration order +3. Other scalar fields, in decreasing order by size, then declaration order within each size -Great. Now imagine Lean wants to free these external objects. The function that -frees a linear object should only free the Rust object when `outdated == false`. -Following up with the image above, let's free `E1`. +This means a structure like -``` -EN ──> LN (outdated = false) ─┐ -... ┆ -E2 ──> L2 (outdated = true) ──┤ - │ -E0 ──> L0 (outdated = true) ──┴> R -``` - -When freeing `EN`, the Rust object will be deallocated: - -``` -... ┆ -E2 ──> L2 (outdated = true) ──┤ - │ -E0 ──> L0 (outdated = true) ──┴> X +```lean +structure Reorder where + flag : Bool + obj : Array Nat + size : UInt64 ``` -All remaining external objects are outdated so their respective linear objects -won't try to free the (already dropped) Rust object. +would be laid out as [obj, size, flag] at runtime — the `UInt64` is placed +before the `Bool`. Trivial wrapper types (e.g. `Char` wraps `UInt32`) count as +their underlying scalar type. -## What if a Rust function takes ownership of the object? 
+To avoid issues, define Lean structures with fields already in runtime order +(objects first, then scalars in decreasing size), so that declaration order +matches the reordered layout. -When ownership is required, we mutate the Rust object by "taking" or "replacing" -it with a dummy object. Concretely, `std::mem::take` or `std::mem::replace` are -used, returning the actual `T` from a `&mut T`. And with `T` at hand, the target -function can be called. +### Enum FFI convention -The latest linear object is marked as outdated and the chain of linear objects -is broken. But then, how will the residual Rust object be dropped once Lean -wants to drop all external objects? +Lean passes simple enums (inductives where all constructors have zero fields, +e.g. `DefKind`, `QuotKind`) as **raw unboxed tag values** (`0`, `1`, `2`, ...) +across the FFI boundary, not as `lean_box(tag)`. To decode, use +`obj.as_ptr() as usize`; to build, use `LeanObject::from_raw(tag as *const c_void)`. +Do **not** use `box_usize`/`unbox_usize` for these — doing so will silently +corrupt the value. -It turns out we also need a `bool finalize_even_if_outdated` attribute on the -`linear_object` struct, which becomes `true` in these scenarios. By doing this, -we're "ditching" the linear object. And the logic to free linear objects needs -one small adjustment: the Rust object must be dropped when either the linear -object is not outdated or when `finalize_even_if_outdated` is set to `true`. +### Reference counting for reused objects -The invariant that needs to be maintained is that *only one* linear object can -free the shared Rust object. - -## Preventing unintentional Lean optimizations - -We've done our lower level homework and now we have an `f : T → T` in Lean that -should panic at runtime when its input is reused. So we do: - -```lean4 - ... - let a := f t - let b := f t -- reuses `t`! - ... -``` - -We run the code and it executes smoothly. Why!? 
- -The Lean compilation process detects that both `a` and `b` are equal to `f t` so -instead of calling `f` a second time it just sets `b` with the value of `a`. It -appears to be harmless but in fact we want discourage this kind of source code -at all costs. - -Lean provides the tag `never_extract` precisely for this. It's used internally -when some function performs side-effects and should never be optimized away. - -And to conclude, there are cases in which this optimization is truly harmful. -Consider an initialization function `T.init : Unit → T` in the following code: - -```lean4 - ... - let t1 := T.init () - let t2 := T.init () - let a := f t1 - let b := f t2 - ... -``` +When building a new Lean object, if you construct all fields from scratch (e.g. +`LeanString::new(...)`, `LeanByteArray::from_bytes(...)`), ownership is +straightforward — the freshly allocated objects start with rc=1 and Lean manages +them from there. -If `T.init` is not tagged with `never_extract`, `t2` and `t1` will point to the -same object, the first call to `f` will mark it as outdated and thus the second -call will panic! +However, if you take a Lean object received as a **borrowed** argument (`@&` in +Lean, `b_lean_obj_arg` in C) and store it directly into a new object via +`.set()`, you must call `.inc_ref()` on it first. Otherwise Lean will free the +original while the new object still references it. If you only read/decode the +argument into Rust types and then build fresh Lean objects, this does not apply. -So the `never_extract` tag must be applied to functions that: +### `lean_string_size` vs `lean_string_byte_size` -* Mutate their input or -* Return objects that work on the basis of mutation +`lean_string_byte_size` returns the **total object memory size** +(`sizeof(lean_string_object) + m_size`), not the string data length. +Use `lean_string_size` instead, which returns `m_size` — the number of data +bytes including the NUL terminator. 
The `LeanString::byte_len()` wrapper handles +this correctly by returning `lean_string_size(obj) - 1`. diff --git a/lean-sys/LICENSE-APACHE b/lean-sys/LICENSE-APACHE new file mode 100644 index 00000000..4252ff0c --- /dev/null +++ b/lean-sys/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Argument Computer Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lean-sys/LICENSE-MIT b/lean-sys/LICENSE-MIT new file mode 100644 index 00000000..829c2986 --- /dev/null +++ b/lean-sys/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Argument Computer Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/lean-sys/src/object.rs b/lean-sys/src/object.rs index 0a34fdbc..1787e2ab 100644 --- a/lean-sys/src/object.rs +++ b/lean-sys/src/object.rs @@ -11,6 +11,16 @@ use std::ops::Deref; use crate::include; use crate::safe_cstring; +// Tag constants from lean.h +const LEAN_MAX_CTOR_TAG: u8 = 243; +const LEAN_TAG_ARRAY: u8 = 246; +const LEAN_TAG_SCALAR_ARRAY: u8 = 248; +const LEAN_TAG_STRING: u8 = 249; +const LEAN_TAG_EXTERNAL: u8 = 254; + +/// Constructor tag for `IO.Error.userError`. +const IO_ERROR_USER_ERROR_TAG: u8 = 7; + // ============================================================================= // LeanObject — Untyped base wrapper // ============================================================================= @@ -110,30 +120,30 @@ impl LeanObject { unsafe { include::lean_unbox_uint64(self.0 as *mut _) } } - /// Interpret as a constructor object (tag 0–243). + /// Interpret as a constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). /// /// Debug-asserts the tag is in range. #[inline] pub fn as_ctor(self) -> LeanCtor { - debug_assert!(!self.is_scalar() && self.tag() <= 243); + debug_assert!(!self.is_scalar() && self.tag() <= LEAN_MAX_CTOR_TAG); LeanCtor(self) } - /// Interpret as a `String` object (tag 249). + /// Interpret as a `String` object (tag `LEAN_TAG_STRING`). /// /// Debug-asserts the tag is correct. 
#[inline] pub fn as_string(self) -> LeanString { - debug_assert!(!self.is_scalar() && self.tag() == 249); + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_STRING); LeanString(self) } - /// Interpret as an `Array` object (tag 246). + /// Interpret as an `Array` object (tag `LEAN_TAG_ARRAY`). /// /// Debug-asserts the tag is correct. #[inline] pub fn as_array(self) -> LeanArray { - debug_assert!(!self.is_scalar() && self.tag() == 246); + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_ARRAY); LeanArray(self) } @@ -146,10 +156,10 @@ impl LeanObject { LeanList(self) } - /// Interpret as a `ByteArray` object (tag 248). + /// Interpret as a `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`). #[inline] pub fn as_byte_array(self) -> LeanByteArray { - debug_assert!(!self.is_scalar() && self.tag() == 248); + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_SCALAR_ARRAY); LeanByteArray(self) } @@ -165,10 +175,10 @@ impl LeanObject { } // ============================================================================= -// LeanArray — Array α (tag 246) +// LeanArray — Array α (tag LEAN_TAG_ARRAY) // ============================================================================= -/// Typed wrapper for a Lean `Array α` object (tag 246). +/// Typed wrapper for a Lean `Array α` object (tag `LEAN_TAG_ARRAY`). #[derive(Clone, Copy)] #[repr(transparent)] pub struct LeanArray(LeanObject); @@ -182,13 +192,13 @@ impl Deref for LeanArray { } impl LeanArray { - /// Wrap a raw pointer, asserting it is an `Array` (tag 246). + /// Wrap a raw pointer, asserting it is an `Array` (tag `LEAN_TAG_ARRAY`). /// /// # Safety /// The pointer must be a valid Lean `Array` object. 
pub unsafe fn from_raw(ptr: *const c_void) -> Self { let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == 246); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_ARRAY); Self(obj) } @@ -244,10 +254,10 @@ impl LeanArray { } // ============================================================================= -// LeanByteArray — ByteArray (tag 248, scalar array) +// LeanByteArray — ByteArray (tag LEAN_TAG_SCALAR_ARRAY) // ============================================================================= -/// Typed wrapper for a Lean `ByteArray` object (tag 248). +/// Typed wrapper for a Lean `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`). #[derive(Clone, Copy)] #[repr(transparent)] pub struct LeanByteArray(LeanObject); @@ -261,13 +271,13 @@ impl Deref for LeanByteArray { } impl LeanByteArray { - /// Wrap a raw pointer, asserting it is a `ByteArray` (tag 248). + /// Wrap a raw pointer, asserting it is a `ByteArray` (tag `LEAN_TAG_SCALAR_ARRAY`). /// /// # Safety /// The pointer must be a valid Lean `ByteArray` object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == 248); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_SCALAR_ARRAY); Self(obj) } @@ -319,10 +329,10 @@ impl LeanByteArray { } // ============================================================================= -// LeanString — String (tag 249) +// LeanString — String (tag LEAN_TAG_STRING) // ============================================================================= -/// Typed wrapper for a Lean `String` object (tag 249). +/// Typed wrapper for a Lean `String` object (tag `LEAN_TAG_STRING`). #[derive(Clone, Copy)] #[repr(transparent)] pub struct LeanString(LeanObject); @@ -336,13 +346,13 @@ impl Deref for LeanString { } impl LeanString { - /// Wrap a raw pointer, asserting it is a `String` (tag 249). + /// Wrap a raw pointer, asserting it is a `String` (tag `LEAN_TAG_STRING`). 
/// /// # Safety /// The pointer must be a valid Lean `String` object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == 249); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_STRING); Self(obj) } @@ -373,10 +383,10 @@ impl std::fmt::Display for LeanString { } // ============================================================================= -// LeanCtor — Constructor objects (tag 0–243) +// LeanCtor — Constructor objects (tag 0–LEAN_MAX_CTOR_TAG) // ============================================================================= -/// Typed wrapper for a Lean constructor object (tag 0–243). +/// Typed wrapper for a Lean constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). #[derive(Clone, Copy)] #[repr(transparent)] pub struct LeanCtor(LeanObject); @@ -390,13 +400,13 @@ impl Deref for LeanCtor { } impl LeanCtor { - /// Wrap a raw pointer, asserting it is a constructor (tag <= 243). + /// Wrap a raw pointer, asserting it is a constructor (tag <= `LEAN_MAX_CTOR_TAG`). /// /// # Safety /// The pointer must be a valid Lean constructor object. pub unsafe fn from_raw(ptr: *const c_void) -> Self { let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() <= 243); + debug_assert!(!obj.is_scalar() && obj.tag() <= LEAN_MAX_CTOR_TAG); Self(obj) } @@ -511,10 +521,10 @@ impl LeanCtor { } // ============================================================================= -// LeanExternal — External objects (tag 254) +// LeanExternal — External objects (tag LEAN_TAG_EXTERNAL) // ============================================================================= -/// Typed wrapper for a Lean external object (tag 254) holding a `T`. +/// Typed wrapper for a Lean external object (tag `LEAN_TAG_EXTERNAL`) holding a `T`. 
#[derive(Clone, Copy)] #[repr(transparent)] pub struct LeanExternal(LeanObject, PhantomData); @@ -528,14 +538,14 @@ impl Deref for LeanExternal { } impl LeanExternal { - /// Wrap a raw pointer, asserting it is an external object (tag 254). + /// Wrap a raw pointer, asserting it is an external object (tag `LEAN_TAG_EXTERNAL`). /// /// # Safety /// The pointer must be a valid Lean external object whose data pointer /// points to a valid `T`. pub unsafe fn from_raw(ptr: *const c_void) -> Self { let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == 254); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_EXTERNAL); Self(obj, PhantomData) } @@ -831,8 +841,7 @@ impl LeanIOResult { /// Build an IO error from a Rust string via `IO.Error.userError` (tag 7, 1 field). pub fn error_string(msg: &str) -> Self { - // IO.Error.userError is tag 7 with 1 object field (the String) - let user_error = LeanCtor::alloc(7, 1, 0); + let user_error = LeanCtor::alloc(IO_ERROR_USER_ERROR_TAG, 1, 0); user_error.set(0, LeanString::new(msg)); Self::error(*user_error) } diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index 2df913cc..839510b7 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -18,7 +18,7 @@ use crate::{ synthesis::AiurSystem, }, ffi::aiur::{ - lean_unbox_g, lean_unbox_nat_as_usize, toplevel::lean_ptr_to_toplevel, + lean_unbox_g, lean_unbox_nat_as_usize, toplevel::decode_toplevel, }, }; @@ -67,7 +67,7 @@ extern "C" fn rs_aiur_system_build( commitment_parameters: LeanObject, ) -> LeanExternal { let system = AiurSystem::build( - lean_ptr_to_toplevel(toplevel), + decode_toplevel(toplevel), lean_ptr_to_commitment_parameters(commitment_parameters), ); LeanExternal::alloc(system_class(), system) diff --git a/src/ffi/aiur/toplevel.rs b/src/ffi/aiur/toplevel.rs index c836c30a..33789ddf 100644 --- a/src/ffi/aiur/toplevel.rs +++ b/src/ffi/aiur/toplevel.rs @@ -194,7 +194,7 @@ fn lean_ptr_to_function(obj: LeanObject) -> 
Function { Function { body, layout, unconstrained } } -pub(crate) fn lean_ptr_to_toplevel(obj: LeanObject) -> Toplevel { +pub(crate) fn decode_toplevel(obj: LeanObject) -> Toplevel { let ctor = obj.as_ctor(); let [functions_obj, memory_sizes_obj] = ctor.objs::<2>(); let functions = functions_obj.as_array().map(lean_ptr_to_function); diff --git a/src/ffi/iroh.rs b/src/ffi/iroh.rs index bfe32689..ab7c8d8f 100644 --- a/src/ffi/iroh.rs +++ b/src/ffi/iroh.rs @@ -24,7 +24,7 @@ impl LeanPutResponse { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, LeanString::new(message)); ctor.set(1, LeanString::new(hash)); - Self::new((*ctor).into()) + Self::new(*ctor) } } @@ -42,7 +42,7 @@ impl LeanGetResponse { ctor.set(0, LeanString::new(message)); ctor.set(1, LeanString::new(hash)); ctor.set(2, LeanByteArray::from_bytes(bytes)); - Self::new((*ctor).into()) + Self::new(*ctor) } } From 32522af85b468f93cc4e1f09a3da28c1f1674f1a Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Wed, 4 Mar 2026 13:08:04 -0500 Subject: [PATCH 21/27] Rename lean-sys to lean-ffi --- Cargo.lock | 4 +- Cargo.toml | 4 +- docs/ffi.md | 4 +- lean-sys/Cargo.toml | 11 - lean-sys/LICENSE-APACHE | 201 -------- lean-sys/LICENSE-MIT | 21 - lean-sys/build.rs | 60 --- lean-sys/src/lib.rs | 80 ---- lean-sys/src/nat.rs | 170 ------- lean-sys/src/object.rs | 968 -------------------------------------- src/ffi.rs | 2 +- src/ffi/aiur.rs | 2 +- src/ffi/aiur/protocol.rs | 2 +- src/ffi/aiur/toplevel.rs | 2 +- src/ffi/byte_array.rs | 2 +- src/ffi/compile.rs | 6 +- src/ffi/graph.rs | 2 +- src/ffi/iroh.rs | 4 +- src/ffi/ix/address.rs | 2 +- src/ffi/ix/constant.rs | 4 +- src/ffi/ix/data.rs | 4 +- src/ffi/ix/env.rs | 2 +- src/ffi/ix/expr.rs | 4 +- src/ffi/ix/level.rs | 2 +- src/ffi/ix/name.rs | 4 +- src/ffi/ixon/compare.rs | 2 +- src/ffi/ixon/constant.rs | 2 +- src/ffi/ixon/enums.rs | 2 +- src/ffi/ixon/env.rs | 2 +- src/ffi/ixon/expr.rs | 2 +- src/ffi/ixon/meta.rs | 2 +- 
src/ffi/ixon/serialize.rs | 2 +- src/ffi/ixon/sharing.rs | 2 +- src/ffi/ixon/univ.rs | 2 +- src/ffi/keccak.rs | 2 +- src/ffi/lean_env.rs | 4 +- src/ffi/primitives.rs | 6 +- src/ffi/unsigned.rs | 2 +- src/iroh/_client.rs | 2 +- src/iroh/_server.rs | 2 +- src/ix/compile.rs | 2 +- src/ix/decompile.rs | 2 +- src/ix/env.rs | 2 +- src/ix/graph.rs | 2 +- src/ix/ground.rs | 2 +- src/ix/ixon/serialize.rs | 2 +- src/ix/mutual.rs | 2 +- src/lean.rs | 8 +- src/sha256.rs | 2 +- 49 files changed, 58 insertions(+), 1569 deletions(-) delete mode 100644 lean-sys/Cargo.toml delete mode 100644 lean-sys/LICENSE-APACHE delete mode 100644 lean-sys/LICENSE-MIT delete mode 100644 lean-sys/build.rs delete mode 100644 lean-sys/src/lib.rs delete mode 100644 lean-sys/src/nat.rs delete mode 100644 lean-sys/src/object.rs diff --git a/Cargo.lock b/Cargo.lock index dc4f824a..6fef6f8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1782,7 +1782,7 @@ dependencies = [ "iroh", "iroh-base", "itertools 0.14.0", - "lean-sys", + "lean-ffi", "multi-stark", "n0-snafu", "n0-watcher", @@ -1817,7 +1817,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] -name = "lean-sys" +name = "lean-ffi" version = "0.1.0" dependencies = [ "bindgen", diff --git a/Cargo.toml b/Cargo.toml index 4e9c9910..1f86fa1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["lean-sys"] +members = ["lean-ffi"] [package] name = "ix_rs" @@ -14,7 +14,7 @@ anyhow = "1" blake3 = "1.8.2" itertools = "0.14.0" indexmap = { version = "2", features = ["rayon"] } -lean-sys = { path = "lean-sys" } +lean-ffi = { path = "lean-ffi" } multi-stark = { git = "https://github.com/argumentcomputer/multi-stark.git", rev = "14b70601317e4500c7246c32a13ad08b3f560f2e" } num-bigint = "0.4.6" rayon = "1" diff --git a/docs/ffi.md b/docs/ffi.md index dcecdb8a..b02ad5e6 100644 --- a/docs/ffi.md +++ b/docs/ffi.md @@ -22,12 +22,12 @@ 
control of the underlying memory, needing to implement `Drop`, or having to know about the state of Lean's reference counting mechanism. Bindgen runs in `build.rs` and generates unsafe Rust functions that link to the `lean.h` library. This external module can then be found at -`target/release/lean-sys-/out/lean.rs`. +`target/release/lean-ffi-/out/lean.rs`. ## `LeanObject` API To facilitate working with Lean objects in Rust, we also designed an -ergonomic API in the `lean-sys` crate to wrap raw C pointers in Rust types, +ergonomic API in the `lean-ffi` crate to wrap raw C pointers in Rust types, with methods to abstract the low-level binding function calls from `lean.h`. The fundamental building block is `LeanObject`, a wrapper around an opaque Lean value represented in Rust as `*const c_void`. This value is either a diff --git a/lean-sys/Cargo.toml b/lean-sys/Cargo.toml deleted file mode 100644 index 0a9b6340..00000000 --- a/lean-sys/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "lean-sys" -version = "0.1.0" -edition = "2024" - -[dependencies] -num-bigint = "0.4.6" - -[build-dependencies] -bindgen = "0.71" -cc = "1" diff --git a/lean-sys/LICENSE-APACHE b/lean-sys/LICENSE-APACHE deleted file mode 100644 index 4252ff0c..00000000 --- a/lean-sys/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright 2025 Argument Computer Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/lean-sys/LICENSE-MIT b/lean-sys/LICENSE-MIT deleted file mode 100644 index 829c2986..00000000 --- a/lean-sys/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2025 Argument Computer Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/lean-sys/build.rs b/lean-sys/build.rs deleted file mode 100644 index e8b98e57..00000000 --- a/lean-sys/build.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::{env, path::PathBuf, process::Command}; - -fn find_lean_include_dir() -> PathBuf { - // 1. Try LEAN_SYSROOT env var - if let Ok(sysroot) = env::var("LEAN_SYSROOT") { - let inc = PathBuf::from(sysroot).join("include"); - if inc.exists() { - return inc; - } - } - // 2. Try `lean --print-prefix` - if let Ok(output) = Command::new("lean").arg("--print-prefix").output() - && output.status.success() - { - let prefix = String::from_utf8_lossy(&output.stdout).trim().to_string(); - let inc = PathBuf::from(prefix).join("include"); - if inc.exists() { - return inc; - } - } - panic!( - "Cannot find Lean include directory. \ - Set LEAN_SYSROOT or ensure `lean` is on PATH." - ); -} - -fn main() { - let lean_include = find_lean_include_dir(); - let lean_h = lean_include.join("lean").join("lean.h"); - let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); - let wrapper_c = out_dir.join("lean_static_fns.c"); - - // Generate C wrappers for lean.h's static inline functions and - // Rust bindings for all types and functions. - bindgen::Builder::default() - .header(lean_h.to_str().unwrap()) - .clang_arg(format!("-I{}", lean_include.display())) - .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) - .wrap_static_fns(true) - .wrap_static_fns_path(&wrapper_c) - // lean_get_rc_mt_addr returns `_Atomic(int)*` which bindgen - // cannot wrap. Types using `_Atomic` are made opaque. - .blocklist_function("lean_get_rc_mt_addr") - .opaque_type("lean_thunk_object") - .opaque_type("lean_task_object") - .generate() - .expect("bindgen failed to process lean.h") - .write_to_file(out_dir.join("lean.rs")) - .expect("Couldn't write bindings"); - - // Compile the generated C wrappers into a static library. 
- cc::Build::new() - .file(&wrapper_c) - .include(&lean_include) - .compile("lean_static_fns"); - - println!("cargo:rerun-if-env-changed=LEAN_SYSROOT"); - println!("cargo:rerun-if-changed={}", lean_h.display()); - println!("cargo:rerun-if-changed=build.rs"); -} diff --git a/lean-sys/src/lib.rs b/lean-sys/src/lib.rs deleted file mode 100644 index da9ff1b8..00000000 --- a/lean-sys/src/lib.rs +++ /dev/null @@ -1,80 +0,0 @@ -//! Low-level Lean FFI bindings and type-safe wrappers. -//! -//! The `include` submodule contains auto-generated bindings from `lean.h` via -//! bindgen. Higher-level helpers are in `object` and `nat`. - -#[allow( - non_upper_case_globals, - non_camel_case_types, - non_snake_case, - dead_code, - unsafe_op_in_unsafe_fn, - unused_qualifications, - clippy::all, - clippy::ptr_as_ptr, - clippy::cast_possible_wrap, - clippy::cast_possible_truncation, - clippy::derive_partial_eq_without_eq -)] -pub mod include { - include!(concat!(env!("OUT_DIR"), "/lean.rs")); -} - -pub mod nat; -pub mod object; - -use std::ffi::{CString, c_void}; - -/// Create a CString from a str, stripping any interior null bytes. -/// Lean strings are length-prefixed and can contain null bytes, but the -/// `lean_mk_string` FFI requires a null-terminated C string. This function -/// ensures conversion always succeeds by filtering out interior nulls. -pub fn safe_cstring(s: &str) -> CString { - CString::new(s).unwrap_or_else(|_| { - let bytes: Vec = s.bytes().filter(|&b| b != 0).collect(); - CString::new(bytes).expect("filtered string should have no nulls") - }) -} - -/// No-op foreach callback for external classes that hold no Lean references. -/// -/// # Safety -/// Must only be used as a `lean_external_foreach_fn` callback. -pub unsafe extern "C" fn noop_foreach( - _: *mut c_void, - _: *mut include::lean_object, -) { -} - -/// Generate a `#[repr(transparent)]` newtype over `LeanObject` for a specific -/// Lean type, with `Deref`, `From`, and a `new` constructor. 
-#[macro_export] -macro_rules! lean_domain_type { - ($($(#[$meta:meta])* $name:ident;)*) => {$( - $(#[$meta])* - #[derive(Clone, Copy)] - #[repr(transparent)] - pub struct $name($crate::object::LeanObject); - - impl std::ops::Deref for $name { - type Target = $crate::object::LeanObject; - #[inline] - fn deref(&self) -> &$crate::object::LeanObject { &self.0 } - } - - impl From<$name> for $crate::object::LeanObject { - #[inline] - fn from(x: $name) -> Self { x.0 } - } - - impl From<$crate::object::LeanObject> for $name { - #[inline] - fn from(obj: $crate::object::LeanObject) -> Self { Self(obj) } - } - - impl $name { - #[inline] - pub fn new(obj: $crate::object::LeanObject) -> Self { Self(obj) } - } - )*}; -} diff --git a/lean-sys/src/nat.rs b/lean-sys/src/nat.rs deleted file mode 100644 index b5ded9c5..00000000 --- a/lean-sys/src/nat.rs +++ /dev/null @@ -1,170 +0,0 @@ -//! Lean `Nat` (arbitrary-precision natural number) representation. -//! -//! Lean stores small naturals as tagged scalars and large ones as GMP -//! `mpz_object`s on the heap. This module handles both representations. - -use std::ffi::{c_int, c_void}; -use std::fmt; -use std::mem::MaybeUninit; - -use num_bigint::BigUint; - -use crate::object::LeanObject; - -/// Arbitrary-precision natural number, wrapping `BigUint`. -#[derive(Hash, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)] -pub struct Nat(pub BigUint); - -impl fmt::Display for Nat { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl From for Nat { - fn from(x: u64) -> Self { - Nat(BigUint::from(x)) - } -} - -impl Nat { - pub const ZERO: Self = Self(BigUint::ZERO); - - /// Try to convert to u64, returning None if the value is too large. - #[inline] - pub fn to_u64(&self) -> Option { - u64::try_from(&self.0).ok() - } - - /// Decode a `Nat` from a Lean object pointer. Handles both scalar (unboxed) - /// and heap-allocated (GMP `mpz_object`) representations. 
- /// - /// # Safety - /// The pointer must be a valid Lean `Nat` object (scalar or mpz). - pub unsafe fn from_ptr(ptr: *const c_void) -> Nat { - let obj = unsafe { LeanObject::from_raw(ptr) }; - if obj.is_scalar() { - let u = obj.unbox_usize(); - Nat(BigUint::from_bytes_le(&u.to_le_bytes())) - } else { - // Heap-allocated big integer (mpz_object) - let mpz: &MpzObject = unsafe { &*ptr.cast() }; - Nat(mpz.m_value.to_biguint()) - } - } - - /// Decode a `Nat` from a `LeanObject`. Convenience wrapper over `from_ptr`. - pub fn from_obj(obj: LeanObject) -> Nat { - unsafe { Self::from_ptr(obj.as_ptr()) } - } - - #[inline] - pub fn from_le_bytes(bytes: &[u8]) -> Nat { - Nat(BigUint::from_bytes_le(bytes)) - } - - #[inline] - pub fn to_le_bytes(&self) -> Vec { - self.0.to_bytes_le() - } -} - -/// From https://github.com/leanprover/lean4/blob/master/src/runtime/object.h: -/// ```cpp -/// struct mpz_object { -/// lean_object m_header; -/// mpz m_value; -/// mpz_object() {} -/// explicit mpz_object(mpz const & m):m_value(m) {} -/// }; -/// ``` -#[repr(C)] -struct MpzObject { - _header: [u8; 8], - m_value: Mpz, -} - -#[repr(C)] -struct Mpz { - alloc: i32, - size: i32, - d: *const u64, -} - -impl Mpz { - fn to_biguint(&self) -> BigUint { - let nlimbs = self.size.unsigned_abs() as usize; - let limbs = unsafe { std::slice::from_raw_parts(self.d, nlimbs) }; - - // Convert limbs (little-endian by limb) - let bytes: Vec<_> = - limbs.iter().flat_map(|&limb| limb.to_le_bytes()).collect(); - - BigUint::from_bytes_le(&bytes) - } -} - -// ============================================================================= -// GMP interop for building Lean Nat objects from limbs -// ============================================================================= - -use crate::include::lean_uint64_to_nat; - -/// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1 -const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64; - -unsafe extern "C" { - #[link_name = "__gmpz_init"] - fn mpz_init(x: *mut Mpz); - - #[link_name 
= "__gmpz_import"] - fn mpz_import( - rop: *mut Mpz, - count: usize, - order: c_int, - size: usize, - endian: c_int, - nails: usize, - op: *const u64, - ); - - #[link_name = "__gmpz_clear"] - fn mpz_clear(x: *mut Mpz); - - /// Lean's internal mpz allocation — deep-copies the mpz value. - /// Caller must still call mpz_clear on the original. - fn lean_alloc_mpz(v: *mut Mpz) -> *mut c_void; -} - -/// Create a Lean `Nat` from a little-endian array of u64 limbs. -/// Replaces the C function `c_lean_nat_from_limbs` from `ixon_ffi.c`. -/// # Safety -/// `limbs` must be valid for reading `num_limbs` elements. -pub unsafe fn lean_nat_from_limbs( - num_limbs: usize, - limbs: *const u64, -) -> *mut c_void { - if num_limbs == 0 { - return LeanObject::box_usize(0).as_mut_ptr(); - } - let first = unsafe { *limbs }; - if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT { - #[allow(clippy::cast_possible_truncation)] // only targets 64-bit - return LeanObject::box_usize(first as usize).as_mut_ptr(); - } - if num_limbs == 1 { - return unsafe { lean_uint64_to_nat(first).cast() }; - } - // Multi-limb: use GMP - unsafe { - let mut value = MaybeUninit::::uninit(); - mpz_init(value.as_mut_ptr()); - // order = -1 (least significant limb first) - // size = 8 bytes per limb, endian = 0 (native), nails = 0 - mpz_import(value.as_mut_ptr(), num_limbs, -1, 8, 0, 0, limbs); - // lean_alloc_mpz deep-copies; we must free the original - let result = lean_alloc_mpz(value.as_mut_ptr()); - mpz_clear(value.as_mut_ptr()); - result - } -} diff --git a/lean-sys/src/object.rs b/lean-sys/src/object.rs deleted file mode 100644 index 1787e2ab..00000000 --- a/lean-sys/src/object.rs +++ /dev/null @@ -1,968 +0,0 @@ -//! Type-safe wrappers for Lean FFI object pointers. -//! -//! Each wrapper is a `#[repr(transparent)]` `Copy` newtype over `*const c_void` -//! that asserts the correct Lean tag on construction and provides safe accessor -//! methods. Reference counting is left to Lean (no `Drop` impl). 
- -use std::ffi::c_void; -use std::marker::PhantomData; -use std::ops::Deref; - -use crate::include; -use crate::safe_cstring; - -// Tag constants from lean.h -const LEAN_MAX_CTOR_TAG: u8 = 243; -const LEAN_TAG_ARRAY: u8 = 246; -const LEAN_TAG_SCALAR_ARRAY: u8 = 248; -const LEAN_TAG_STRING: u8 = 249; -const LEAN_TAG_EXTERNAL: u8 = 254; - -/// Constructor tag for `IO.Error.userError`. -const IO_ERROR_USER_ERROR_TAG: u8 = 7; - -// ============================================================================= -// LeanObject — Untyped base wrapper -// ============================================================================= - -/// Untyped wrapper around a raw Lean object pointer. -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanObject(*const c_void); - -impl LeanObject { - /// Wrap a raw pointer without any tag check. - /// - /// # Safety - /// The pointer must be a valid Lean object (or tagged scalar). - #[inline] - pub unsafe fn from_raw(ptr: *const c_void) -> Self { - Self(ptr) - } - - /// Wrap a `*mut lean_object` returned from a `lean_sys` function. - /// - /// # Safety - /// The pointer must be a valid Lean object (or tagged scalar). - #[inline] - pub unsafe fn from_lean_ptr(ptr: *mut include::lean_object) -> Self { - Self(ptr.cast()) - } - - /// Create a Lean `Nat` from a `u64` value. - /// - /// Small values are stored as tagged scalars; larger ones are heap-allocated - /// via the Lean runtime. - #[inline] - pub fn from_nat_u64(n: u64) -> Self { - unsafe { Self::from_lean_ptr(include::lean_uint64_to_nat(n)) } - } - - #[inline] - pub fn as_ptr(self) -> *const c_void { - self.0 - } - - #[inline] - pub fn as_mut_ptr(self) -> *mut c_void { - self.0 as *mut c_void - } - - /// True if this is a tagged scalar (bit 0 set). - #[inline] - pub fn is_scalar(self) -> bool { - self.0 as usize & 1 == 1 - } - - /// Return the object tag. Panics if the object is a scalar. 
- #[inline] - pub fn tag(self) -> u8 { - assert!(!self.is_scalar(), "tag() called on scalar"); - #[allow(clippy::cast_possible_truncation)] - unsafe { - include::lean_obj_tag(self.0 as *mut _) as u8 - } - } - - #[inline] - pub fn inc_ref(self) { - if !self.is_scalar() { - unsafe { include::lean_inc_ref(self.0 as *mut _) } - } - } - - #[inline] - pub fn dec_ref(self) { - if !self.is_scalar() { - unsafe { include::lean_dec_ref(self.0 as *mut _) } - } - } - - /// Box a `usize` into a tagged scalar pointer. - #[inline] - pub fn box_usize(n: usize) -> Self { - Self(((n << 1) | 1) as *const c_void) - } - - /// Unbox a tagged scalar pointer into a `usize`. - #[inline] - pub fn unbox_usize(self) -> usize { - self.0 as usize >> 1 - } - - #[inline] - pub fn box_u64(n: u64) -> Self { - Self(unsafe { include::lean_box_uint64(n) }.cast()) - } - - #[inline] - pub fn unbox_u64(self) -> u64 { - unsafe { include::lean_unbox_uint64(self.0 as *mut _) } - } - - /// Interpret as a constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). - /// - /// Debug-asserts the tag is in range. - #[inline] - pub fn as_ctor(self) -> LeanCtor { - debug_assert!(!self.is_scalar() && self.tag() <= LEAN_MAX_CTOR_TAG); - LeanCtor(self) - } - - /// Interpret as a `String` object (tag `LEAN_TAG_STRING`). - /// - /// Debug-asserts the tag is correct. - #[inline] - pub fn as_string(self) -> LeanString { - debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_STRING); - LeanString(self) - } - - /// Interpret as an `Array` object (tag `LEAN_TAG_ARRAY`). - /// - /// Debug-asserts the tag is correct. - #[inline] - pub fn as_array(self) -> LeanArray { - debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_ARRAY); - LeanArray(self) - } - - /// Interpret as a `List` (nil = scalar, cons = tag 1). - /// - /// Debug-asserts the tag is valid for a list. 
- #[inline] - pub fn as_list(self) -> LeanList { - debug_assert!(self.is_scalar() || self.tag() == 1); - LeanList(self) - } - - /// Interpret as a `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`). - #[inline] - pub fn as_byte_array(self) -> LeanByteArray { - debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_SCALAR_ARRAY); - LeanByteArray(self) - } - - #[inline] - pub fn box_u32(n: u32) -> Self { - Self(unsafe { include::lean_box_uint32(n) }.cast()) - } - - #[inline] - pub fn unbox_u32(self) -> u32 { - unsafe { include::lean_unbox_uint32(self.0 as *mut _) } - } -} - -// ============================================================================= -// LeanArray — Array α (tag LEAN_TAG_ARRAY) -// ============================================================================= - -/// Typed wrapper for a Lean `Array α` object (tag `LEAN_TAG_ARRAY`). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanArray(LeanObject); - -impl Deref for LeanArray { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanArray { - /// Wrap a raw pointer, asserting it is an `Array` (tag `LEAN_TAG_ARRAY`). - /// - /// # Safety - /// The pointer must be a valid Lean `Array` object. - pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_ARRAY); - Self(obj) - } - - /// Allocate a new array with `size` elements (capacity = size). 
- pub fn alloc(size: usize) -> Self { - let obj = unsafe { include::lean_alloc_array(size, size) }; - Self(LeanObject(obj.cast())) - } - - pub fn len(&self) -> usize { - unsafe { include::lean_array_size(self.0.as_ptr() as *mut _) } - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub fn get(&self, i: usize) -> LeanObject { - LeanObject( - unsafe { include::lean_array_get_core(self.0.as_ptr() as *mut _, i) } - .cast(), - ) - } - - pub fn set(&self, i: usize, val: impl Into) { - let val: LeanObject = val.into(); - unsafe { - include::lean_array_set_core( - self.0.as_ptr() as *mut _, - i, - val.as_ptr() as *mut _, - ); - } - } - - /// Return a slice over the array elements. - pub fn data(&self) -> &[LeanObject] { - unsafe { - let cptr = include::lean_array_cptr(self.0.as_ptr() as *mut _); - // Safety: LeanObject is repr(transparent) over *const c_void, and - // lean_array_cptr returns *mut *mut lean_object which has the same layout. - std::slice::from_raw_parts(cptr.cast(), self.len()) - } - } - - pub fn iter(&self) -> impl Iterator + '_ { - self.data().iter().copied() - } - - pub fn map(&self, f: impl Fn(LeanObject) -> T) -> Vec { - self.iter().map(f).collect() - } -} - -// ============================================================================= -// LeanByteArray — ByteArray (tag LEAN_TAG_SCALAR_ARRAY) -// ============================================================================= - -/// Typed wrapper for a Lean `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanByteArray(LeanObject); - -impl Deref for LeanByteArray { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanByteArray { - /// Wrap a raw pointer, asserting it is a `ByteArray` (tag `LEAN_TAG_SCALAR_ARRAY`). - /// - /// # Safety - /// The pointer must be a valid Lean `ByteArray` object. 
- pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_SCALAR_ARRAY); - Self(obj) - } - - /// Allocate a new byte array with `size` bytes (capacity = size). - pub fn alloc(size: usize) -> Self { - let obj = unsafe { include::lean_alloc_sarray(1, size, size) }; - Self(LeanObject(obj.cast())) - } - - /// Allocate a new byte array and copy `data` into it. - pub fn from_bytes(data: &[u8]) -> Self { - let arr = Self::alloc(data.len()); - unsafe { - let cptr = include::lean_sarray_cptr(arr.0.as_ptr() as *mut _); - std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); - } - arr - } - - pub fn len(&self) -> usize { - unsafe { include::lean_sarray_size(self.0.as_ptr() as *mut _) } - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Return the byte contents as a slice. - pub fn as_bytes(&self) -> &[u8] { - unsafe { - let cptr = include::lean_sarray_cptr(self.0.as_ptr() as *mut _); - std::slice::from_raw_parts(cptr, self.len()) - } - } - - /// Copy `data` into the byte array and update its size. - /// - /// # Safety - /// The caller must ensure the array has sufficient capacity for `data`. - pub unsafe fn set_data(&self, data: &[u8]) { - unsafe { - let obj = self.0.as_mut_ptr(); - let cptr = include::lean_sarray_cptr(obj.cast()); - std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); - // Update m_size: at offset 8 (after lean_object header) - *obj.cast::().add(8).cast::() = data.len(); - } - } -} - -// ============================================================================= -// LeanString — String (tag LEAN_TAG_STRING) -// ============================================================================= - -/// Typed wrapper for a Lean `String` object (tag `LEAN_TAG_STRING`). 
-#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanString(LeanObject); - -impl Deref for LeanString { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanString { - /// Wrap a raw pointer, asserting it is a `String` (tag `LEAN_TAG_STRING`). - /// - /// # Safety - /// The pointer must be a valid Lean `String` object. - pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_STRING); - Self(obj) - } - - /// Create a Lean string from a Rust `&str`. - pub fn new(s: &str) -> Self { - let c = safe_cstring(s); - let obj = unsafe { include::lean_mk_string(c.as_ptr()) }; - Self(LeanObject(obj.cast())) - } - - /// Number of data bytes (excluding the trailing NUL). - pub fn byte_len(&self) -> usize { - unsafe { include::lean_string_size(self.0.as_ptr() as *mut _) - 1 } - } -} - -impl std::fmt::Display for LeanString { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - unsafe { - let obj = self.0.as_ptr() as *mut _; - let len = include::lean_string_size(obj) - 1; // m_size includes NUL - let data = include::lean_string_cstr(obj); - let bytes = std::slice::from_raw_parts(data.cast::(), len); - let s = std::str::from_utf8_unchecked(bytes); - f.write_str(s) - } - } -} - -// ============================================================================= -// LeanCtor — Constructor objects (tag 0–LEAN_MAX_CTOR_TAG) -// ============================================================================= - -/// Typed wrapper for a Lean constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanCtor(LeanObject); - -impl Deref for LeanCtor { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanCtor { - /// Wrap a raw pointer, asserting it is a constructor (tag <= `LEAN_MAX_CTOR_TAG`). 
- /// - /// # Safety - /// The pointer must be a valid Lean constructor object. - pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() <= LEAN_MAX_CTOR_TAG); - Self(obj) - } - - /// Allocate a new constructor object. - pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { - #[allow(clippy::cast_possible_truncation)] - let obj = unsafe { - include::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) - }; - Self(LeanObject(obj.cast())) - } - - pub fn tag(&self) -> u8 { - self.0.tag() - } - - /// Get the `i`-th object field via `lean_ctor_get`. - pub fn get(&self, i: usize) -> LeanObject { - #[allow(clippy::cast_possible_truncation)] - LeanObject( - unsafe { include::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } - .cast(), - ) - } - - /// Set the `i`-th object field via `lean_ctor_set`. - pub fn set(&self, i: usize, val: impl Into) { - let val: LeanObject = val.into(); - #[allow(clippy::cast_possible_truncation)] - unsafe { - include::lean_ctor_set( - self.0.as_ptr() as *mut _, - i as u32, - val.as_ptr() as *mut _, - ); - } - } - - /// Set a `u8` scalar field at the given byte offset (past all object fields). - pub fn set_u8(&self, offset: usize, val: u8) { - #[allow(clippy::cast_possible_truncation)] - unsafe { - include::lean_ctor_set_uint8( - self.0.as_ptr() as *mut _, - offset as u32, - val, - ); - } - } - - /// Set a `u32` scalar field at the given byte offset (past all object fields). - pub fn set_u32(&self, offset: usize, val: u32) { - #[allow(clippy::cast_possible_truncation)] - unsafe { - include::lean_ctor_set_uint32( - self.0.as_ptr() as *mut _, - offset as u32, - val, - ); - } - } - - /// Set a `u64` scalar field at the given byte offset (past all object fields). 
- pub fn set_u64(&self, offset: usize, val: u64) { - #[allow(clippy::cast_possible_truncation)] - unsafe { - include::lean_ctor_set_uint64( - self.0.as_ptr() as *mut _, - offset as u32, - val, - ); - } - } - - /// Read `N` object-field pointers using raw pointer math. - /// - /// This bypasses `lean_ctor_get`'s bounds check, which is necessary when - /// reading past the declared object fields into the scalar area (e.g. for - /// `Expr.Data`). - pub fn objs(&self) -> [LeanObject; N] { - let base = unsafe { self.0.as_ptr().cast::<*const c_void>().add(1) }; - std::array::from_fn(|i| LeanObject(unsafe { *base.add(i) })) - } - - /// Read a `u64` scalar at `offset` bytes past `num_objs` object fields. - pub fn scalar_u64(&self, num_objs: usize, offset: usize) -> u64 { - unsafe { - std::ptr::read_unaligned( - self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), - ) - } - } - - /// Read a `u32` scalar at `offset` bytes past `num_objs` object fields. - pub fn scalar_u32(&self, num_objs: usize, offset: usize) -> u32 { - unsafe { - std::ptr::read_unaligned( - self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), - ) - } - } - - /// Read a `u8` scalar at `offset` bytes past `num_objs` object fields. - pub fn scalar_u8(&self, num_objs: usize, offset: usize) -> u8 { - unsafe { *self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset) } - } - - /// Read a `bool` scalar at `offset` bytes past `num_objs` object fields. - pub fn scalar_bool(&self, num_objs: usize, offset: usize) -> bool { - self.scalar_u8(num_objs, offset) != 0 - } -} - -// ============================================================================= -// LeanExternal — External objects (tag LEAN_TAG_EXTERNAL) -// ============================================================================= - -/// Typed wrapper for a Lean external object (tag `LEAN_TAG_EXTERNAL`) holding a `T`. 
-#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanExternal(LeanObject, PhantomData); - -impl Deref for LeanExternal { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanExternal { - /// Wrap a raw pointer, asserting it is an external object (tag `LEAN_TAG_EXTERNAL`). - /// - /// # Safety - /// The pointer must be a valid Lean external object whose data pointer - /// points to a valid `T`. - pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_EXTERNAL); - Self(obj, PhantomData) - } - - /// Allocate a new external object holding `data`. - pub fn alloc(class: &ExternalClass, data: T) -> Self { - let data_ptr = Box::into_raw(Box::new(data)); - let obj = - unsafe { include::lean_alloc_external(class.0.cast(), data_ptr.cast()) }; - Self(LeanObject(obj.cast()), PhantomData) - } - - /// Get a reference to the wrapped data. - pub fn get(&self) -> &T { - unsafe { - &*include::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() - } - } -} - -// ============================================================================= -// ExternalClass — Registered external class -// ============================================================================= - -/// A registered Lean external class (wraps `lean_external_class*`). -pub struct ExternalClass(*mut c_void); - -// Safety: the class pointer is initialized once and read-only thereafter. -unsafe impl Send for ExternalClass {} -unsafe impl Sync for ExternalClass {} - -impl ExternalClass { - /// Register a new external class with explicit finalizer and foreach callbacks. - /// - /// # Safety - /// The `finalizer` callback must correctly free the external data, and - /// `foreach` must correctly visit any Lean object references held by the data. 
- pub unsafe fn register( - finalizer: include::lean_external_finalize_proc, - foreach: include::lean_external_foreach_proc, - ) -> Self { - Self( - unsafe { include::lean_register_external_class(finalizer, foreach) } - .cast(), - ) - } - - /// Register a new external class that uses `Drop` to finalize `T` - /// and has no Lean object references to visit. - pub fn register_with_drop() -> Self { - unsafe extern "C" fn drop_finalizer(ptr: *mut c_void) { - if !ptr.is_null() { - drop(unsafe { Box::from_raw(ptr.cast::()) }); - } - } - unsafe { - Self::register(Some(drop_finalizer::), Some(crate::noop_foreach)) - } - } -} - -// ============================================================================= -// LeanList — List α -// ============================================================================= - -/// Typed wrapper for a Lean `List α` (nil = scalar `lean_box(0)`, cons = ctor tag 1). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanList(LeanObject); - -impl Deref for LeanList { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanList { - /// Wrap a raw pointer, asserting it is a valid `List` (scalar nil or ctor tag 1). - /// - /// # Safety - /// The pointer must be a valid Lean `List` object. - pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(obj.is_scalar() || obj.tag() == 1); - Self(obj) - } - - /// The empty list. - pub fn nil() -> Self { - Self(LeanObject::box_usize(0)) - } - - /// Prepend `head` to `tail`. 
- pub fn cons(head: impl Into, tail: LeanList) -> Self { - let ctor = LeanCtor::alloc(1, 2, 0); - ctor.set(0, head); - ctor.set(1, tail); - Self(ctor.0) - } - - pub fn is_nil(&self) -> bool { - self.0.is_scalar() - } - - pub fn iter(&self) -> LeanListIter { - LeanListIter(self.0) - } - - pub fn collect(&self, f: impl Fn(LeanObject) -> T) -> Vec { - self.iter().map(f).collect() - } -} - -impl> FromIterator for LeanList { - fn from_iter>(iter: I) -> Self { - let items: Vec = iter.into_iter().map(Into::into).collect(); - let mut list = Self::nil(); - for item in items.into_iter().rev() { - list = Self::cons(item, list); - } - list - } -} - -/// Iterator over the elements of a `LeanList`. -pub struct LeanListIter(LeanObject); - -impl Iterator for LeanListIter { - type Item = LeanObject; - fn next(&mut self) -> Option { - if self.0.is_scalar() { - return None; - } - let ctor = self.0.as_ctor(); - let [head, tail] = ctor.objs::<2>(); - self.0 = tail; - Some(head) - } -} - -// ============================================================================= -// LeanOption — Option α -// ============================================================================= - -/// Typed wrapper for a Lean `Option α` (none = scalar, some = ctor tag 1). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanOption(LeanObject); - -impl Deref for LeanOption { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanOption { - /// Wrap a raw pointer, asserting it is a valid `Option`. - /// - /// # Safety - /// The pointer must be a valid Lean `Option` object. 
- pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(obj.is_scalar() || obj.tag() == 1); - Self(obj) - } - - pub fn none() -> Self { - Self(LeanObject::box_usize(0)) - } - - pub fn some(val: impl Into) -> Self { - let ctor = LeanCtor::alloc(1, 1, 0); - ctor.set(0, val); - Self(ctor.0) - } - - pub fn is_none(&self) -> bool { - self.0.is_scalar() - } - - pub fn is_some(&self) -> bool { - !self.is_none() - } - - pub fn to_option(&self) -> Option { - if self.is_none() { - None - } else { - let ctor = self.0.as_ctor(); - Some(ctor.get(0)) - } - } -} - -// ============================================================================= -// LeanExcept — Except ε α -// ============================================================================= - -/// Typed wrapper for a Lean `Except ε α` (error = ctor tag 0, ok = ctor tag 1). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanExcept(LeanObject); - -impl Deref for LeanExcept { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanExcept { - /// Wrap a raw pointer, asserting it is a valid `Except`. - /// - /// # Safety - /// The pointer must be a valid Lean `Except` object. - pub unsafe fn from_raw(ptr: *const c_void) -> Self { - let obj = LeanObject(ptr); - debug_assert!(!obj.is_scalar() && (obj.tag() == 0 || obj.tag() == 1)); - Self(obj) - } - - /// Build `Except.ok val`. - pub fn ok(val: impl Into) -> Self { - let ctor = LeanCtor::alloc(1, 1, 0); - ctor.set(0, val); - Self(ctor.0) - } - - /// Build `Except.error msg`. - pub fn error(msg: impl Into) -> Self { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, msg); - Self(ctor.0) - } - - /// Build `Except.error (String.mk msg)` from a Rust string. 
- pub fn error_string(msg: &str) -> Self { - Self::error(LeanString::new(msg)) - } - - pub fn is_ok(&self) -> bool { - self.0.tag() == 1 - } - - pub fn is_error(&self) -> bool { - self.0.tag() == 0 - } - - pub fn into_result(self) -> Result { - let ctor = self.0.as_ctor(); - if self.is_ok() { Ok(ctor.get(0)) } else { Err(ctor.get(0)) } - } -} - -// ============================================================================= -// LeanIOResult — EStateM.Result (BaseIO.Result) -// ============================================================================= - -/// Typed wrapper for a Lean `BaseIO.Result α` (`EStateM.Result`). -/// ok = ctor tag 0 (value, world), error = ctor tag 1 (error, world). -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanIOResult(LeanObject); - -impl Deref for LeanIOResult { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl LeanIOResult { - /// Build a successful IO result (tag 0, fields: [val, box(0)]). - pub fn ok(val: impl Into) -> Self { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, val); - ctor.set(1, LeanObject::box_usize(0)); // world token - Self(ctor.0) - } - - /// Build an IO error result (tag 1, fields: [err, box(0)]). - pub fn error(err: impl Into) -> Self { - let ctor = LeanCtor::alloc(1, 2, 0); - ctor.set(0, err); - ctor.set(1, LeanObject::box_usize(0)); // world token - Self(ctor.0) - } - - /// Build an IO error from a Rust string via `IO.Error.userError` (tag 7, 1 field). - pub fn error_string(msg: &str) -> Self { - let user_error = LeanCtor::alloc(IO_ERROR_USER_ERROR_TAG, 1, 0); - user_error.set(0, LeanString::new(msg)); - Self::error(*user_error) - } -} - -// ============================================================================= -// LeanProd — Prod α β (pair) -// ============================================================================= - -/// Typed wrapper for a Lean `Prod α β` (ctor tag 0, 2 object fields). 
-#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct LeanProd(LeanObject); - -impl Deref for LeanProd { - type Target = LeanObject; - #[inline] - fn deref(&self) -> &LeanObject { - &self.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanProd) -> Self { - x.0 - } -} - -impl LeanProd { - /// Build a pair `(fst, snd)`. - pub fn new(fst: impl Into, snd: impl Into) -> Self { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, fst); - ctor.set(1, snd); - Self(*ctor) - } - - /// Get the first element. - pub fn fst(&self) -> LeanObject { - let ctor = self.0.as_ctor(); - ctor.get(0) - } - - /// Get the second element. - pub fn snd(&self) -> LeanObject { - let ctor = self.0.as_ctor(); - ctor.get(1) - } -} - -// ============================================================================= -// From for LeanObject — allow wrapper types to be passed to set() etc. -// ============================================================================= - -impl From for LeanObject { - #[inline] - fn from(x: LeanArray) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanByteArray) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanString) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanCtor) -> Self { - x.0 - } -} - -impl From> for LeanObject { - #[inline] - fn from(x: LeanExternal) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanList) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanOption) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanExcept) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: LeanIOResult) -> Self { - x.0 - } -} - -impl From for LeanObject { - #[inline] - fn from(x: u32) -> Self { - Self::box_u32(x) - } -} diff --git a/src/ffi.rs b/src/ffi.rs index a6d3c0e2..b31bb96e 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -17,7 +17,7 @@ 
pub mod ix; // Ix types: Name, Level, Expr, ConstantInfo, Environment pub mod ixon; // Ixon types: Univ, Expr, Constant, metadata pub mod primitives; // Primitives: rs_roundtrip_nat, rs_roundtrip_string, etc. -use lean_sys::object::{LeanArray, LeanByteArray, LeanIOResult}; +use lean_ffi::object::{LeanArray, LeanByteArray, LeanIOResult}; /// Guard an FFI function that returns a Lean IO result against panics. /// On panic, returns a Lean IO error with the panic message instead of diff --git a/src/ffi/aiur.rs b/src/ffi/aiur.rs index 0ba537bb..ed31c634 100644 --- a/src/ffi/aiur.rs +++ b/src/ffi/aiur.rs @@ -4,7 +4,7 @@ pub mod protocol; pub mod toplevel; use crate::aiur::G; -use lean_sys::object::LeanObject; +use lean_ffi::object::LeanObject; #[inline] pub(super) fn lean_unbox_nat_as_usize(obj: LeanObject) -> usize { diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index 839510b7..03d5b340 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -6,7 +6,7 @@ use multi_stark::{ use rustc_hash::{FxBuildHasher, FxHashMap}; use std::sync::OnceLock; -use lean_sys::object::{ +use lean_ffi::object::{ ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanExternal, LeanObject, }; diff --git a/src/ffi/aiur/toplevel.rs b/src/ffi/aiur/toplevel.rs index 33789ddf..133dcd22 100644 --- a/src/ffi/aiur/toplevel.rs +++ b/src/ffi/aiur/toplevel.rs @@ -1,6 +1,6 @@ use multi_stark::p3_field::PrimeCharacteristicRing; -use lean_sys::object::LeanObject; +use lean_ffi::object::LeanObject; use crate::{ FxIndexMap, diff --git a/src/ffi/byte_array.rs b/src/ffi/byte_array.rs index 9dfb31b6..2831380e 100644 --- a/src/ffi/byte_array.rs +++ b/src/ffi/byte_array.rs @@ -1,4 +1,4 @@ -use lean_sys::object::LeanByteArray; +use lean_ffi::object::LeanByteArray; /// `@& ByteArray → @& ByteArray → Bool` /// Efficient implementation for `BEq ByteArray` diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 98560339..6c19b371 100644 --- a/src/ffi/compile.rs +++ 
b/src/ffi/compile.rs @@ -26,9 +26,9 @@ use crate::lean::{ LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxDecompileError, LeanIxSerializeError, LeanIxonRawEnv, }; -use lean_sys::nat::Nat; -use lean_sys::object::LeanIOResult; -use lean_sys::object::{ +use lean_ffi::nat::Nat; +use lean_ffi::object::LeanIOResult; +use lean_ffi::object::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, LeanString, }; diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index fd6904d7..a1c22ff2 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use crate::ffi::ffi_io_guard; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; -use lean_sys::object::{LeanArray, LeanCtor, LeanIOResult, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanIOResult, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::build_name; diff --git a/src/ffi/iroh.rs b/src/ffi/iroh.rs index ab7c8d8f..6ccce722 100644 --- a/src/ffi/iroh.rs +++ b/src/ffi/iroh.rs @@ -1,11 +1,11 @@ -use lean_sys::object::{ +use lean_ffi::object::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanString, }; use crate::iroh::common::{GetRequest, PutRequest, Request, Response}; use crate::iroh::{client, server}; -lean_sys::lean_domain_type! { +lean_ffi::lean_domain_type! { /// Lean `Iroh.Connect.PutResponse` object. LeanPutResponse; /// Lean `Iroh.Connect.GetResponse` object. diff --git a/src/ffi/ix/address.rs b/src/ffi/ix/address.rs index 599eccbd..62d0a6ea 100644 --- a/src/ffi/ix/address.rs +++ b/src/ffi/ix/address.rs @@ -3,7 +3,7 @@ //! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash use crate::lean::LeanIxAddress; -use lean_sys::object::LeanByteArray; +use lean_ffi::object::LeanByteArray; /// Build a Ix.Address from a blake3::Hash. 
/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index c16685df..9656572c 100644 --- a/src/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -16,8 +16,8 @@ use crate::ix::env::{ RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; use crate::lean::LeanIxConstantInfo; -use lean_sys::nat::Nat; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::expr::{build_expr, decode_ix_expr}; diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index c5819519..80cfe42c 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -7,8 +7,8 @@ use crate::lean::{ LeanIxDataValue, LeanIxInt, LeanIxSourceInfo, LeanIxSubstring, LeanIxSyntax, LeanIxSyntaxPreresolved, }; -use lean_sys::nat::Nat; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject, LeanString}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::{build_name, decode_ix_name}; diff --git a/src/ffi/ix/env.rs b/src/ffi/ix/env.rs index a959f554..9175bc7b 100644 --- a/src/ffi/ix/env.rs +++ b/src/ffi/ix/env.rs @@ -4,7 +4,7 @@ use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; use crate::lean::{LeanIxEnvironment, LeanIxRawEnvironment}; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::constant::{build_constant_info, decode_constant_info}; diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index cc8d1264..229b2845 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -18,8 +18,8 @@ use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; use crate::lean::LeanIxExpr; -use lean_sys::nat::Nat; 
-use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject, LeanString}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ix/level.rs b/src/ffi/ix/level.rs index d4972881..61ba7048 100644 --- a/src/ffi/ix/level.rs +++ b/src/ffi/ix/level.rs @@ -10,7 +10,7 @@ use crate::ix::env::{Level, LevelData}; use crate::lean::LeanIxLevel; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index f2f55569..fe153b1c 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -7,8 +7,8 @@ use crate::ix::env::{Name, NameData}; use crate::lean::LeanIxName; -use lean_sys::nat::Nat; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use lean_ffi::nat::Nat; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject, LeanString}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; diff --git a/src/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs index bc7b7576..998a2eeb 100644 --- a/src/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -7,7 +7,7 @@ use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; use crate::lean::LeanIxBlockCompareDetail; -use lean_sys::object::{LeanByteArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanByteArray, LeanCtor, LeanObject}; use crate::ffi::lean_env::{ Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, diff --git a/src/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs index 9bd4ce9b..5559e4cf 100644 --- a/src/ffi/ixon/constant.rs +++ b/src/ffi/ixon/constant.rs @@ -22,7 +22,7 @@ use crate::lean::{ LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, LeanIxonRecursorProj, LeanIxonRecursorRule, 
}; -use lean_sys::object::{LeanArray, LeanByteArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanByteArray, LeanCtor, LeanObject}; use crate::ffi::ixon::expr::{ build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, diff --git a/src/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs index e4f1cc78..fce3f393 100644 --- a/src/ffi/ixon/enums.rs +++ b/src/ffi/ixon/enums.rs @@ -7,7 +7,7 @@ use crate::ix::ixon::constant::DefKind; use crate::lean::{ LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, }; -use lean_sys::object::LeanObject; +use lean_ffi::object::LeanObject; /// Build Ixon.DefKind /// | defn -- tag 0 diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs index b40218f3..879a05f0 100644 --- a/src/ffi/ixon/env.rs +++ b/src/ffi/ixon/env.rs @@ -10,7 +10,7 @@ use crate::ix::ixon::constant::Constant as IxonConstant; use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; use crate::lean::LeanIxonRawEnv; -use lean_sys::object::{ +use lean_ffi::object::{ LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, }; diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs index 074fa03f..b984cb1a 100644 --- a/src/ffi/ixon/expr.rs +++ b/src/ffi/ixon/expr.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::lean::LeanIxonExpr; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; /// Build Ixon.Expr (12 constructors). 
pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObject { diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs index 64bbcabc..630cf09c 100644 --- a/src/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -13,7 +13,7 @@ use crate::lean::{ LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, LeanIxonExprMetaArena, LeanIxonExprMetaData, LeanIxonNamed, }; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::ix::constant::{ build_reducibility_hints, decode_reducibility_hints, diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs index 86149792..7ed48d9f 100644 --- a/src/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -13,7 +13,7 @@ use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; use crate::lean::{ LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, LeanIxonUniv, }; -use lean_sys::object::{LeanByteArray, LeanObject}; +use lean_ffi::object::{LeanByteArray, LeanObject}; use crate::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; diff --git a/src/ffi/ixon/sharing.rs b/src/ffi/ixon/sharing.rs index 797311fe..34b26c85 100644 --- a/src/ffi/ixon/sharing.rs +++ b/src/ffi/ixon/sharing.rs @@ -7,7 +7,7 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; -use lean_sys::object::{LeanArray, LeanByteArray}; +use lean_ffi::object::{LeanArray, LeanByteArray}; use crate::ffi::ixon::expr::decode_ixon_expr_array; use crate::ffi::ixon::serialize::lean_ptr_to_ixon_expr; diff --git a/src/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs index 321773de..fa72a958 100644 --- a/src/ffi/ixon/univ.rs +++ b/src/ffi/ixon/univ.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use crate::ix::ixon::univ::Univ; use crate::lean::LeanIxonUniv; -use lean_sys::object::{LeanArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; impl LeanIxonUniv { /// Build Ixon.Univ diff --git 
a/src/ffi/keccak.rs b/src/ffi/keccak.rs index 0284e9f6..3638a3d9 100644 --- a/src/ffi/keccak.rs +++ b/src/ffi/keccak.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use tiny_keccak::{Hasher, Keccak}; -use lean_sys::object::{ +use lean_ffi::object::{ ExternalClass, LeanByteArray, LeanExternal, LeanObject, }; diff --git a/src/ffi/lean_env.rs b/src/ffi/lean_env.rs index 8d13fbab..62c1454b 100644 --- a/src/ffi/lean_env.rs +++ b/src/ffi/lean_env.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use rustc_hash::FxHashMap; -use lean_sys::nat::Nat; -use lean_sys::object::LeanObject; +use lean_ffi::nat::Nat; +use lean_ffi::object::LeanObject; use crate::{ ix::compile::compile_env, diff --git a/src/ffi/primitives.rs b/src/ffi/primitives.rs index 80a6e0e6..f50ba36c 100644 --- a/src/ffi/primitives.rs +++ b/src/ffi/primitives.rs @@ -6,8 +6,8 @@ //! - List, Array, ByteArray //! - AssocList, HashMap -use lean_sys::nat::Nat; -use lean_sys::object::{ +use lean_ffi::nat::Nat; +use lean_ffi::object::{ LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObject, LeanString, }; @@ -35,7 +35,7 @@ pub fn build_nat(n: &Nat) -> LeanObject { limbs.push(u64::from_le_bytes(arr)); } unsafe { - LeanObject::from_raw(lean_sys::nat::lean_nat_from_limbs( + LeanObject::from_raw(lean_ffi::nat::lean_nat_from_limbs( limbs.len(), limbs.as_ptr(), )) diff --git a/src/ffi/unsigned.rs b/src/ffi/unsigned.rs index 396b4954..ffc44d25 100644 --- a/src/ffi/unsigned.rs +++ b/src/ffi/unsigned.rs @@ -1,4 +1,4 @@ -use lean_sys::object::LeanByteArray; +use lean_ffi::object::LeanByteArray; #[unsafe(no_mangle)] extern "C" fn c_u16_to_le_bytes(v: u16) -> LeanByteArray { diff --git a/src/iroh/_client.rs b/src/iroh/_client.rs index 09358ca6..70e17bc8 100644 --- a/src/iroh/_client.rs +++ b/src/iroh/_client.rs @@ -1,4 +1,4 @@ -use lean_sys::object::{LeanExcept, LeanObject}; +use lean_ffi::object::{LeanExcept, LeanObject}; const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS 
aarch64-darwin"; diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs index c7cec03a..29af5a56 100644 --- a/src/iroh/_server.rs +++ b/src/iroh/_server.rs @@ -1,4 +1,4 @@ -use lean_sys::object::LeanExcept; +use lean_ffi::object::LeanExcept; /// `Iroh.Serve.serve' : Unit → Except String Unit` #[unsafe(no_mangle)] diff --git a/src/ix/compile.rs b/src/ix/compile.rs index 03809f32..0f176002 100644 --- a/src/ix/compile.rs +++ b/src/ix/compile.rs @@ -17,7 +17,7 @@ use std::{ thread, }; -use lean_sys::nat::Nat; +use lean_ffi::nat::Nat; use crate::{ ix::address::Address, diff --git a/src/ix/decompile.rs b/src/ix/decompile.rs index afa932e2..eb63f31b 100644 --- a/src/ix/decompile.rs +++ b/src/ix/decompile.rs @@ -9,7 +9,7 @@ #![allow(clippy::map_err_ignore)] #![allow(clippy::match_same_arms)] -use lean_sys::nat::Nat; +use lean_ffi::nat::Nat; use crate::{ ix::address::Address, diff --git a/src/ix/env.rs b/src/ix/env.rs index bc77e11b..c57dc2ff 100644 --- a/src/ix/env.rs +++ b/src/ix/env.rs @@ -14,7 +14,7 @@ use std::{ sync::Arc, }; -use lean_sys::nat::Nat; +use lean_ffi::nat::Nat; use rustc_hash::FxHashMap; // -- Name tags ---------------------------------------------------------------- diff --git a/src/ix/graph.rs b/src/ix/graph.rs index 56cd3ae7..74f4d961 100644 --- a/src/ix/graph.rs +++ b/src/ix/graph.rs @@ -177,7 +177,7 @@ fn get_expr_references<'a>( mod tests { use super::*; use crate::ix::env::*; - use lean_sys::nat::Nat; + use lean_ffi::nat::Nat; fn n(s: &str) -> Name { Name::str(Name::anon(), s.to_string()) diff --git a/src/ix/ground.rs b/src/ix/ground.rs index 0c963409..4be05110 100644 --- a/src/ix/ground.rs +++ b/src/ix/ground.rs @@ -9,7 +9,7 @@ use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use rustc_hash::{FxHashMap, FxHashSet}; use std::collections::hash_map::Entry; -use lean_sys::nat::Nat; +use lean_ffi::nat::Nat; use crate::{ ix::env::{ diff --git a/src/ix/ixon/serialize.rs b/src/ix/ixon/serialize.rs index 7671c57b..78e05580 100644 --- 
a/src/ix/ixon/serialize.rs +++ b/src/ix/ixon/serialize.rs @@ -871,7 +871,7 @@ impl Constant { // ============================================================================ use crate::ix::env::{Name, NameData}; -use lean_sys::nat::Nat; +use lean_ffi::nat::Nat; use rustc_hash::FxHashMap; /// Serialize a Name to bytes (full recursive serialization, for standalone use). diff --git a/src/ix/mutual.rs b/src/ix/mutual.rs index 194a0db5..b3bf8122 100644 --- a/src/ix/mutual.rs +++ b/src/ix/mutual.rs @@ -5,7 +5,7 @@ //! [`ctx_to_all`] / [`all_to_ctx`] functions convert between ordered name //! vectors and index maps. -use lean_sys::nat::Nat; +use lean_ffi::nat::Nat; use crate::{ ix::env::{ diff --git a/src/lean.rs b/src/lean.rs index 14bd93d6..f5839388 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -1,9 +1,9 @@ //! Ix-specific Lean domain type definitions. //! -//! Generic Lean FFI wrappers live in the `lean_sys` crate. This module defines -//! typed newtypes for ix-specific Lean types using `lean_sys::lean_domain_type!`. +//! Generic Lean FFI wrappers live in the `lean_ffi` crate. This module defines +//! typed newtypes for ix-specific Lean types using `lean_ffi::lean_domain_type!`. -lean_sys::lean_domain_type! { +lean_ffi::lean_domain_type! { // Ix core types /// Lean `Ix.Name` object. LeanIxName; @@ -104,4 +104,4 @@ lean_sys::lean_domain_type! { } /// `Ix.Address = { hash : ByteArray }` — single-field struct, unboxed to `ByteArray`. 
-pub type LeanIxAddress = lean_sys::object::LeanByteArray; +pub type LeanIxAddress = lean_ffi::object::LeanByteArray; diff --git a/src/sha256.rs b/src/sha256.rs index 9bc854ed..fef0f35d 100644 --- a/src/sha256.rs +++ b/src/sha256.rs @@ -1,6 +1,6 @@ use sha2::{Digest, Sha256}; -use lean_sys::object::LeanByteArray; +use lean_ffi::object::LeanByteArray; #[unsafe(no_mangle)] extern "C" fn rs_sha256(bytes: LeanByteArray) -> LeanByteArray { From 7302e67b4c4dc9afb1e3a0045cda4eddeb5f6a51 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Wed, 4 Mar 2026 13:08:29 -0500 Subject: [PATCH 22/27] Fixup --- lean-ffi/Cargo.toml | 11 + lean-ffi/LICENSE-APACHE | 201 +++++++++ lean-ffi/LICENSE-MIT | 21 + lean-ffi/build.rs | 60 +++ lean-ffi/src/lib.rs | 80 ++++ lean-ffi/src/nat.rs | 170 +++++++ lean-ffi/src/object.rs | 968 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 1511 insertions(+) create mode 100644 lean-ffi/Cargo.toml create mode 100644 lean-ffi/LICENSE-APACHE create mode 100644 lean-ffi/LICENSE-MIT create mode 100644 lean-ffi/build.rs create mode 100644 lean-ffi/src/lib.rs create mode 100644 lean-ffi/src/nat.rs create mode 100644 lean-ffi/src/object.rs diff --git a/lean-ffi/Cargo.toml b/lean-ffi/Cargo.toml new file mode 100644 index 00000000..fc2c4939 --- /dev/null +++ b/lean-ffi/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "lean-ffi" +version = "0.1.0" +edition = "2024" + +[dependencies] +num-bigint = "0.4.6" + +[build-dependencies] +bindgen = "0.71" +cc = "1" diff --git a/lean-ffi/LICENSE-APACHE b/lean-ffi/LICENSE-APACHE new file mode 100644 index 00000000..4252ff0c --- /dev/null +++ b/lean-ffi/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Argument Computer Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lean-ffi/LICENSE-MIT b/lean-ffi/LICENSE-MIT new file mode 100644 index 00000000..829c2986 --- /dev/null +++ b/lean-ffi/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Argument Computer Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/lean-ffi/build.rs b/lean-ffi/build.rs new file mode 100644 index 00000000..e8b98e57 --- /dev/null +++ b/lean-ffi/build.rs @@ -0,0 +1,60 @@ +use std::{env, path::PathBuf, process::Command}; + +fn find_lean_include_dir() -> PathBuf { + // 1. Try LEAN_SYSROOT env var + if let Ok(sysroot) = env::var("LEAN_SYSROOT") { + let inc = PathBuf::from(sysroot).join("include"); + if inc.exists() { + return inc; + } + } + // 2. Try `lean --print-prefix` + if let Ok(output) = Command::new("lean").arg("--print-prefix").output() + && output.status.success() + { + let prefix = String::from_utf8_lossy(&output.stdout).trim().to_string(); + let inc = PathBuf::from(prefix).join("include"); + if inc.exists() { + return inc; + } + } + panic!( + "Cannot find Lean include directory. \ + Set LEAN_SYSROOT or ensure `lean` is on PATH." + ); +} + +fn main() { + let lean_include = find_lean_include_dir(); + let lean_h = lean_include.join("lean").join("lean.h"); + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + let wrapper_c = out_dir.join("lean_static_fns.c"); + + // Generate C wrappers for lean.h's static inline functions and + // Rust bindings for all types and functions. 
+ bindgen::Builder::default() + .header(lean_h.to_str().unwrap()) + .clang_arg(format!("-I{}", lean_include.display())) + .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) + .wrap_static_fns(true) + .wrap_static_fns_path(&wrapper_c) + // lean_get_rc_mt_addr returns `_Atomic(int)*` which bindgen + // cannot wrap. Types using `_Atomic` are made opaque. + .blocklist_function("lean_get_rc_mt_addr") + .opaque_type("lean_thunk_object") + .opaque_type("lean_task_object") + .generate() + .expect("bindgen failed to process lean.h") + .write_to_file(out_dir.join("lean.rs")) + .expect("Couldn't write bindings"); + + // Compile the generated C wrappers into a static library. + cc::Build::new() + .file(&wrapper_c) + .include(&lean_include) + .compile("lean_static_fns"); + + println!("cargo:rerun-if-env-changed=LEAN_SYSROOT"); + println!("cargo:rerun-if-changed={}", lean_h.display()); + println!("cargo:rerun-if-changed=build.rs"); +} diff --git a/lean-ffi/src/lib.rs b/lean-ffi/src/lib.rs new file mode 100644 index 00000000..da9ff1b8 --- /dev/null +++ b/lean-ffi/src/lib.rs @@ -0,0 +1,80 @@ +//! Low-level Lean FFI bindings and type-safe wrappers. +//! +//! The `include` submodule contains auto-generated bindings from `lean.h` via +//! bindgen. Higher-level helpers are in `object` and `nat`. + +#[allow( + non_upper_case_globals, + non_camel_case_types, + non_snake_case, + dead_code, + unsafe_op_in_unsafe_fn, + unused_qualifications, + clippy::all, + clippy::ptr_as_ptr, + clippy::cast_possible_wrap, + clippy::cast_possible_truncation, + clippy::derive_partial_eq_without_eq +)] +pub mod include { + include!(concat!(env!("OUT_DIR"), "/lean.rs")); +} + +pub mod nat; +pub mod object; + +use std::ffi::{CString, c_void}; + +/// Create a CString from a str, stripping any interior null bytes. +/// Lean strings are length-prefixed and can contain null bytes, but the +/// `lean_mk_string` FFI requires a null-terminated C string. 
This function +/// ensures conversion always succeeds by filtering out interior nulls. +pub fn safe_cstring(s: &str) -> CString { + CString::new(s).unwrap_or_else(|_| { + let bytes: Vec<u8> = s.bytes().filter(|&b| b != 0).collect(); + CString::new(bytes).expect("filtered string should have no nulls") + }) +} + +/// No-op foreach callback for external classes that hold no Lean references. +/// +/// # Safety +/// Must only be used as a `lean_external_foreach_fn` callback. +pub unsafe extern "C" fn noop_foreach( + _: *mut c_void, + _: *mut include::lean_object, +) { +} + +/// Generate a `#[repr(transparent)]` newtype over `LeanObject` for a specific +/// Lean type, with `Deref`, `From`, and a `new` constructor. +#[macro_export] +macro_rules! lean_domain_type { + ($($(#[$meta:meta])* $name:ident;)*) => {$( + $(#[$meta])* + #[derive(Clone, Copy)] + #[repr(transparent)] + pub struct $name($crate::object::LeanObject); + + impl std::ops::Deref for $name { + type Target = $crate::object::LeanObject; + #[inline] + fn deref(&self) -> &$crate::object::LeanObject { &self.0 } + } + + impl From<$name> for $crate::object::LeanObject { + #[inline] + fn from(x: $name) -> Self { x.0 } + } + + impl From<$crate::object::LeanObject> for $name { + #[inline] + fn from(obj: $crate::object::LeanObject) -> Self { Self(obj) } + } + + impl $name { + #[inline] + pub fn new(obj: $crate::object::LeanObject) -> Self { Self(obj) } + } + )*}; +} diff --git a/lean-ffi/src/nat.rs b/lean-ffi/src/nat.rs new file mode 100644 index 00000000..b5ded9c5 --- /dev/null +++ b/lean-ffi/src/nat.rs @@ -0,0 +1,170 @@ +//! Lean `Nat` (arbitrary-precision natural number) representation. +//! +//! Lean stores small naturals as tagged scalars and large ones as GMP +//! `mpz_object`s on the heap. This module handles both representations. 
+ +use std::ffi::{c_int, c_void}; +use std::fmt; +use std::mem::MaybeUninit; + +use num_bigint::BigUint; + +use crate::object::LeanObject; + +/// Arbitrary-precision natural number, wrapping `BigUint`. +#[derive(Hash, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)] +pub struct Nat(pub BigUint); + +impl fmt::Display for Nat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From<u64> for Nat { + fn from(x: u64) -> Self { + Nat(BigUint::from(x)) + } +} + +impl Nat { + pub const ZERO: Self = Self(BigUint::ZERO); + + /// Try to convert to u64, returning None if the value is too large. + #[inline] + pub fn to_u64(&self) -> Option<u64> { + u64::try_from(&self.0).ok() + } + + /// Decode a `Nat` from a Lean object pointer. Handles both scalar (unboxed) + /// and heap-allocated (GMP `mpz_object`) representations. + /// + /// # Safety + /// The pointer must be a valid Lean `Nat` object (scalar or mpz). + pub unsafe fn from_ptr(ptr: *const c_void) -> Nat { + let obj = unsafe { LeanObject::from_raw(ptr) }; + if obj.is_scalar() { + let u = obj.unbox_usize(); + Nat(BigUint::from_bytes_le(&u.to_le_bytes())) + } else { + // Heap-allocated big integer (mpz_object) + let mpz: &MpzObject = unsafe { &*ptr.cast() }; + Nat(mpz.m_value.to_biguint()) + } + } + + /// Decode a `Nat` from a `LeanObject`. Convenience wrapper over `from_ptr`. 
+ pub fn from_obj(obj: LeanObject) -> Nat { + unsafe { Self::from_ptr(obj.as_ptr()) } + } + + #[inline] + pub fn from_le_bytes(bytes: &[u8]) -> Nat { + Nat(BigUint::from_bytes_le(bytes)) + } + + #[inline] + pub fn to_le_bytes(&self) -> Vec { + self.0.to_bytes_le() + } +} + +/// From https://github.com/leanprover/lean4/blob/master/src/runtime/object.h: +/// ```cpp +/// struct mpz_object { +/// lean_object m_header; +/// mpz m_value; +/// mpz_object() {} +/// explicit mpz_object(mpz const & m):m_value(m) {} +/// }; +/// ``` +#[repr(C)] +struct MpzObject { + _header: [u8; 8], + m_value: Mpz, +} + +#[repr(C)] +struct Mpz { + alloc: i32, + size: i32, + d: *const u64, +} + +impl Mpz { + fn to_biguint(&self) -> BigUint { + let nlimbs = self.size.unsigned_abs() as usize; + let limbs = unsafe { std::slice::from_raw_parts(self.d, nlimbs) }; + + // Convert limbs (little-endian by limb) + let bytes: Vec<_> = + limbs.iter().flat_map(|&limb| limb.to_le_bytes()).collect(); + + BigUint::from_bytes_le(&bytes) + } +} + +// ============================================================================= +// GMP interop for building Lean Nat objects from limbs +// ============================================================================= + +use crate::include::lean_uint64_to_nat; + +/// LEAN_MAX_SMALL_NAT = SIZE_MAX >> 1 +const LEAN_MAX_SMALL_NAT: u64 = (usize::MAX >> 1) as u64; + +unsafe extern "C" { + #[link_name = "__gmpz_init"] + fn mpz_init(x: *mut Mpz); + + #[link_name = "__gmpz_import"] + fn mpz_import( + rop: *mut Mpz, + count: usize, + order: c_int, + size: usize, + endian: c_int, + nails: usize, + op: *const u64, + ); + + #[link_name = "__gmpz_clear"] + fn mpz_clear(x: *mut Mpz); + + /// Lean's internal mpz allocation — deep-copies the mpz value. + /// Caller must still call mpz_clear on the original. + fn lean_alloc_mpz(v: *mut Mpz) -> *mut c_void; +} + +/// Create a Lean `Nat` from a little-endian array of u64 limbs. 
+/// Replaces the C function `c_lean_nat_from_limbs` from `ixon_ffi.c`. +/// # Safety +/// `limbs` must be valid for reading `num_limbs` elements. +pub unsafe fn lean_nat_from_limbs( + num_limbs: usize, + limbs: *const u64, +) -> *mut c_void { + if num_limbs == 0 { + return LeanObject::box_usize(0).as_mut_ptr(); + } + let first = unsafe { *limbs }; + if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT { + #[allow(clippy::cast_possible_truncation)] // only targets 64-bit + return LeanObject::box_usize(first as usize).as_mut_ptr(); + } + if num_limbs == 1 { + return unsafe { lean_uint64_to_nat(first).cast() }; + } + // Multi-limb: use GMP + unsafe { + let mut value = MaybeUninit::::uninit(); + mpz_init(value.as_mut_ptr()); + // order = -1 (least significant limb first) + // size = 8 bytes per limb, endian = 0 (native), nails = 0 + mpz_import(value.as_mut_ptr(), num_limbs, -1, 8, 0, 0, limbs); + // lean_alloc_mpz deep-copies; we must free the original + let result = lean_alloc_mpz(value.as_mut_ptr()); + mpz_clear(value.as_mut_ptr()); + result + } +} diff --git a/lean-ffi/src/object.rs b/lean-ffi/src/object.rs new file mode 100644 index 00000000..2fbf0f6c --- /dev/null +++ b/lean-ffi/src/object.rs @@ -0,0 +1,968 @@ +//! Type-safe wrappers for Lean FFI object pointers. +//! +//! Each wrapper is a `#[repr(transparent)]` `Copy` newtype over `*const c_void` +//! that asserts the correct Lean tag on construction and provides safe accessor +//! methods. Reference counting is left to Lean (no `Drop` impl). + +use std::ffi::c_void; +use std::marker::PhantomData; +use std::ops::Deref; + +use crate::include; +use crate::safe_cstring; + +// Tag constants from lean.h +const LEAN_MAX_CTOR_TAG: u8 = 243; +const LEAN_TAG_ARRAY: u8 = 246; +const LEAN_TAG_SCALAR_ARRAY: u8 = 248; +const LEAN_TAG_STRING: u8 = 249; +const LEAN_TAG_EXTERNAL: u8 = 254; + +/// Constructor tag for `IO.Error.userError`. 
+const IO_ERROR_USER_ERROR_TAG: u8 = 7; + +// ============================================================================= +// LeanObject — Untyped base wrapper +// ============================================================================= + +/// Untyped wrapper around a raw Lean object pointer. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanObject(*const c_void); + +impl LeanObject { + /// Wrap a raw pointer without any tag check. + /// + /// # Safety + /// The pointer must be a valid Lean object (or tagged scalar). + #[inline] + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + Self(ptr) + } + + /// Wrap a `*mut lean_object` returned from a `lean_ffi` function. + /// + /// # Safety + /// The pointer must be a valid Lean object (or tagged scalar). + #[inline] + pub unsafe fn from_lean_ptr(ptr: *mut include::lean_object) -> Self { + Self(ptr.cast()) + } + + /// Create a Lean `Nat` from a `u64` value. + /// + /// Small values are stored as tagged scalars; larger ones are heap-allocated + /// via the Lean runtime. + #[inline] + pub fn from_nat_u64(n: u64) -> Self { + unsafe { Self::from_lean_ptr(include::lean_uint64_to_nat(n)) } + } + + #[inline] + pub fn as_ptr(self) -> *const c_void { + self.0 + } + + #[inline] + pub fn as_mut_ptr(self) -> *mut c_void { + self.0 as *mut c_void + } + + /// True if this is a tagged scalar (bit 0 set). + #[inline] + pub fn is_scalar(self) -> bool { + self.0 as usize & 1 == 1 + } + + /// Return the object tag. Panics if the object is a scalar. 
+ #[inline] + pub fn tag(self) -> u8 { + assert!(!self.is_scalar(), "tag() called on scalar"); + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_obj_tag(self.0 as *mut _) as u8 + } + } + + #[inline] + pub fn inc_ref(self) { + if !self.is_scalar() { + unsafe { include::lean_inc_ref(self.0 as *mut _) } + } + } + + #[inline] + pub fn dec_ref(self) { + if !self.is_scalar() { + unsafe { include::lean_dec_ref(self.0 as *mut _) } + } + } + + /// Box a `usize` into a tagged scalar pointer. + #[inline] + pub fn box_usize(n: usize) -> Self { + Self(((n << 1) | 1) as *const c_void) + } + + /// Unbox a tagged scalar pointer into a `usize`. + #[inline] + pub fn unbox_usize(self) -> usize { + self.0 as usize >> 1 + } + + #[inline] + pub fn box_u64(n: u64) -> Self { + Self(unsafe { include::lean_box_uint64(n) }.cast()) + } + + #[inline] + pub fn unbox_u64(self) -> u64 { + unsafe { include::lean_unbox_uint64(self.0 as *mut _) } + } + + /// Interpret as a constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). + /// + /// Debug-asserts the tag is in range. + #[inline] + pub fn as_ctor(self) -> LeanCtor { + debug_assert!(!self.is_scalar() && self.tag() <= LEAN_MAX_CTOR_TAG); + LeanCtor(self) + } + + /// Interpret as a `String` object (tag `LEAN_TAG_STRING`). + /// + /// Debug-asserts the tag is correct. + #[inline] + pub fn as_string(self) -> LeanString { + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_STRING); + LeanString(self) + } + + /// Interpret as an `Array` object (tag `LEAN_TAG_ARRAY`). + /// + /// Debug-asserts the tag is correct. + #[inline] + pub fn as_array(self) -> LeanArray { + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_ARRAY); + LeanArray(self) + } + + /// Interpret as a `List` (nil = scalar, cons = tag 1). + /// + /// Debug-asserts the tag is valid for a list. 
+ #[inline] + pub fn as_list(self) -> LeanList { + debug_assert!(self.is_scalar() || self.tag() == 1); + LeanList(self) + } + + /// Interpret as a `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`). + #[inline] + pub fn as_byte_array(self) -> LeanByteArray { + debug_assert!(!self.is_scalar() && self.tag() == LEAN_TAG_SCALAR_ARRAY); + LeanByteArray(self) + } + + #[inline] + pub fn box_u32(n: u32) -> Self { + Self(unsafe { include::lean_box_uint32(n) }.cast()) + } + + #[inline] + pub fn unbox_u32(self) -> u32 { + unsafe { include::lean_unbox_uint32(self.0 as *mut _) } + } +} + +// ============================================================================= +// LeanArray — Array α (tag LEAN_TAG_ARRAY) +// ============================================================================= + +/// Typed wrapper for a Lean `Array α` object (tag `LEAN_TAG_ARRAY`). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanArray(LeanObject); + +impl Deref for LeanArray { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanArray { + /// Wrap a raw pointer, asserting it is an `Array` (tag `LEAN_TAG_ARRAY`). + /// + /// # Safety + /// The pointer must be a valid Lean `Array` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_ARRAY); + Self(obj) + } + + /// Allocate a new array with `size` elements (capacity = size). 
+ pub fn alloc(size: usize) -> Self { + let obj = unsafe { include::lean_alloc_array(size, size) }; + Self(LeanObject(obj.cast())) + } + + pub fn len(&self) -> usize { + unsafe { include::lean_array_size(self.0.as_ptr() as *mut _) } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn get(&self, i: usize) -> LeanObject { + LeanObject( + unsafe { include::lean_array_get_core(self.0.as_ptr() as *mut _, i) } + .cast(), + ) + } + + pub fn set(&self, i: usize, val: impl Into) { + let val: LeanObject = val.into(); + unsafe { + include::lean_array_set_core( + self.0.as_ptr() as *mut _, + i, + val.as_ptr() as *mut _, + ); + } + } + + /// Return a slice over the array elements. + pub fn data(&self) -> &[LeanObject] { + unsafe { + let cptr = include::lean_array_cptr(self.0.as_ptr() as *mut _); + // Safety: LeanObject is repr(transparent) over *const c_void, and + // lean_array_cptr returns *mut *mut lean_object which has the same layout. + std::slice::from_raw_parts(cptr.cast(), self.len()) + } + } + + pub fn iter(&self) -> impl Iterator + '_ { + self.data().iter().copied() + } + + pub fn map(&self, f: impl Fn(LeanObject) -> T) -> Vec { + self.iter().map(f).collect() + } +} + +// ============================================================================= +// LeanByteArray — ByteArray (tag LEAN_TAG_SCALAR_ARRAY) +// ============================================================================= + +/// Typed wrapper for a Lean `ByteArray` object (tag `LEAN_TAG_SCALAR_ARRAY`). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanByteArray(LeanObject); + +impl Deref for LeanByteArray { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanByteArray { + /// Wrap a raw pointer, asserting it is a `ByteArray` (tag `LEAN_TAG_SCALAR_ARRAY`). + /// + /// # Safety + /// The pointer must be a valid Lean `ByteArray` object. 
+ pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_SCALAR_ARRAY); + Self(obj) + } + + /// Allocate a new byte array with `size` bytes (capacity = size). + pub fn alloc(size: usize) -> Self { + let obj = unsafe { include::lean_alloc_sarray(1, size, size) }; + Self(LeanObject(obj.cast())) + } + + /// Allocate a new byte array and copy `data` into it. + pub fn from_bytes(data: &[u8]) -> Self { + let arr = Self::alloc(data.len()); + unsafe { + let cptr = include::lean_sarray_cptr(arr.0.as_ptr() as *mut _); + std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); + } + arr + } + + pub fn len(&self) -> usize { + unsafe { include::lean_sarray_size(self.0.as_ptr() as *mut _) } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Return the byte contents as a slice. + pub fn as_bytes(&self) -> &[u8] { + unsafe { + let cptr = include::lean_sarray_cptr(self.0.as_ptr() as *mut _); + std::slice::from_raw_parts(cptr, self.len()) + } + } + + /// Copy `data` into the byte array and update its size. + /// + /// # Safety + /// The caller must ensure the array has sufficient capacity for `data`. + pub unsafe fn set_data(&self, data: &[u8]) { + unsafe { + let obj = self.0.as_mut_ptr(); + let cptr = include::lean_sarray_cptr(obj.cast()); + std::ptr::copy_nonoverlapping(data.as_ptr(), cptr, data.len()); + // Update m_size: at offset 8 (after lean_object header) + *obj.cast::().add(8).cast::() = data.len(); + } + } +} + +// ============================================================================= +// LeanString — String (tag LEAN_TAG_STRING) +// ============================================================================= + +/// Typed wrapper for a Lean `String` object (tag `LEAN_TAG_STRING`). 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanString(LeanObject); + +impl Deref for LeanString { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanString { + /// Wrap a raw pointer, asserting it is a `String` (tag `LEAN_TAG_STRING`). + /// + /// # Safety + /// The pointer must be a valid Lean `String` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_STRING); + Self(obj) + } + + /// Create a Lean string from a Rust `&str`. + pub fn new(s: &str) -> Self { + let c = safe_cstring(s); + let obj = unsafe { include::lean_mk_string(c.as_ptr()) }; + Self(LeanObject(obj.cast())) + } + + /// Number of data bytes (excluding the trailing NUL). + pub fn byte_len(&self) -> usize { + unsafe { include::lean_string_size(self.0.as_ptr() as *mut _) - 1 } + } +} + +impl std::fmt::Display for LeanString { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + unsafe { + let obj = self.0.as_ptr() as *mut _; + let len = include::lean_string_size(obj) - 1; // m_size includes NUL + let data = include::lean_string_cstr(obj); + let bytes = std::slice::from_raw_parts(data.cast::(), len); + let s = std::str::from_utf8_unchecked(bytes); + f.write_str(s) + } + } +} + +// ============================================================================= +// LeanCtor — Constructor objects (tag 0–LEAN_MAX_CTOR_TAG) +// ============================================================================= + +/// Typed wrapper for a Lean constructor object (tag 0–`LEAN_MAX_CTOR_TAG`). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanCtor(LeanObject); + +impl Deref for LeanCtor { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanCtor { + /// Wrap a raw pointer, asserting it is a constructor (tag <= `LEAN_MAX_CTOR_TAG`). 
+ /// + /// # Safety + /// The pointer must be a valid Lean constructor object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() <= LEAN_MAX_CTOR_TAG); + Self(obj) + } + + /// Allocate a new constructor object. + pub fn alloc(tag: u8, num_objs: usize, scalar_size: usize) -> Self { + #[allow(clippy::cast_possible_truncation)] + let obj = unsafe { + include::lean_alloc_ctor(tag as u32, num_objs as u32, scalar_size as u32) + }; + Self(LeanObject(obj.cast())) + } + + pub fn tag(&self) -> u8 { + self.0.tag() + } + + /// Get the `i`-th object field via `lean_ctor_get`. + pub fn get(&self, i: usize) -> LeanObject { + #[allow(clippy::cast_possible_truncation)] + LeanObject( + unsafe { include::lean_ctor_get(self.0.as_ptr() as *mut _, i as u32) } + .cast(), + ) + } + + /// Set the `i`-th object field via `lean_ctor_set`. + pub fn set(&self, i: usize, val: impl Into) { + let val: LeanObject = val.into(); + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set( + self.0.as_ptr() as *mut _, + i as u32, + val.as_ptr() as *mut _, + ); + } + } + + /// Set a `u8` scalar field at the given byte offset (past all object fields). + pub fn set_u8(&self, offset: usize, val: u8) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set_uint8( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); + } + } + + /// Set a `u32` scalar field at the given byte offset (past all object fields). + pub fn set_u32(&self, offset: usize, val: u32) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set_uint32( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); + } + } + + /// Set a `u64` scalar field at the given byte offset (past all object fields). 
+ pub fn set_u64(&self, offset: usize, val: u64) { + #[allow(clippy::cast_possible_truncation)] + unsafe { + include::lean_ctor_set_uint64( + self.0.as_ptr() as *mut _, + offset as u32, + val, + ); + } + } + + /// Read `N` object-field pointers using raw pointer math. + /// + /// This bypasses `lean_ctor_get`'s bounds check, which is necessary when + /// reading past the declared object fields into the scalar area (e.g. for + /// `Expr.Data`). + pub fn objs(&self) -> [LeanObject; N] { + let base = unsafe { self.0.as_ptr().cast::<*const c_void>().add(1) }; + std::array::from_fn(|i| LeanObject(unsafe { *base.add(i) })) + } + + /// Read a `u64` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_u64(&self, num_objs: usize, offset: usize) -> u64 { + unsafe { + std::ptr::read_unaligned( + self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), + ) + } + } + + /// Read a `u32` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_u32(&self, num_objs: usize, offset: usize) -> u32 { + unsafe { + std::ptr::read_unaligned( + self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset).cast(), + ) + } + } + + /// Read a `u8` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_u8(&self, num_objs: usize, offset: usize) -> u8 { + unsafe { *self.0.as_ptr().cast::().add(8 + num_objs * 8 + offset) } + } + + /// Read a `bool` scalar at `offset` bytes past `num_objs` object fields. + pub fn scalar_bool(&self, num_objs: usize, offset: usize) -> bool { + self.scalar_u8(num_objs, offset) != 0 + } +} + +// ============================================================================= +// LeanExternal — External objects (tag LEAN_TAG_EXTERNAL) +// ============================================================================= + +/// Typed wrapper for a Lean external object (tag `LEAN_TAG_EXTERNAL`) holding a `T`. 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanExternal(LeanObject, PhantomData); + +impl Deref for LeanExternal { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanExternal { + /// Wrap a raw pointer, asserting it is an external object (tag `LEAN_TAG_EXTERNAL`). + /// + /// # Safety + /// The pointer must be a valid Lean external object whose data pointer + /// points to a valid `T`. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && obj.tag() == LEAN_TAG_EXTERNAL); + Self(obj, PhantomData) + } + + /// Allocate a new external object holding `data`. + pub fn alloc(class: &ExternalClass, data: T) -> Self { + let data_ptr = Box::into_raw(Box::new(data)); + let obj = + unsafe { include::lean_alloc_external(class.0.cast(), data_ptr.cast()) }; + Self(LeanObject(obj.cast()), PhantomData) + } + + /// Get a reference to the wrapped data. + pub fn get(&self) -> &T { + unsafe { + &*include::lean_get_external_data(self.0.as_ptr() as *mut _).cast::() + } + } +} + +// ============================================================================= +// ExternalClass — Registered external class +// ============================================================================= + +/// A registered Lean external class (wraps `lean_external_class*`). +pub struct ExternalClass(*mut c_void); + +// Safety: the class pointer is initialized once and read-only thereafter. +unsafe impl Send for ExternalClass {} +unsafe impl Sync for ExternalClass {} + +impl ExternalClass { + /// Register a new external class with explicit finalizer and foreach callbacks. + /// + /// # Safety + /// The `finalizer` callback must correctly free the external data, and + /// `foreach` must correctly visit any Lean object references held by the data. 
+ pub unsafe fn register( + finalizer: include::lean_external_finalize_proc, + foreach: include::lean_external_foreach_proc, + ) -> Self { + Self( + unsafe { include::lean_register_external_class(finalizer, foreach) } + .cast(), + ) + } + + /// Register a new external class that uses `Drop` to finalize `T` + /// and has no Lean object references to visit. + pub fn register_with_drop() -> Self { + unsafe extern "C" fn drop_finalizer(ptr: *mut c_void) { + if !ptr.is_null() { + drop(unsafe { Box::from_raw(ptr.cast::()) }); + } + } + unsafe { + Self::register(Some(drop_finalizer::), Some(crate::noop_foreach)) + } + } +} + +// ============================================================================= +// LeanList — List α +// ============================================================================= + +/// Typed wrapper for a Lean `List α` (nil = scalar `lean_box(0)`, cons = ctor tag 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanList(LeanObject); + +impl Deref for LeanList { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanList { + /// Wrap a raw pointer, asserting it is a valid `List` (scalar nil or ctor tag 1). + /// + /// # Safety + /// The pointer must be a valid Lean `List` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(obj.is_scalar() || obj.tag() == 1); + Self(obj) + } + + /// The empty list. + pub fn nil() -> Self { + Self(LeanObject::box_usize(0)) + } + + /// Prepend `head` to `tail`. 
+ pub fn cons(head: impl Into, tail: LeanList) -> Self { + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, head); + ctor.set(1, tail); + Self(ctor.0) + } + + pub fn is_nil(&self) -> bool { + self.0.is_scalar() + } + + pub fn iter(&self) -> LeanListIter { + LeanListIter(self.0) + } + + pub fn collect(&self, f: impl Fn(LeanObject) -> T) -> Vec { + self.iter().map(f).collect() + } +} + +impl> FromIterator for LeanList { + fn from_iter>(iter: I) -> Self { + let items: Vec = iter.into_iter().map(Into::into).collect(); + let mut list = Self::nil(); + for item in items.into_iter().rev() { + list = Self::cons(item, list); + } + list + } +} + +/// Iterator over the elements of a `LeanList`. +pub struct LeanListIter(LeanObject); + +impl Iterator for LeanListIter { + type Item = LeanObject; + fn next(&mut self) -> Option { + if self.0.is_scalar() { + return None; + } + let ctor = self.0.as_ctor(); + let [head, tail] = ctor.objs::<2>(); + self.0 = tail; + Some(head) + } +} + +// ============================================================================= +// LeanOption — Option α +// ============================================================================= + +/// Typed wrapper for a Lean `Option α` (none = scalar, some = ctor tag 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanOption(LeanObject); + +impl Deref for LeanOption { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanOption { + /// Wrap a raw pointer, asserting it is a valid `Option`. + /// + /// # Safety + /// The pointer must be a valid Lean `Option` object. 
+ pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(obj.is_scalar() || obj.tag() == 1); + Self(obj) + } + + pub fn none() -> Self { + Self(LeanObject::box_usize(0)) + } + + pub fn some(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, val); + Self(ctor.0) + } + + pub fn is_none(&self) -> bool { + self.0.is_scalar() + } + + pub fn is_some(&self) -> bool { + !self.is_none() + } + + pub fn to_option(&self) -> Option { + if self.is_none() { + None + } else { + let ctor = self.0.as_ctor(); + Some(ctor.get(0)) + } + } +} + +// ============================================================================= +// LeanExcept — Except ε α +// ============================================================================= + +/// Typed wrapper for a Lean `Except ε α` (error = ctor tag 0, ok = ctor tag 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanExcept(LeanObject); + +impl Deref for LeanExcept { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanExcept { + /// Wrap a raw pointer, asserting it is a valid `Except`. + /// + /// # Safety + /// The pointer must be a valid Lean `Except` object. + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + let obj = LeanObject(ptr); + debug_assert!(!obj.is_scalar() && (obj.tag() == 0 || obj.tag() == 1)); + Self(obj) + } + + /// Build `Except.ok val`. + pub fn ok(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, val); + Self(ctor.0) + } + + /// Build `Except.error msg`. + pub fn error(msg: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, msg); + Self(ctor.0) + } + + /// Build `Except.error (String.mk msg)` from a Rust string. 
+ pub fn error_string(msg: &str) -> Self { + Self::error(LeanString::new(msg)) + } + + pub fn is_ok(&self) -> bool { + self.0.tag() == 1 + } + + pub fn is_error(&self) -> bool { + self.0.tag() == 0 + } + + pub fn into_result(self) -> Result { + let ctor = self.0.as_ctor(); + if self.is_ok() { Ok(ctor.get(0)) } else { Err(ctor.get(0)) } + } +} + +// ============================================================================= +// LeanIOResult — EStateM.Result (BaseIO.Result) +// ============================================================================= + +/// Typed wrapper for a Lean `BaseIO.Result α` (`EStateM.Result`). +/// ok = ctor tag 0 (value, world), error = ctor tag 1 (error, world). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanIOResult(LeanObject); + +impl Deref for LeanIOResult { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl LeanIOResult { + /// Build a successful IO result (tag 0, fields: [val, box(0)]). + pub fn ok(val: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, val); + ctor.set(1, LeanObject::box_usize(0)); // world token + Self(ctor.0) + } + + /// Build an IO error result (tag 1, fields: [err, box(0)]). + pub fn error(err: impl Into) -> Self { + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, err); + ctor.set(1, LeanObject::box_usize(0)); // world token + Self(ctor.0) + } + + /// Build an IO error from a Rust string via `IO.Error.userError` (tag 7, 1 field). + pub fn error_string(msg: &str) -> Self { + let user_error = LeanCtor::alloc(IO_ERROR_USER_ERROR_TAG, 1, 0); + user_error.set(0, LeanString::new(msg)); + Self::error(*user_error) + } +} + +// ============================================================================= +// LeanProd — Prod α β (pair) +// ============================================================================= + +/// Typed wrapper for a Lean `Prod α β` (ctor tag 0, 2 object fields). 
+#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanProd(LeanObject); + +impl Deref for LeanProd { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanProd) -> Self { + x.0 + } +} + +impl LeanProd { + /// Build a pair `(fst, snd)`. + pub fn new(fst: impl Into, snd: impl Into) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, fst); + ctor.set(1, snd); + Self(*ctor) + } + + /// Get the first element. + pub fn fst(&self) -> LeanObject { + let ctor = self.0.as_ctor(); + ctor.get(0) + } + + /// Get the second element. + pub fn snd(&self) -> LeanObject { + let ctor = self.0.as_ctor(); + ctor.get(1) + } +} + +// ============================================================================= +// From for LeanObject — allow wrapper types to be passed to set() etc. +// ============================================================================= + +impl From for LeanObject { + #[inline] + fn from(x: LeanArray) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanByteArray) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanString) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanCtor) -> Self { + x.0 + } +} + +impl From> for LeanObject { + #[inline] + fn from(x: LeanExternal) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanList) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanOption) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanExcept) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanIOResult) -> Self { + x.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: u32) -> Self { + Self::box_u32(x) + } +} From ffeb1650d91a8d59f36a74059dc293406f707078 Mon Sep 17 00:00:00 2001 From: samuelburnham 
<45365069+samuelburnham@users.noreply.github.com> Date: Wed, 4 Mar 2026 15:27:22 -0500 Subject: [PATCH 23/27] Abstract LeanObject usage where possible --- lean-ffi/src/lib.rs | 5 - lean-ffi/src/nat.rs | 29 +-- lean-ffi/src/object.rs | 71 +++++++ src/ffi.rs | 7 + src/{iroh/_client.rs => ffi/_iroh.rs} | 27 ++- src/ffi/aiur/protocol.rs | 42 ++--- src/ffi/aiur/toplevel.rs | 61 +++--- src/ffi/compile.rs | 157 +++++++++------- src/ffi/graph.rs | 15 +- src/ffi/ix/constant.rs | 87 +++++---- src/ffi/ix/data.rs | 67 ++++--- src/ffi/ix/env.rs | 42 +++-- src/ffi/ix/expr.rs | 76 ++++---- src/ffi/ix/level.rs | 26 +-- src/ffi/ix/name.rs | 14 +- src/ffi/ixon/compare.rs | 32 ++-- src/ffi/ixon/constant.rs | 255 +++++++++++++++----------- src/ffi/ixon/enums.rs | 43 ++--- src/ffi/ixon/env.rs | 87 ++++----- src/ffi/ixon/expr.rs | 51 +++--- src/ffi/ixon/meta.rs | 196 +++++++++++--------- src/ffi/ixon/serialize.rs | 108 ++--------- src/ffi/ixon/sharing.rs | 14 +- src/ffi/ixon/univ.rs | 7 +- src/ffi/lean_env.rs | 196 ++++++++++---------- src/ffi/primitives.rs | 40 ++-- src/iroh.rs | 29 +-- src/iroh/_server.rs | 10 - src/lean.rs | 26 +++ src/lib.rs | 5 + 30 files changed, 956 insertions(+), 869 deletions(-) rename src/{iroh/_client.rs => ffi/_iroh.rs} (52%) delete mode 100644 src/iroh/_server.rs diff --git a/lean-ffi/src/lib.rs b/lean-ffi/src/lib.rs index da9ff1b8..3926e554 100644 --- a/lean-ffi/src/lib.rs +++ b/lean-ffi/src/lib.rs @@ -67,11 +67,6 @@ macro_rules! lean_domain_type { fn from(x: $name) -> Self { x.0 } } - impl From<$crate::object::LeanObject> for $name { - #[inline] - fn from(obj: $crate::object::LeanObject) -> Self { Self(obj) } - } - impl $name { #[inline] pub fn new(obj: $crate::object::LeanObject) -> Self { Self(obj) } diff --git a/lean-ffi/src/nat.rs b/lean-ffi/src/nat.rs index b5ded9c5..a099ccc4 100644 --- a/lean-ffi/src/nat.rs +++ b/lean-ffi/src/nat.rs @@ -3,7 +3,7 @@ //! Lean stores small naturals as tagged scalars and large ones as GMP //! `mpz_object`s on the heap. 
This module handles both representations. -use std::ffi::{c_int, c_void}; +use std::ffi::c_int; use std::fmt; use std::mem::MaybeUninit; @@ -36,28 +36,19 @@ impl Nat { u64::try_from(&self.0).ok() } - /// Decode a `Nat` from a Lean object pointer. Handles both scalar (unboxed) + /// Decode a `Nat` from a `LeanObject`. Handles both scalar (unboxed) /// and heap-allocated (GMP `mpz_object`) representations. - /// - /// # Safety - /// The pointer must be a valid Lean `Nat` object (scalar or mpz). - pub unsafe fn from_ptr(ptr: *const c_void) -> Nat { - let obj = unsafe { LeanObject::from_raw(ptr) }; + pub fn from_obj(obj: LeanObject) -> Nat { if obj.is_scalar() { let u = obj.unbox_usize(); Nat(BigUint::from_bytes_le(&u.to_le_bytes())) } else { // Heap-allocated big integer (mpz_object) - let mpz: &MpzObject = unsafe { &*ptr.cast() }; + let mpz: &MpzObject = unsafe { &*obj.as_ptr().cast() }; Nat(mpz.m_value.to_biguint()) } } - /// Decode a `Nat` from a `LeanObject`. Convenience wrapper over `from_ptr`. - pub fn from_obj(obj: LeanObject) -> Nat { - unsafe { Self::from_ptr(obj.as_ptr()) } - } - #[inline] pub fn from_le_bytes(bytes: &[u8]) -> Nat { Nat(BigUint::from_bytes_le(bytes)) @@ -133,7 +124,7 @@ unsafe extern "C" { /// Lean's internal mpz allocation — deep-copies the mpz value. /// Caller must still call mpz_clear on the original. - fn lean_alloc_mpz(v: *mut Mpz) -> *mut c_void; + fn lean_alloc_mpz(v: *mut Mpz) -> *mut std::ffi::c_void; } /// Create a Lean `Nat` from a little-endian array of u64 limbs. 
@@ -143,17 +134,17 @@ unsafe extern "C" { pub unsafe fn lean_nat_from_limbs( num_limbs: usize, limbs: *const u64, -) -> *mut c_void { +) -> LeanObject { if num_limbs == 0 { - return LeanObject::box_usize(0).as_mut_ptr(); + return LeanObject::box_usize(0); } let first = unsafe { *limbs }; if num_limbs == 1 && first <= LEAN_MAX_SMALL_NAT { #[allow(clippy::cast_possible_truncation)] // only targets 64-bit - return LeanObject::box_usize(first as usize).as_mut_ptr(); + return LeanObject::box_usize(first as usize); } if num_limbs == 1 { - return unsafe { lean_uint64_to_nat(first).cast() }; + return unsafe { LeanObject::from_lean_ptr(lean_uint64_to_nat(first)) }; } // Multi-limb: use GMP unsafe { @@ -165,6 +156,6 @@ pub unsafe fn lean_nat_from_limbs( // lean_alloc_mpz deep-copies; we must free the original let result = lean_alloc_mpz(value.as_mut_ptr()); mpz_clear(value.as_mut_ptr()); - result + LeanObject::from_raw(result) } } diff --git a/lean-ffi/src/object.rs b/lean-ffi/src/object.rs index 2fbf0f6c..31f6817e 100644 --- a/lean-ffi/src/object.rs +++ b/lean-ffi/src/object.rs @@ -98,6 +98,21 @@ impl LeanObject { } } + /// Create a `LeanObject` from a raw tag value for zero-field enum constructors. + /// Lean passes simple enums (all constructors have zero fields) as unboxed + /// tag values (0, 1, 2, ...) across FFI, not as `lean_box(tag)`. + #[inline] + pub fn from_enum_tag(tag: usize) -> Self { + Self(tag as *const c_void) + } + + /// Extract the raw tag value from a zero-field enum constructor. + /// Inverse of `from_enum_tag`. + #[inline] + pub fn as_enum_tag(self) -> usize { + self.0 as usize + } + /// Box a `usize` into a tagged scalar pointer. 
#[inline] pub fn box_usize(n: usize) -> Self { @@ -174,6 +189,62 @@ impl LeanObject { } } +// ============================================================================= +// LeanNat — Nat (scalar or heap mpz) +// ============================================================================= + +/// Typed wrapper for a Lean `Nat` (small = tagged scalar, big = heap `mpz_object`). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanNat(LeanObject); + +impl Deref for LeanNat { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanNat) -> Self { + x.0 + } +} + +// ============================================================================= +// LeanBool — Bool (unboxed scalar: false = 0, true = 1) +// ============================================================================= + +/// Typed wrapper for a Lean `Bool` (always an unboxed scalar: false = 0, true = 1). +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanBool(LeanObject); + +impl Deref for LeanBool { + type Target = LeanObject; + #[inline] + fn deref(&self) -> &LeanObject { + &self.0 + } +} + +impl From for LeanObject { + #[inline] + fn from(x: LeanBool) -> Self { + x.0 + } +} + +impl LeanBool { + /// Decode to a Rust `bool`. + #[inline] + pub fn to_bool(self) -> bool { + self.0.as_enum_tag() != 0 + } +} + // ============================================================================= // LeanArray — Array α (tag LEAN_TAG_ARRAY) // ============================================================================= diff --git a/src/ffi.rs b/src/ffi.rs index b31bb96e..8af44040 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -1,3 +1,10 @@ +// Lean and C don't support feature flags, so the _iroh module is exposed as a fallback for when the `net` feature is disabled and/or on the `aarch64-darwin` target. 
+// This fallback module contains dummy functions that can still be called via Lean->Rust FFI, but will return an error message that Lean then prints before exiting. +#[cfg(any( + not(feature = "net"), + all(target_os = "macos", target_arch = "aarch64") +))] +pub mod _iroh; pub mod aiur; pub mod byte_array; #[cfg(all( diff --git a/src/iroh/_client.rs b/src/ffi/_iroh.rs similarity index 52% rename from src/iroh/_client.rs rename to src/ffi/_iroh.rs index 70e17bc8..be0d5c4d 100644 --- a/src/iroh/_client.rs +++ b/src/ffi/_iroh.rs @@ -1,4 +1,4 @@ -use lean_ffi::object::{LeanExcept, LeanObject}; +use lean_ffi::object::{LeanArray, LeanExcept, LeanString}; const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature is disabled \ or on MacOS aarch64-darwin"; @@ -6,10 +6,10 @@ const ERR_MSG: &str = "Iroh functions not supported when the Rust `net` feature /// `Iroh.Connect.putBytes' : @& String → @& Array String → @& String → @& String → Except String PutResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_put( - _node_id: LeanObject, - _addrs: LeanObject, - _relay_url: LeanObject, - _input: LeanObject, + _node_id: LeanString, + _addrs: LeanArray, + _relay_url: LeanString, + _input: LeanString, ) -> LeanExcept { LeanExcept::error_string(ERR_MSG) } @@ -17,10 +17,19 @@ extern "C" fn rs_iroh_put( /// `Iroh.Connect.getBytes' : @& String → @& Array String → @& String → @& String → Except String GetResponse` #[unsafe(no_mangle)] extern "C" fn rs_iroh_get( - _node_id: LeanObject, - _addrs: LeanObject, - _relay_url: LeanObject, - _hash: LeanObject, + _node_id: LeanString, + _addrs: LeanArray, + _relay_url: LeanString, + _hash: LeanString, ) -> LeanExcept { LeanExcept::error_string(ERR_MSG) } + +/// `Iroh.Serve.serve' : Unit → Except String Unit` +#[unsafe(no_mangle)] +extern "C" fn rs_iroh_serve() -> LeanExcept { + LeanExcept::error_string( + "Iroh functions not supported when the Rust `net` feature is disabled \ + or on MacOS aarch64-darwin", + ) +} diff --git 
a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index 03d5b340..88222535 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -20,6 +20,7 @@ use crate::{ ffi::aiur::{ lean_unbox_g, lean_unbox_nat_as_usize, toplevel::decode_toplevel, }, + lean::{LeanAiurFriParameters, LeanAiurToplevel}, }; // ============================================================================= @@ -63,12 +64,12 @@ extern "C" fn rs_aiur_proof_of_bytes( /// `AiurSystem.build : @&Bytecode.Toplevel → @&CommitmentParameters → AiurSystem` #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_build( - toplevel: LeanObject, + toplevel: LeanAiurToplevel, commitment_parameters: LeanObject, ) -> LeanExternal { let system = AiurSystem::build( decode_toplevel(toplevel), - lean_ptr_to_commitment_parameters(commitment_parameters), + decode_commitment_parameters(commitment_parameters), ); LeanExternal::alloc(system_class(), system) } @@ -77,12 +78,12 @@ extern "C" fn rs_aiur_system_build( #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_verify( aiur_system_obj: LeanExternal, - fri_parameters: LeanObject, - claim: LeanObject, + fri_parameters: LeanAiurFriParameters, + claim: LeanArray, proof_obj: LeanExternal, ) -> LeanExcept { - let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); - let claim = claim.as_array().map(lean_unbox_g); + let fri_parameters = decode_fri_parameters(fri_parameters); + let claim = claim.map(lean_unbox_g); match aiur_system_obj.get().verify(fri_parameters, &claim, proof_obj.get()) { Ok(()) => LeanExcept::ok(LeanObject::box_usize(0)), Err(err) => LeanExcept::error_string(&format!("{err:?}")), @@ -94,17 +95,17 @@ extern "C" fn rs_aiur_system_verify( #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_prove( aiur_system_obj: LeanExternal, - fri_parameters: LeanObject, + fri_parameters: LeanAiurFriParameters, fun_idx: LeanObject, - args: LeanObject, - io_data_arr: LeanObject, - io_map_arr: LeanObject, + args: LeanArray, + io_data_arr: LeanArray, + 
io_map_arr: LeanArray, ) -> LeanObject { - let fri_parameters = lean_ctor_to_fri_parameters(fri_parameters); + let fri_parameters = decode_fri_parameters(fri_parameters); let fun_idx = lean_unbox_nat_as_usize(fun_idx); - let args = args.as_array().map(lean_unbox_g); - let io_data = io_data_arr.as_array().map(lean_unbox_g); - let io_map = lean_array_to_io_buffer_map(io_map_arr); + let args = args.map(lean_unbox_g); + let io_data = io_data_arr.map(lean_unbox_g); + let io_map = decode_io_buffer_map(io_map_arr); let mut io_buffer = IOBuffer { data: io_data, map: io_map }; let (claim, proof) = @@ -158,19 +159,19 @@ extern "C" fn rs_aiur_system_prove( // ============================================================================= /// Build a Lean `Array G` from a slice of field elements. -fn build_g_array(values: &[G]) -> LeanObject { +fn build_g_array(values: &[G]) -> LeanArray { let arr = LeanArray::alloc(values.len()); for (i, g) in values.iter().enumerate() { arr.set(i, LeanObject::box_u64(g.as_canonical_u64())); } - *arr + arr } -fn lean_ptr_to_commitment_parameters(obj: LeanObject) -> CommitmentParameters { +fn decode_commitment_parameters(obj: LeanObject) -> CommitmentParameters { CommitmentParameters { log_blowup: lean_unbox_nat_as_usize(obj) } } -fn lean_ctor_to_fri_parameters(obj: LeanObject) -> FriParameters { +fn decode_fri_parameters(obj: LeanAiurFriParameters) -> FriParameters { let ctor = obj.as_ctor(); FriParameters { log_final_poly_len: lean_unbox_nat_as_usize(ctor.get(0)), @@ -180,10 +181,9 @@ fn lean_ctor_to_fri_parameters(obj: LeanObject) -> FriParameters { } } -fn lean_array_to_io_buffer_map( - obj: LeanObject, +fn decode_io_buffer_map( + arr: LeanArray, ) -> FxHashMap, IOKeyInfo> { - let arr = obj.as_array(); let mut map = FxHashMap::with_capacity_and_hasher(arr.len(), FxBuildHasher); for elt in arr.iter() { let pair = elt.as_ctor(); diff --git a/src/ffi/aiur/toplevel.rs b/src/ffi/aiur/toplevel.rs index 133dcd22..88c2d548 100644 --- 
a/src/ffi/aiur/toplevel.rs +++ b/src/ffi/aiur/toplevel.rs @@ -1,6 +1,6 @@ use multi_stark::p3_field::PrimeCharacteristicRing; -use lean_ffi::object::LeanObject; +use lean_ffi::object::{LeanCtor, LeanObject}; use crate::{ FxIndexMap, @@ -8,20 +8,20 @@ use crate::{ G, bytecode::{Block, Ctrl, Function, FunctionLayout, Op, Toplevel, ValIdx}, }, + lean::LeanAiurToplevel, }; use crate::ffi::aiur::{lean_unbox_g, lean_unbox_nat_as_usize}; -fn lean_ptr_to_vec_val_idx(obj: LeanObject) -> Vec { +fn decode_vec_val_idx(obj: LeanObject) -> Vec { obj.as_array().map(lean_unbox_nat_as_usize) } -fn lean_ptr_to_op(obj: LeanObject) -> Op { - let ctor = obj.as_ctor(); +fn decode_op(ctor: LeanCtor) -> Op { match ctor.tag() { 0 => { let [const_val] = ctor.objs::<1>(); - Op::Const(G::from_u64(const_val.as_ptr() as u64)) + Op::Const(G::from_u64(const_val.as_enum_tag() as u64)) }, 1 => { let [a, b] = ctor.objs::<2>(); @@ -42,13 +42,13 @@ fn lean_ptr_to_op(obj: LeanObject) -> Op { 5 => { let [fun_idx, val_idxs, output_size] = ctor.objs::<3>(); let fun_idx = lean_unbox_nat_as_usize(fun_idx); - let val_idxs = lean_ptr_to_vec_val_idx(val_idxs); + let val_idxs = decode_vec_val_idx(val_idxs); let output_size = lean_unbox_nat_as_usize(output_size); Op::Call(fun_idx, val_idxs, output_size) }, 6 => { let [val_idxs] = ctor.objs::<1>(); - Op::Store(lean_ptr_to_vec_val_idx(val_idxs)) + Op::Store(decode_vec_val_idx(val_idxs)) }, 7 => { let [width, val_idx] = ctor.objs::<2>(); @@ -56,16 +56,16 @@ fn lean_ptr_to_op(obj: LeanObject) -> Op { }, 8 => { let [a, b] = ctor.objs::<2>(); - Op::AssertEq(lean_ptr_to_vec_val_idx(a), lean_ptr_to_vec_val_idx(b)) + Op::AssertEq(decode_vec_val_idx(a), decode_vec_val_idx(b)) }, 9 => { let [key] = ctor.objs::<1>(); - Op::IOGetInfo(lean_ptr_to_vec_val_idx(key)) + Op::IOGetInfo(decode_vec_val_idx(key)) }, 10 => { let [key, idx, len] = ctor.objs::<3>(); Op::IOSetInfo( - lean_ptr_to_vec_val_idx(key), + decode_vec_val_idx(key), lean_unbox_nat_as_usize(idx), 
lean_unbox_nat_as_usize(len), ) @@ -76,7 +76,7 @@ fn lean_ptr_to_op(obj: LeanObject) -> Op { }, 12 => { let [data] = ctor.objs::<1>(); - Op::IOWrite(lean_ptr_to_vec_val_idx(data)) + Op::IOWrite(decode_vec_val_idx(data)) }, 13 => { let [byte] = ctor.objs::<1>(); @@ -129,27 +129,26 @@ fn lean_ptr_to_op(obj: LeanObject) -> Op { } } -fn lean_ptr_to_g_block_pair(obj: LeanObject) -> (G, Block) { - let ctor = obj.as_ctor(); +fn decode_g_block_pair(ctor: LeanCtor) -> (G, Block) { let [g_obj, block_obj] = ctor.objs::<2>(); let g = lean_unbox_g(g_obj); - let block = lean_ptr_to_block(block_obj); + let block = decode_block(block_obj.as_ctor()); (g, block) } -fn lean_ptr_to_ctrl(obj: LeanObject) -> Ctrl { - let ctor = obj.as_ctor(); +fn decode_ctrl(ctor: LeanCtor) -> Ctrl { match ctor.tag() { 0 => { let [val_idx_obj, cases_obj, default_obj] = ctor.objs::<3>(); let val_idx = lean_unbox_nat_as_usize(val_idx_obj); - let vec_cases = cases_obj.as_array().map(lean_ptr_to_g_block_pair); + let vec_cases = + cases_obj.as_array().map(|o| decode_g_block_pair(o.as_ctor())); let cases = FxIndexMap::from_iter(vec_cases); let default = if default_obj.is_scalar() { None } else { let inner_ctor = default_obj.as_ctor(); - let block = lean_ptr_to_block(inner_ctor.get(0)); + let block = decode_block(inner_ctor.get(0).as_ctor()); Some(Box::new(block)) }; Ctrl::Match(val_idx, cases, default) @@ -157,25 +156,23 @@ fn lean_ptr_to_ctrl(obj: LeanObject) -> Ctrl { 1 => { let [sel_idx_obj, val_idxs_obj] = ctor.objs::<2>(); let sel_idx = lean_unbox_nat_as_usize(sel_idx_obj); - let val_idxs = lean_ptr_to_vec_val_idx(val_idxs_obj); + let val_idxs = decode_vec_val_idx(val_idxs_obj); Ctrl::Return(sel_idx, val_idxs) }, _ => unreachable!(), } } -fn lean_ptr_to_block(obj: LeanObject) -> Block { - let ctor = obj.as_ctor(); +fn decode_block(ctor: LeanCtor) -> Block { let [ops_obj, ctrl_obj, min_sel_obj, max_sel_obj] = ctor.objs::<4>(); - let ops = ops_obj.as_array().map(lean_ptr_to_op); - let ctrl = 
lean_ptr_to_ctrl(ctrl_obj); + let ops = ops_obj.as_array().map(|o| decode_op(o.as_ctor())); + let ctrl = decode_ctrl(ctrl_obj.as_ctor()); let min_sel_included = lean_unbox_nat_as_usize(min_sel_obj); let max_sel_excluded = lean_unbox_nat_as_usize(max_sel_obj); Block { ops, ctrl, min_sel_included, max_sel_excluded } } -fn lean_ptr_to_function_layout(obj: LeanObject) -> FunctionLayout { - let ctor = obj.as_ctor(); +fn decode_function_layout(ctor: LeanCtor) -> FunctionLayout { let [input_size, selectors, auxiliaries, lookups] = ctor.objs::<4>(); FunctionLayout { input_size: lean_unbox_nat_as_usize(input_size), @@ -185,19 +182,19 @@ fn lean_ptr_to_function_layout(obj: LeanObject) -> FunctionLayout { } } -fn lean_ptr_to_function(obj: LeanObject) -> Function { - let ctor = obj.as_ctor(); +fn decode_function(ctor: LeanCtor) -> Function { let [body_obj, layout_obj, unconstrained_obj] = ctor.objs::<3>(); - let body = lean_ptr_to_block(body_obj); - let layout = lean_ptr_to_function_layout(layout_obj); - let unconstrained = unconstrained_obj.as_ptr() as usize != 0; + let body = decode_block(body_obj.as_ctor()); + let layout = decode_function_layout(layout_obj.as_ctor()); + let unconstrained = unconstrained_obj.as_enum_tag() != 0; Function { body, layout, unconstrained } } -pub(crate) fn decode_toplevel(obj: LeanObject) -> Toplevel { +pub(crate) fn decode_toplevel(obj: LeanAiurToplevel) -> Toplevel { let ctor = obj.as_ctor(); let [functions_obj, memory_sizes_obj] = ctor.objs::<2>(); - let functions = functions_obj.as_array().map(lean_ptr_to_function); + let functions = + functions_obj.as_array().map(|o| decode_function(o.as_ctor())); let memory_sizes = memory_sizes_obj.as_array().map(lean_unbox_nat_as_usize); Toplevel { functions, memory_sizes } } diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 6c19b371..76c818a1 100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -24,7 +24,8 @@ use crate::ix::ixon::{Comm, ConstantMeta}; use crate::lean::{ 
LeanIxBlockCompareDetail, LeanIxBlockCompareResult, LeanIxCompileError, LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxDecompileError, - LeanIxSerializeError, LeanIxonRawEnv, + LeanIxSerializeError, LeanIxonRawBlob, LeanIxonRawComm, LeanIxonRawConst, + LeanIxonRawEnv, LeanIxonRawNamed, }; use lean_ffi::nat::Nat; use lean_ffi::object::LeanIOResult; @@ -47,15 +48,15 @@ use crate::ffi::ixon::env::{ build_raw_env, build_raw_name_entry, decode_raw_env, decoded_to_ixon_env, }; use crate::ffi::ixon::meta::{build_constant_meta, build_ixon_comm}; -use crate::ffi::lean_env::{GlobalCache, lean_ptr_to_env, lean_ptr_to_name}; +use crate::ffi::lean_env::{GlobalCache, decode_env, decode_name}; // ============================================================================= // Helper builders // ============================================================================= /// Build a Lean String from a Rust &str. -fn build_lean_string(s: &str) -> LeanObject { - LeanString::new(s).into() +fn build_lean_string(s: &str) -> LeanString { + LeanString::new(s) } /// Build a Lean Nat from a usize. 
@@ -68,11 +69,14 @@ fn build_lean_nat_usize(n: usize) -> LeanObject { // ============================================================================= /// Build RawConst: { addr : Address, const : Ixon.Constant } -pub fn build_raw_const(addr: &Address, constant: &IxonConstant) -> LeanObject { +pub fn build_raw_const( + addr: &Address, + constant: &IxonConstant, +) -> LeanIxonRawConst { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, build_ixon_constant(constant)); - *ctor + LeanIxonRawConst::new(*ctor) } /// Build RawNamed: { name : Ix.Name, addr : Address, constMeta : Ixon.ConstantMeta } @@ -81,28 +85,28 @@ pub fn build_raw_named( name: &Name, addr: &Address, meta: &ConstantMeta, -) -> LeanObject { +) -> LeanIxonRawNamed { let ctor = LeanCtor::alloc(0, 3, 0); ctor.set(0, build_name(cache, name)); ctor.set(1, build_address_from_ixon(addr)); ctor.set(2, build_constant_meta(meta)); - *ctor + LeanIxonRawNamed::new(*ctor) } /// Build RawBlob: { addr : Address, bytes : ByteArray } -pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanObject { +pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanIxonRawBlob { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, LeanByteArray::from_bytes(bytes)); - *ctor + LeanIxonRawBlob::new(*ctor) } /// Build RawComm: { addr : Address, comm : Ixon.Comm } -pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanObject { +pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanIxonRawComm { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, build_ixon_comm(comm)); - *ctor + LeanIxonRawComm::new(*ctor) } // ============================================================================= @@ -127,7 +131,7 @@ pub extern "C" fn rs_roundtrip_rust_condensed_blocks( result.set(0, low_links); result.set(1, blocks); result.set(2, block_refs); - (*result).into() + LeanIxCondensedBlocks::new(*result) } /// 
Round-trip a RustCompilePhases structure. @@ -148,7 +152,7 @@ pub extern "C" fn rs_roundtrip_rust_compile_phases( result.set(0, raw_env); result.set(1, condensed); result.set(2, compile_env); - (*result).into() + LeanIxCompilePhases::new(*result) } // ============================================================================= @@ -176,7 +180,7 @@ pub extern "C" fn rs_roundtrip_block_compare_result( out.set_u64(0, lean_size); out.set_u64(8, rust_size); out.set_u64(16, first_diff); - (*out).into() + LeanIxBlockCompareResult::new(*out) }, _ => unreachable!("Invalid BlockCompareResult tag: {}", ctor.tag()), } @@ -192,13 +196,15 @@ pub extern "C" fn rs_roundtrip_block_compare_detail( let lean_sharing_len = ctor.scalar_u64(1, 0); let rust_sharing_len = ctor.scalar_u64(1, 8); - let result_obj = rs_roundtrip_block_compare_result(result_ptr.into()); + let result_obj = rs_roundtrip_block_compare_result( + LeanIxBlockCompareResult::new(result_ptr), + ); let out = LeanCtor::alloc(0, 1, 16); out.set(0, result_obj); out.set_u64(8, lean_sharing_len); out.set_u64(16, rust_sharing_len); - (*out).into() + LeanIxBlockCompareDetail::new(*out) } // ============================================================================= @@ -212,7 +218,7 @@ pub extern "C" fn rs_compile_env_full( ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { // Phase 1: Decode Lean environment - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); @@ -309,7 +315,7 @@ pub extern "C" fn rs_compile_env_full( #[unsafe(no_mangle)] pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { @@ -339,8 +345,8 @@ pub extern "C" fn 
rs_compile_env(env_consts_ptr: LeanObject) -> LeanIOResult { pub extern "C" fn rs_roundtrip_raw_env( raw_env_obj: LeanIxonRawEnv, ) -> LeanIxonRawEnv { - let env = decode_raw_env(*raw_env_obj); - build_raw_env(&env).into() + let env = decode_raw_env(raw_env_obj); + build_raw_env(&env) } /// FFI function to run all compilation phases and return combined results. @@ -349,7 +355,7 @@ pub extern "C" fn rs_compile_phases( env_consts_ptr: LeanObject, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); @@ -449,7 +455,7 @@ pub extern "C" fn rs_compile_env_to_ixon( env_consts_ptr: LeanObject, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { @@ -535,7 +541,7 @@ pub extern "C" fn rs_canonicalize_env_to_ix( env_consts_ptr: LeanObject, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); let raw_env = build_raw_environment(&mut cache, &rust_env); LeanIOResult::ok(raw_env) @@ -564,7 +570,7 @@ pub struct RustCompiledEnv { #[unsafe(no_mangle)] extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObject) -> u64 { let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr, &global_cache); + let name = decode_name(name_ptr, &global_cache); // Return a magic number plus the hash of the name to verify it worked let hash = name.get_hash(); @@ -582,7 +588,7 @@ extern "C" fn rs_compile_env_rust_first( env_consts_ptr: LeanObject, ) -> *mut RustCompiledEnv { // Decode Lean environment - let lean_env = 
lean_ptr_to_env(env_consts_ptr); + let lean_env = decode_env(env_consts_ptr.as_list()); let lean_env = Arc::new(lean_env); // Compile with Rust @@ -632,7 +638,7 @@ extern "C" fn rs_compare_block( return 2u64 << 32; // not found } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; let lean_data = lean_bytes.as_bytes(); @@ -694,7 +700,7 @@ extern "C" fn rs_get_block_bytes_len( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -715,7 +721,7 @@ extern "C" fn rs_copy_block_bytes( return; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -738,7 +744,7 @@ extern "C" fn rs_get_block_sharing_len( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -858,7 +864,7 @@ extern "C" fn rs_get_pre_sharing_exprs( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -959,7 +965,7 @@ extern "C" fn rs_get_pre_sharing_exprs_len( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -1020,7 +1026,7 @@ extern "C" fn rs_lookup_const_addr( return 0; } let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(name_ptr, &global_cache); + let name = 
decode_name(name_ptr, &global_cache); let rust_env = unsafe { &*rust_env }; @@ -1063,8 +1069,8 @@ use crate::ix::ixon::error::{CompileError, DecompileError, SerializeError}; /// 4: invalidBool (value : UInt8) → 0 obj + 1 scalar (UInt8) /// 5: addressError → 0 obj + 0 scalar /// 6: invalidShareIndex (idx : UInt64) (max : Nat) → 1 obj (Nat) + 8 scalar (UInt64) -pub fn build_serialize_error(se: &SerializeError) -> LeanObject { - match se { +pub fn build_serialize_error(se: &SerializeError) -> LeanIxSerializeError { + let obj = match se { SerializeError::UnexpectedEof { expected } => { let ctor = LeanCtor::alloc(0, 1, 0); ctor.set(0, build_lean_string(expected)); @@ -1100,11 +1106,12 @@ pub fn build_serialize_error(se: &SerializeError) -> LeanObject { ctor.set_u64(8, *idx); *ctor }, - } + }; + LeanIxSerializeError::new(obj) } /// Decode a Lean Ixon.SerializeError to a Rust SerializeError. -pub fn decode_serialize_error(obj: LeanObject) -> SerializeError { +pub fn decode_serialize_error(obj: LeanIxSerializeError) -> SerializeError { // Tag 5 (addressError) has 0 fields → Lean represents as scalar if obj.is_scalar() { let tag = obj.unbox_usize(); @@ -1138,7 +1145,7 @@ pub fn decode_serialize_error(obj: LeanObject) -> SerializeError { }, 5 => SerializeError::AddressError, 6 => { - let max = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } + let max = Nat::from_obj(ctor.get(0)) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1156,8 +1163,8 @@ pub fn decode_serialize_error(obj: LeanObject) -> SerializeError { /// → 2 object fields (Nat, String) + 8 scalar bytes (UInt64) /// → `lean_alloc_ctor(tag, 2, 8)` /// → obj[0] = Nat, obj[1] = String, scalar[0] = UInt64 -pub fn build_decompile_error(err: &DecompileError) -> LeanObject { - match err { +pub fn build_decompile_error(err: &DecompileError) -> LeanIxDecompileError { + let obj = match err { DecompileError::InvalidRefIndex { idx, refs_len, constant } => { let ctor = LeanCtor::alloc(0, 2, 8); ctor.set(0, 
build_lean_nat_usize(*refs_len)); @@ -1224,15 +1231,16 @@ pub fn build_decompile_error(err: &DecompileError) -> LeanObject { ctor.set(0, build_serialize_error(se)); *ctor }, - } + }; + LeanIxDecompileError::new(obj) } /// Decode a Lean DecompileError to a Rust DecompileError. -pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { +pub fn decode_decompile_error(obj: LeanIxDecompileError) -> DecompileError { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { - let refs_len = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } + let refs_len = Nat::from_obj(ctor.get(0)) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1241,7 +1249,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidRefIndex { idx, refs_len, constant } }, 1 => { - let univs_len = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } + let univs_len = Nat::from_obj(ctor.get(0)) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1250,7 +1258,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidUnivIndex { idx, univs_len, constant } }, 2 => { - let max = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } + let max = Nat::from_obj(ctor.get(0)) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1259,7 +1267,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidShareIndex { idx, max, constant } }, 3 => { - let ctx_size = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } + let ctx_size = Nat::from_obj(ctor.get(0)) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1268,7 +1276,7 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { DecompileError::InvalidRecIndex { idx, ctx_size, constant } }, 4 => { - let max = unsafe { Nat::from_ptr(ctor.get(0).as_ptr()) } + let max = Nat::from_obj(ctor.get(0)) .to_u64() .and_then(|x| usize::try_from(x).ok()) .unwrap_or(0); @@ -1276,11 +1284,17 @@ pub fn 
decode_decompile_error(obj: LeanObject) -> DecompileError { let idx = ctor.scalar_u64(2, 0); DecompileError::InvalidUnivVarIndex { idx, max, constant } }, - 5 => DecompileError::MissingAddress(decode_ixon_address(ctor.get(0))), - 6 => DecompileError::MissingMetadata(decode_ixon_address(ctor.get(0))), - 7 => DecompileError::BlobNotFound(decode_ixon_address(ctor.get(0))), + 5 => DecompileError::MissingAddress(decode_ixon_address( + ctor.get(0).as_byte_array(), + )), + 6 => DecompileError::MissingMetadata(decode_ixon_address( + ctor.get(0).as_byte_array(), + )), + 7 => DecompileError::BlobNotFound(decode_ixon_address( + ctor.get(0).as_byte_array(), + )), 8 => { - let addr = decode_ixon_address(ctor.get(0)); + let addr = decode_ixon_address(ctor.get(0).as_byte_array()); let expected = ctor.get(1).as_string().to_string(); DecompileError::BadBlobFormat { addr, expected } }, @@ -1288,7 +1302,9 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { let msg = ctor.get(0).as_string().to_string(); DecompileError::BadConstantFormat { msg } }, - 10 => DecompileError::Serialize(decode_serialize_error(ctor.get(0))), + 10 => DecompileError::Serialize(decode_serialize_error( + LeanIxSerializeError::new(ctor.get(0)), + )), _ => unreachable!("Invalid DecompileError tag: {}", ctor.tag()), } } @@ -1302,8 +1318,8 @@ pub fn decode_decompile_error(obj: LeanObject) -> DecompileError { /// 3: unsupportedExpr (desc : String) → 1 obj /// 4: unknownUnivParam (curr param : String) → 2 obj /// 5: serializeError (msg : String) → 1 obj -pub fn build_compile_error(err: &CompileError) -> LeanObject { - match err { +pub fn build_compile_error(err: &CompileError) -> LeanIxCompileError { + let obj = match err { CompileError::MissingConstant { name } => { let ctor = LeanCtor::alloc(0, 1, 0); ctor.set(0, build_lean_string(name)); @@ -1335,18 +1351,21 @@ pub fn build_compile_error(err: &CompileError) -> LeanObject { ctor.set(0, build_serialize_error(se)); *ctor }, - } + }; + 
LeanIxCompileError::new(obj) } /// Decode a Lean CompileError to a Rust CompileError. -pub fn decode_compile_error(obj: LeanObject) -> CompileError { +pub fn decode_compile_error(obj: LeanIxCompileError) -> CompileError { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { let name = ctor.get(0).as_string().to_string(); CompileError::MissingConstant { name } }, - 1 => CompileError::MissingAddress(decode_ixon_address(ctor.get(0))), + 1 => CompileError::MissingAddress(decode_ixon_address( + ctor.get(0).as_byte_array(), + )), 2 => { let reason = ctor.get(0).as_string().to_string(); CompileError::InvalidMutualBlock { reason } @@ -1360,7 +1379,9 @@ pub fn decode_compile_error(obj: LeanObject) -> CompileError { let param = ctor.get(1).as_string().to_string(); CompileError::UnknownUnivParam { curr, param } }, - 5 => CompileError::Serialize(decode_serialize_error(ctor.get(0))), + 5 => CompileError::Serialize(decode_serialize_error( + LeanIxSerializeError::new(ctor.get(0)), + )), _ => unreachable!("Invalid CompileError tag: {}", ctor.tag()), } } @@ -1370,8 +1391,8 @@ pub fn decode_compile_error(obj: LeanObject) -> CompileError { pub extern "C" fn rs_roundtrip_decompile_error( obj: LeanIxDecompileError, ) -> LeanIxDecompileError { - let err = decode_decompile_error(*obj); - build_decompile_error(&err).into() + let err = decode_decompile_error(obj); + build_decompile_error(&err) } /// FFI: Round-trip a CompileError: Lean → Rust → Lean. @@ -1379,8 +1400,8 @@ pub extern "C" fn rs_roundtrip_decompile_error( pub extern "C" fn rs_roundtrip_compile_error( obj: LeanIxCompileError, ) -> LeanIxCompileError { - let err = decode_compile_error(*obj); - build_compile_error(&err).into() + let err = decode_compile_error(obj); + build_compile_error(&err) } /// FFI: Round-trip a SerializeError: Lean → Rust → Lean. 
@@ -1388,8 +1409,8 @@ pub extern "C" fn rs_roundtrip_compile_error( pub extern "C" fn rs_roundtrip_serialize_error( obj: LeanIxSerializeError, ) -> LeanIxSerializeError { - let err = decode_serialize_error(*obj); - build_serialize_error(&err).into() + let err = decode_serialize_error(obj); + build_serialize_error(&err) } // ============================================================================= @@ -1398,8 +1419,8 @@ pub extern "C" fn rs_roundtrip_serialize_error( /// FFI: Decompile an Ixon.RawEnv → Except DecompileError (Array (Ix.Name × Ix.ConstantInfo)). Pure. #[unsafe(no_mangle)] -pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanObject { - let decoded = decode_raw_env(*raw_env_obj); +pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanExcept { + let decoded = decode_raw_env(raw_env_obj); let env = decoded_to_ixon_env(&decoded); // Wrap in CompileState (decompile_env only uses .env) @@ -1425,8 +1446,8 @@ pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanObject { arr.set(i, *pair); } - LeanExcept::ok(arr).into() + LeanExcept::ok(arr) }, - Err(e) => LeanExcept::error(build_decompile_error(&e)).into(), + Err(e) => LeanExcept::error(build_decompile_error(&e)), } } diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index a1c22ff2..537f7f6e 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -5,17 +5,18 @@ use std::sync::Arc; use crate::ffi::ffi_io_guard; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; +use crate::lean::LeanIxCondensedBlocks; use lean_ffi::object::{LeanArray, LeanCtor, LeanIOResult, LeanObject}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::build_name; -use crate::ffi::lean_env::lean_ptr_to_env; +use crate::ffi::lean_env::decode_env; /// Build an Array (Ix.Name × Array Ix.Name) from a RefMap. 
pub fn build_ref_graph_array( cache: &mut LeanBuildCache, refs: &crate::ix::graph::RefMap, -) -> LeanObject { +) -> LeanArray { let arr = LeanArray::alloc(refs.len()); for (i, (name, ref_set)) in refs.iter().enumerate() { let name_obj = build_name(cache, name); @@ -31,14 +32,14 @@ pub fn build_ref_graph_array( pair.set(1, *refs_arr); arr.set(i, *pair); } - *arr + arr } /// Build a RustCondensedBlocks structure. pub fn build_condensed_blocks( cache: &mut LeanBuildCache, condensed: &crate::ix::condense::CondensedBlocks, -) -> LeanObject { +) -> LeanIxCondensedBlocks { // Build lowLinks: Array (Ix.Name × Ix.Name) let low_links_arr = LeanArray::alloc(condensed.low_links.len()); for (i, (name, low_link)) in condensed.low_links.iter().enumerate() { @@ -85,7 +86,7 @@ pub fn build_condensed_blocks( result.set(0, *low_links_arr); result.set(1, *blocks_arr); result.set(2, *block_refs_arr); - *result + LeanIxCondensedBlocks::new(*result) } // ============================================================================= @@ -98,7 +99,7 @@ pub extern "C" fn rs_build_ref_graph( env_consts_ptr: LeanObject, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); @@ -111,7 +112,7 @@ pub extern "C" fn rs_build_ref_graph( #[unsafe(no_mangle)] pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObject) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let condensed = compute_sccs(&ref_graph.out_refs); diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index 9656572c..4dc8f2e8 100644 --- a/src/ffi/ix/constant.rs +++ 
b/src/ffi/ix/constant.rs @@ -15,7 +15,10 @@ use crate::ix::env::{ DefinitionVal, InductiveVal, Name, OpaqueVal, QuotKind, QuotVal, RecursorRule, RecursorVal, ReducibilityHints, TheoremVal, }; -use crate::lean::LeanIxConstantInfo; +use crate::lean::{ + LeanIxConstantInfo, LeanIxConstantVal, LeanIxExpr, LeanIxName, + LeanIxRecursorRule, LeanIxReducibilityHints, +}; use lean_ffi::nat::Nat; use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; @@ -30,7 +33,7 @@ use crate::ffi::primitives::build_nat; pub fn build_constant_val( cache: &mut LeanBuildCache, cv: &ConstantVal, -) -> LeanObject { +) -> LeanIxConstantVal { // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } let name_obj = build_name(cache, &cv.name); let level_params_obj = build_name_array(cache, &cv.level_params); @@ -40,14 +43,16 @@ pub fn build_constant_val( obj.set(0, name_obj); obj.set(1, level_params_obj); obj.set(2, type_obj); - *obj + LeanIxConstantVal::new(*obj) } /// Build ReducibilityHints. /// NOTE: In Lean 4, 0-field constructors are boxed scalars when the inductive has /// other constructors with fields. So opaque and abbrev use box_usize. -pub fn build_reducibility_hints(hints: &ReducibilityHints) -> LeanObject { - match hints { +pub fn build_reducibility_hints( + hints: &ReducibilityHints, +) -> LeanIxReducibilityHints { + let obj = match hints { // | opaque -- tag 0, boxed as scalar ReducibilityHints::Opaque => LeanObject::box_usize(0), // | abbrev -- tag 1, boxed as scalar @@ -59,7 +64,8 @@ pub fn build_reducibility_hints(hints: &ReducibilityHints) -> LeanObject { obj.set_u32(0, *h); *obj }, - } + }; + LeanIxReducibilityHints::new(obj) } /// Build a Ix.ConstantInfo from a Rust ConstantInfo. @@ -266,17 +272,20 @@ fn build_recursor_rules( /// Decode Ix.ConstantVal from Lean object. 
/// ConstantVal = { name : Name, levelParams : Array Name, type : Expr } -pub fn decode_constant_val(obj: LeanObject) -> ConstantVal { +pub fn decode_constant_val(obj: LeanIxConstantVal) -> ConstantVal { let ctor = obj.as_ctor(); - let name = decode_ix_name(ctor.get(0)); - let level_params: Vec = ctor.get(1).as_array().map(decode_ix_name); - let typ = decode_ix_expr(ctor.get(2)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); + let level_params: Vec = + ctor.get(1).as_array().map(|x| decode_ix_name(LeanIxName::new(x))); + let typ = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); ConstantVal { name, level_params, typ } } /// Decode Lean.ReducibilityHints from Lean object. -pub fn decode_reducibility_hints(obj: LeanObject) -> ReducibilityHints { +pub fn decode_reducibility_hints( + obj: LeanIxReducibilityHints, +) -> ReducibilityHints { if obj.is_scalar() { let tag = obj.as_ptr() as usize >> 1; match tag { @@ -299,17 +308,17 @@ pub fn decode_reducibility_hints(obj: LeanObject) -> ReducibilityHints { } /// Decode Ix.RecursorRule from Lean object. -fn decode_recursor_rule(obj: LeanObject) -> RecursorRule { +fn decode_recursor_rule(obj: LeanIxRecursorRule) -> RecursorRule { let ctor = obj.as_ctor(); RecursorRule { - ctor: decode_ix_name(ctor.get(0)), + ctor: decode_ix_name(LeanIxName::new(ctor.get(0))), n_fields: Nat::from_obj(ctor.get(1)), - rhs: decode_ix_expr(ctor.get(2)), + rhs: decode_ix_expr(LeanIxExpr::new(ctor.get(2))), } } /// Decode Ix.ConstantInfo from Lean object. 
-pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { +pub fn decode_constant_info(obj: LeanIxConstantInfo) -> ConstantInfo { let outer = obj.as_ctor(); let inner_obj = outer.get(0); let inner = inner_obj.as_ctor(); @@ -319,7 +328,7 @@ pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { let is_unsafe = inner.scalar_u8(1, 0) != 0; ConstantInfo::AxiomInfo(AxiomVal { - cnst: decode_constant_val(inner.get(0)), + cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), is_unsafe, }) }, @@ -333,26 +342,28 @@ pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { }; ConstantInfo::DefnInfo(DefinitionVal { - cnst: decode_constant_val(inner.get(0)), - value: decode_ix_expr(inner.get(1)), - hints: decode_reducibility_hints(inner.get(2)), + cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), + value: decode_ix_expr(LeanIxExpr::new(inner.get(1))), + hints: decode_reducibility_hints(LeanIxReducibilityHints::new( + inner.get(2), + )), safety, - all: decode_name_array(inner.get(3)), + all: decode_name_array(inner.get(3).as_array()), }) }, 2 => ConstantInfo::ThmInfo(TheoremVal { - cnst: decode_constant_val(inner.get(0)), - value: decode_ix_expr(inner.get(1)), - all: decode_name_array(inner.get(2)), + cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), + value: decode_ix_expr(LeanIxExpr::new(inner.get(1))), + all: decode_name_array(inner.get(2).as_array()), }), 3 => { let is_unsafe = inner.scalar_u8(3, 0) != 0; ConstantInfo::OpaqueInfo(OpaqueVal { - cnst: decode_constant_val(inner.get(0)), - value: decode_ix_expr(inner.get(1)), + cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), + value: decode_ix_expr(LeanIxExpr::new(inner.get(1))), is_unsafe, - all: decode_name_array(inner.get(2)), + all: decode_name_array(inner.get(2).as_array()), }) }, 4 => { @@ -366,7 +377,7 @@ pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { }; ConstantInfo::QuotInfo(QuotVal { - cnst: decode_constant_val(inner.get(0)), 
+ cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), kind, }) }, @@ -376,11 +387,11 @@ pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { let is_reflexive = inner.scalar_u8(6, 2) != 0; ConstantInfo::InductInfo(InductiveVal { - cnst: decode_constant_val(inner.get(0)), + cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), num_params: Nat::from_obj(inner.get(1)), num_indices: Nat::from_obj(inner.get(2)), - all: decode_name_array(inner.get(3)), - ctors: decode_name_array(inner.get(4)), + all: decode_name_array(inner.get(3).as_array()), + ctors: decode_name_array(inner.get(4).as_array()), num_nested: Nat::from_obj(inner.get(5)), is_rec, is_unsafe, @@ -391,8 +402,8 @@ pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { let is_unsafe = inner.scalar_u8(5, 0) != 0; ConstantInfo::CtorInfo(ConstructorVal { - cnst: decode_constant_val(inner.get(0)), - induct: decode_ix_name(inner.get(1)), + cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), + induct: decode_ix_name(LeanIxName::new(inner.get(1))), cidx: Nat::from_obj(inner.get(2)), num_params: Nat::from_obj(inner.get(3)), num_fields: Nat::from_obj(inner.get(4)), @@ -403,12 +414,14 @@ pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { let k = inner.scalar_u8(7, 0) != 0; let is_unsafe = inner.scalar_u8(7, 1) != 0; - let rules: Vec = - inner.get(6).as_array().map(decode_recursor_rule); + let rules: Vec = inner + .get(6) + .as_array() + .map(|x| decode_recursor_rule(LeanIxRecursorRule::new(x))); ConstantInfo::RecInfo(RecursorVal { - cnst: decode_constant_val(inner.get(0)), - all: decode_name_array(inner.get(1)), + cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), + all: decode_name_array(inner.get(1).as_array()), num_params: Nat::from_obj(inner.get(2)), num_indices: Nat::from_obj(inner.get(3)), num_motives: Nat::from_obj(inner.get(4)), @@ -427,7 +440,7 @@ pub fn decode_constant_info(obj: LeanObject) -> ConstantInfo { pub extern "C" fn 
rs_roundtrip_ix_constant_info( info_ptr: LeanIxConstantInfo, ) -> LeanIxConstantInfo { - let info = decode_constant_info(*info_ptr); + let info = decode_constant_info(info_ptr); let mut cache = LeanBuildCache::new(); build_constant_info(&mut cache, &info) } diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index 80cfe42c..568b139e 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -4,11 +4,11 @@ use crate::ix::env::{ DataValue, Int, Name, SourceInfo, Substring, Syntax, SyntaxPreresolved, }; use crate::lean::{ - LeanIxDataValue, LeanIxInt, LeanIxSourceInfo, LeanIxSubstring, LeanIxSyntax, - LeanIxSyntaxPreresolved, + LeanIxDataValue, LeanIxInt, LeanIxName, LeanIxSourceInfo, LeanIxSubstring, + LeanIxSyntax, LeanIxSyntaxPreresolved, }; use lean_ffi::nat::Nat; -use lean_ffi::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanString}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::{build_name, decode_ix_name}; @@ -225,7 +225,7 @@ pub fn build_kvmap( /// Decode Ix.Int from Lean object. /// Ix.Int: ofNat (tag 0, 1 field) | negSucc (tag 1, 1 field) -pub fn decode_ix_int(obj: LeanObject) -> Int { +pub fn decode_ix_int(obj: LeanIxInt) -> Int { let ctor = obj.as_ctor(); let nat = Nat::from_obj(ctor.get(0)); match ctor.tag() { @@ -236,7 +236,7 @@ pub fn decode_ix_int(obj: LeanObject) -> Int { } /// Decode Ix.DataValue from a Lean object. 
-pub fn decode_data_value(obj: LeanObject) -> DataValue { +pub fn decode_data_value(obj: LeanIxDataValue) -> DataValue { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -250,7 +250,7 @@ pub fn decode_data_value(obj: LeanObject) -> DataValue { }, 2 => { // ofName: 1 object field - DataValue::OfName(decode_ix_name(ctor.get(0))) + DataValue::OfName(decode_ix_name(LeanIxName::new(ctor.get(0)))) }, 3 => { // ofNat: 1 object field @@ -269,14 +269,16 @@ pub fn decode_data_value(obj: LeanObject) -> DataValue { }, 5 => { // ofSyntax: 1 object field - DataValue::OfSyntax(decode_ix_syntax(ctor.get(0)).into()) + DataValue::OfSyntax( + decode_ix_syntax(LeanIxSyntax::new(ctor.get(0))).into(), + ) }, _ => panic!("Invalid DataValue tag: {}", ctor.tag()), } } /// Decode Ix.Syntax from a Lean object. -pub fn decode_ix_syntax(obj: LeanObject) -> Syntax { +pub fn decode_ix_syntax(obj: LeanIxSyntax) -> Syntax { if obj.is_scalar() { return Syntax::Missing; } @@ -285,24 +287,27 @@ pub fn decode_ix_syntax(obj: LeanObject) -> Syntax { 0 => Syntax::Missing, 1 => { // node: info, kind, args - let info = decode_ix_source_info(ctor.get(0)); - let kind = decode_ix_name(ctor.get(1)); - let args: Vec = ctor.get(2).as_array().map(decode_ix_syntax); + let info = decode_ix_source_info(LeanIxSourceInfo::new(ctor.get(0))); + let kind = decode_ix_name(LeanIxName::new(ctor.get(1))); + let args: Vec = + ctor.get(2).as_array().map(|x| decode_ix_syntax(LeanIxSyntax::new(x))); Syntax::Node(info, kind, args) }, 2 => { // atom: info, val - let info = decode_ix_source_info(ctor.get(0)); + let info = decode_ix_source_info(LeanIxSourceInfo::new(ctor.get(0))); Syntax::Atom(info, ctor.get(1).as_string().to_string()) }, 3 => { // ident: info, rawVal, val, preresolved - let info = decode_ix_source_info(ctor.get(0)); - let raw_val = decode_substring(ctor.get(1)); - let val = decode_ix_name(ctor.get(2)); - let preresolved: Vec = - ctor.get(3).as_array().map(decode_syntax_preresolved); + let info = 
decode_ix_source_info(LeanIxSourceInfo::new(ctor.get(0))); + let raw_val = decode_substring(LeanIxSubstring::new(ctor.get(1))); + let val = decode_ix_name(LeanIxName::new(ctor.get(2))); + let preresolved: Vec = ctor + .get(3) + .as_array() + .map(|x| decode_syntax_preresolved(LeanIxSyntaxPreresolved::new(x))); Syntax::Ident(info, raw_val, val, preresolved) }, @@ -311,7 +316,7 @@ pub fn decode_ix_syntax(obj: LeanObject) -> Syntax { } /// Decode Ix.SourceInfo. -pub fn decode_ix_source_info(obj: LeanObject) -> SourceInfo { +pub fn decode_ix_source_info(obj: LeanIxSourceInfo) -> SourceInfo { if obj.is_scalar() { return SourceInfo::None; } @@ -320,9 +325,9 @@ pub fn decode_ix_source_info(obj: LeanObject) -> SourceInfo { 0 => { // original SourceInfo::Original( - decode_substring(ctor.get(0)), + decode_substring(LeanIxSubstring::new(ctor.get(0))), Nat::from_obj(ctor.get(1)), - decode_substring(ctor.get(2)), + decode_substring(LeanIxSubstring::new(ctor.get(2))), Nat::from_obj(ctor.get(3)), ) }, @@ -342,7 +347,7 @@ pub fn decode_ix_source_info(obj: LeanObject) -> SourceInfo { } /// Decode Ix.Substring. -pub fn decode_substring(obj: LeanObject) -> Substring { +pub fn decode_substring(obj: LeanIxSubstring) -> Substring { let ctor = obj.as_ctor(); Substring { str: ctor.get(0).as_string().to_string(), @@ -352,16 +357,18 @@ pub fn decode_substring(obj: LeanObject) -> Substring { } /// Decode Ix.SyntaxPreresolved. 
-pub fn decode_syntax_preresolved(obj: LeanObject) -> SyntaxPreresolved { +pub fn decode_syntax_preresolved( + obj: LeanIxSyntaxPreresolved, +) -> SyntaxPreresolved { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { // namespace - SyntaxPreresolved::Namespace(decode_ix_name(ctor.get(0))) + SyntaxPreresolved::Namespace(decode_ix_name(LeanIxName::new(ctor.get(0)))) }, 1 => { // decl - let name = decode_ix_name(ctor.get(0)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); let aliases: Vec = ctor.get(1).as_array().map(|obj| obj.as_string().to_string()); @@ -378,7 +385,7 @@ pub fn decode_syntax_preresolved(obj: LeanObject) -> SyntaxPreresolved { /// Round-trip an Ix.Int: decode from Lean, re-encode. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_int(int_ptr: LeanIxInt) -> LeanIxInt { - let int_val = decode_ix_int(*int_ptr); + let int_val = decode_ix_int(int_ptr); build_int(&int_val) } @@ -387,7 +394,7 @@ pub extern "C" fn rs_roundtrip_ix_int(int_ptr: LeanIxInt) -> LeanIxInt { pub extern "C" fn rs_roundtrip_ix_substring( sub_ptr: LeanIxSubstring, ) -> LeanIxSubstring { - let sub = decode_substring(*sub_ptr); + let sub = decode_substring(sub_ptr); build_substring(&sub) } @@ -396,7 +403,7 @@ pub extern "C" fn rs_roundtrip_ix_substring( pub extern "C" fn rs_roundtrip_ix_source_info( si_ptr: LeanIxSourceInfo, ) -> LeanIxSourceInfo { - let si = decode_ix_source_info(*si_ptr); + let si = decode_ix_source_info(si_ptr); build_source_info(&si) } @@ -405,7 +412,7 @@ pub extern "C" fn rs_roundtrip_ix_source_info( pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( sp_ptr: LeanIxSyntaxPreresolved, ) -> LeanIxSyntaxPreresolved { - let sp = decode_syntax_preresolved(*sp_ptr); + let sp = decode_syntax_preresolved(sp_ptr); let mut cache = LeanBuildCache::new(); build_syntax_preresolved(&mut cache, &sp) } @@ -415,7 +422,7 @@ pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( pub extern "C" fn rs_roundtrip_ix_syntax( syn_ptr: LeanIxSyntax, ) -> LeanIxSyntax { - 
let syn = decode_ix_syntax(*syn_ptr); + let syn = decode_ix_syntax(syn_ptr); let mut cache = LeanBuildCache::new(); build_syntax(&mut cache, &syn) } @@ -425,7 +432,7 @@ pub extern "C" fn rs_roundtrip_ix_syntax( pub extern "C" fn rs_roundtrip_ix_data_value( dv_ptr: LeanIxDataValue, ) -> LeanIxDataValue { - let dv = decode_data_value(*dv_ptr); + let dv = decode_data_value(dv_ptr); let mut cache = LeanBuildCache::new(); build_data_value(&mut cache, &dv) } diff --git a/src/ffi/ix/env.rs b/src/ffi/ix/env.rs index 9175bc7b..b0a2cc1d 100644 --- a/src/ffi/ix/env.rs +++ b/src/ffi/ix/env.rs @@ -3,7 +3,9 @@ use rustc_hash::FxHashMap; use crate::ix::env::{ConstantInfo, Name}; -use crate::lean::{LeanIxEnvironment, LeanIxRawEnvironment}; +use crate::lean::{ + LeanIxConstantInfo, LeanIxEnvironment, LeanIxName, LeanIxRawEnvironment, +}; use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; @@ -79,7 +81,7 @@ pub fn build_hashmap_from_pairs( pub fn build_raw_environment( cache: &mut LeanBuildCache, consts: &FxHashMap, -) -> LeanObject { +) -> LeanIxRawEnvironment { // Build consts array: Array (Name × ConstantInfo) let consts_arr = LeanArray::alloc(consts.len()); for (i, (name, info)) in consts.iter().enumerate() { @@ -92,7 +94,7 @@ pub fn build_raw_environment( consts_arr.set(i, pair); } - *consts_arr + LeanIxRawEnvironment::new(*consts_arr) } // ============================================================================= @@ -169,9 +171,15 @@ where /// /// NOTE: Environment with a single field is UNBOXED by Lean, /// so the pointer IS the HashMap directly, not a structure containing it. 
-pub fn decode_ix_environment(obj: LeanObject) -> FxHashMap { +pub fn decode_ix_environment( + obj: LeanIxEnvironment, +) -> FxHashMap { // Environment is unboxed - obj IS the HashMap directly - let consts_pairs = decode_hashmap(obj, decode_ix_name, decode_constant_info); + let consts_pairs = decode_hashmap( + *obj, + |x| decode_ix_name(LeanIxName::new(x)), + |x| decode_constant_info(LeanIxConstantInfo::new(x)), + ); let mut consts: FxHashMap = FxHashMap::default(); for (name, info) in consts_pairs { consts.insert(name, info); @@ -183,15 +191,15 @@ pub fn decode_ix_environment(obj: LeanObject) -> FxHashMap { /// RawEnvironment = { consts : Array (Name × ConstantInfo) } /// NOTE: Unboxed to just Array. This version deduplicates by name. pub fn decode_ix_raw_environment( - obj: LeanObject, + obj: LeanIxRawEnvironment, ) -> FxHashMap { let arr = obj.as_array(); let mut consts: FxHashMap = FxHashMap::default(); for pair_obj in arr.iter() { let pair = pair_obj.as_ctor(); - let name = decode_ix_name(pair.get(0)); - let info = decode_constant_info(pair.get(1)); + let name = decode_ix_name(LeanIxName::new(pair.get(0))); + let info = decode_constant_info(LeanIxConstantInfo::new(pair.get(1))); consts.insert(name, info); } @@ -201,15 +209,15 @@ pub fn decode_ix_raw_environment( /// Decode Ix.RawEnvironment from Lean object preserving array structure. /// This version preserves all entries including duplicates. 
pub fn decode_ix_raw_environment_vec( - obj: LeanObject, + obj: LeanIxRawEnvironment, ) -> Vec<(Name, ConstantInfo)> { let arr = obj.as_array(); let mut consts = Vec::with_capacity(arr.len()); for pair_obj in arr.iter() { let pair = pair_obj.as_ctor(); - let name = decode_ix_name(pair.get(0)); - let info = decode_constant_info(pair.get(1)); + let name = decode_ix_name(LeanIxName::new(pair.get(0))); + let info = decode_constant_info(LeanIxConstantInfo::new(pair.get(1))); consts.push((name, info)); } @@ -220,7 +228,7 @@ pub fn decode_ix_raw_environment_vec( pub fn build_raw_environment_from_vec( cache: &mut LeanBuildCache, consts: &[(Name, ConstantInfo)], -) -> LeanObject { +) -> LeanIxRawEnvironment { let consts_arr = LeanArray::alloc(consts.len()); for (i, (name, info)) in consts.iter().enumerate() { let key_obj = build_name(cache, name); @@ -230,7 +238,7 @@ pub fn build_raw_environment_from_vec( pair.set(1, val_obj); consts_arr.set(i, pair); } - *consts_arr + LeanIxRawEnvironment::new(*consts_arr) } // ============================================================================= @@ -242,9 +250,9 @@ pub fn build_raw_environment_from_vec( pub extern "C" fn rs_roundtrip_ix_environment( env_ptr: LeanIxEnvironment, ) -> LeanIxRawEnvironment { - let env = decode_ix_environment(*env_ptr); + let env = decode_ix_environment(env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment(&mut cache, &env).into() + build_raw_environment(&mut cache, &env) } /// Round-trip an Ix.RawEnvironment: decode from Lean, re-encode. 
@@ -253,7 +261,7 @@ pub extern "C" fn rs_roundtrip_ix_environment( pub extern "C" fn rs_roundtrip_ix_raw_environment( env_ptr: LeanIxRawEnvironment, ) -> LeanIxRawEnvironment { - let env = decode_ix_raw_environment_vec(*env_ptr); + let env = decode_ix_raw_environment_vec(env_ptr); let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment_from_vec(&mut cache, &env).into() + build_raw_environment_from_vec(&mut cache, &env) } diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index 229b2845..48ca78ba 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -17,7 +17,10 @@ use crate::ix::env::{ BinderInfo, DataValue, Expr, ExprData, Level, Literal, Name, }; -use crate::lean::LeanIxExpr; +use crate::lean::{ + LeanIxBinderInfo, LeanIxDataValue, LeanIxExpr, LeanIxLevel, LeanIxLiteral, + LeanIxName, +}; use lean_ffi::nat::Nat; use lean_ffi::object::{LeanArray, LeanCtor, LeanObject, LeanString}; @@ -175,17 +178,15 @@ fn build_name_datavalue_pair( name: &Name, dv: &DataValue, ) -> LeanObject { - let name_obj = build_name(cache, name); - let dv_obj = build_data_value(cache, dv); let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, name_obj); - pair.set(1, dv_obj); + pair.set(0, build_name(cache, name)); + pair.set(1, build_data_value(cache, dv)); *pair } /// Build a Literal (natVal or strVal). -pub fn build_literal(lit: &Literal) -> LeanObject { - match lit { +pub fn build_literal(lit: &Literal) -> LeanIxLiteral { + let obj = match lit { Literal::NatVal(n) => { let obj = LeanCtor::alloc(0, 1, 0); obj.set(0, build_nat(n)); @@ -196,13 +197,14 @@ pub fn build_literal(lit: &Literal) -> LeanObject { obj.set(0, LeanString::new(s.as_str())); *obj }, - } + }; + LeanIxLiteral::new(obj) } /// Build Ix.BinderInfo enum. /// BinderInfo is a 4-constructor enum with no fields, stored as boxed scalar. 
-pub fn build_binder_info(bi: &BinderInfo) -> LeanObject { - LeanObject::box_usize(binder_info_to_u8(bi) as usize) +pub fn build_binder_info(bi: &BinderInfo) -> LeanIxBinderInfo { + LeanIxBinderInfo::new(LeanObject::box_usize(binder_info_to_u8(bi) as usize)) } /// Convert BinderInfo to u8 tag. @@ -216,7 +218,7 @@ pub fn binder_info_to_u8(bi: &BinderInfo) -> u8 { } /// Decode a Lean Ix.Expr to Rust Expr. -pub fn decode_ix_expr(obj: LeanObject) -> Expr { +pub fn decode_ix_expr(obj: LeanIxExpr) -> Expr { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -226,37 +228,38 @@ pub fn decode_ix_expr(obj: LeanObject) -> Expr { }, 1 => { // fvar - let name = decode_ix_name(ctor.get(0)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); Expr::fvar(name) }, 2 => { // mvar - let name = decode_ix_name(ctor.get(0)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); Expr::mvar(name) }, 3 => { // sort - let level = decode_ix_level(ctor.get(0)); + let level = decode_ix_level(LeanIxLevel::new(ctor.get(0))); Expr::sort(level) }, 4 => { // const - let name = decode_ix_name(ctor.get(0)); - let levels: Vec = ctor.get(1).as_array().map(decode_ix_level); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); + let levels: Vec = + ctor.get(1).as_array().map(|x| decode_ix_level(LeanIxLevel::new(x))); Expr::cnst(name, levels) }, 5 => { // app - let fn_expr = decode_ix_expr(ctor.get(0)); - let arg_expr = decode_ix_expr(ctor.get(1)); + let fn_expr = decode_ix_expr(LeanIxExpr::new(ctor.get(0))); + let arg_expr = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); Expr::app(fn_expr, arg_expr) }, 6 => { // lam: name, ty, body, hash, bi (scalar) - let name = decode_ix_name(ctor.get(0)); - let ty = decode_ix_expr(ctor.get(1)); - let body = decode_ix_expr(ctor.get(2)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); + let ty = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); + let body = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); // Read BinderInfo scalar (4 obj fields: 
name, ty, body, hash) let bi_byte = ctor.scalar_u8(4, 0); @@ -266,9 +269,9 @@ pub fn decode_ix_expr(obj: LeanObject) -> Expr { }, 7 => { // forallE: same layout as lam - let name = decode_ix_name(ctor.get(0)); - let ty = decode_ix_expr(ctor.get(1)); - let body = decode_ix_expr(ctor.get(2)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); + let ty = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); + let body = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); // 4 obj fields: name, ty, body, hash let bi_byte = ctor.scalar_u8(4, 0); @@ -278,10 +281,10 @@ pub fn decode_ix_expr(obj: LeanObject) -> Expr { }, 8 => { // letE: name, ty, val, body, hash, nonDep (scalar) - let name = decode_ix_name(ctor.get(0)); - let ty = decode_ix_expr(ctor.get(1)); - let val = decode_ix_expr(ctor.get(2)); - let body = decode_ix_expr(ctor.get(3)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); + let ty = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); + let val = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); + let body = decode_ix_expr(LeanIxExpr::new(ctor.get(3))); // 5 obj fields: name, ty, val, body, hash let non_dep = ctor.scalar_u8(5, 0) != 0; @@ -290,7 +293,7 @@ pub fn decode_ix_expr(obj: LeanObject) -> Expr { }, 9 => { // lit - let lit = decode_literal(ctor.get(0)); + let lit = decode_literal(LeanIxLiteral::new(ctor.get(0))); Expr::lit(lit) }, 10 => { @@ -298,14 +301,14 @@ pub fn decode_ix_expr(obj: LeanObject) -> Expr { let data: Vec<(Name, DataValue)> = ctor.get(0).as_array().map(decode_name_data_value); - let inner = decode_ix_expr(ctor.get(1)); + let inner = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); Expr::mdata(data, inner) }, 11 => { // proj: typeName, idx, struct, hash - let type_name = decode_ix_name(ctor.get(0)); + let type_name = decode_ix_name(LeanIxName::new(ctor.get(0))); let idx = Nat::from_obj(ctor.get(1)); - let struct_expr = decode_ix_expr(ctor.get(2)); + let struct_expr = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); Expr::proj(type_name, idx, 
struct_expr) }, @@ -314,7 +317,7 @@ pub fn decode_ix_expr(obj: LeanObject) -> Expr { } /// Decode Lean.Literal from a Lean object. -pub fn decode_literal(obj: LeanObject) -> Literal { +pub fn decode_literal(obj: LeanIxLiteral) -> Literal { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -332,10 +335,9 @@ pub fn decode_literal(obj: LeanObject) -> Literal { /// Decode a (Name × DataValue) pair for mdata. fn decode_name_data_value(obj: LeanObject) -> (Name, DataValue) { - // Prod: ctor 0 with 2 fields let ctor = obj.as_ctor(); - let name = decode_ix_name(ctor.get(0)); - let dv = decode_data_value(ctor.get(1)); + let name = decode_ix_name(LeanIxName::new(ctor.get(0))); + let dv = decode_data_value(LeanIxDataValue::new(ctor.get(1))); (name, dv) } @@ -353,7 +355,7 @@ pub fn decode_binder_info(bi_byte: u8) -> BinderInfo { /// Round-trip an Ix.Expr: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: LeanIxExpr) -> LeanIxExpr { - let expr = decode_ix_expr(*expr_ptr); + let expr = decode_ix_expr(expr_ptr); let mut cache = LeanBuildCache::new(); build_expr(&mut cache, &expr) } diff --git a/src/ffi/ix/level.rs b/src/ffi/ix/level.rs index 61ba7048..2bf0c630 100644 --- a/src/ffi/ix/level.rs +++ b/src/ffi/ix/level.rs @@ -9,8 +9,8 @@ //! - Tag 5: mvar (n : Name) (hash : Address) use crate::ix::env::{Level, LevelData}; -use crate::lean::LeanIxLevel; -use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; +use crate::lean::{LeanIxLevel, LeanIxName}; +use lean_ffi::object::{LeanArray, LeanCtor}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; @@ -89,30 +89,30 @@ pub fn build_level_array( } /// Decode a Lean Ix.Level to Rust Level. 
-pub fn decode_ix_level(obj: LeanObject) -> Level { +pub fn decode_ix_level(obj: LeanIxLevel) -> Level { let ctor = obj.as_ctor(); match ctor.tag() { 0 => Level::zero(), 1 => { - let x = decode_ix_level(ctor.get(0)); + let x = decode_ix_level(LeanIxLevel::new(ctor.get(0))); Level::succ(x) }, 2 => { - let x = decode_ix_level(ctor.get(0)); - let y = decode_ix_level(ctor.get(1)); + let x = decode_ix_level(LeanIxLevel::new(ctor.get(0))); + let y = decode_ix_level(LeanIxLevel::new(ctor.get(1))); Level::max(x, y) }, 3 => { - let x = decode_ix_level(ctor.get(0)); - let y = decode_ix_level(ctor.get(1)); + let x = decode_ix_level(LeanIxLevel::new(ctor.get(0))); + let y = decode_ix_level(LeanIxLevel::new(ctor.get(1))); Level::imax(x, y) }, 4 => { - let n = decode_ix_name(ctor.get(0)); + let n = decode_ix_name(LeanIxName::new(ctor.get(0))); Level::param(n) }, 5 => { - let n = decode_ix_name(ctor.get(0)); + let n = decode_ix_name(LeanIxName::new(ctor.get(0))); Level::mvar(n) }, _ => panic!("Invalid Ix.Level tag: {}", ctor.tag()), @@ -120,14 +120,14 @@ pub fn decode_ix_level(obj: LeanObject) -> Level { } /// Decode Array of Levels from Lean pointer. -pub fn decode_level_array(obj: LeanObject) -> Vec { - obj.as_array().map(decode_ix_level) +pub fn decode_level_array(obj: LeanArray) -> Vec { + obj.map(|x| decode_ix_level(LeanIxLevel::new(x))) } /// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_level(level_ptr: LeanIxLevel) -> LeanIxLevel { - let level = decode_ix_level(*level_ptr); + let level = decode_ix_level(level_ptr); let mut cache = LeanBuildCache::new(); build_level(&mut cache, &level) } diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index fe153b1c..aeef2393 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -8,7 +8,7 @@ use crate::ix::env::{Name, NameData}; use crate::lean::LeanIxName; use lean_ffi::nat::Nat; -use lean_ffi::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanString}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::address::build_address; @@ -66,7 +66,7 @@ pub fn build_name_array( } /// Decode a Lean Ix.Name to Rust Name. -pub fn decode_ix_name(obj: LeanObject) -> Name { +pub fn decode_ix_name(obj: LeanIxName) -> Name { let ctor = obj.as_ctor(); match ctor.tag() { 0 => { @@ -75,13 +75,13 @@ pub fn decode_ix_name(obj: LeanObject) -> Name { }, 1 => { // str: parent, s, hash - let parent = decode_ix_name(ctor.get(0)); + let parent = decode_ix_name(LeanIxName::new(ctor.get(0))); let s = ctor.get(1).as_string().to_string(); Name::str(parent, s) }, 2 => { // num: parent, i, hash - let parent = decode_ix_name(ctor.get(0)); + let parent = decode_ix_name(LeanIxName::new(ctor.get(0))); let i = Nat::from_obj(ctor.get(1)); Name::num(parent, i) }, @@ -90,14 +90,14 @@ pub fn decode_ix_name(obj: LeanObject) -> Name { } /// Decode Array of Names from Lean pointer. -pub fn decode_name_array(obj: LeanObject) -> Vec { - obj.as_array().map(decode_ix_name) +pub fn decode_name_array(obj: LeanArray) -> Vec { + obj.map(|x| decode_ix_name(LeanIxName::new(x))) } /// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_name(name_ptr: LeanIxName) -> LeanIxName { - let name = decode_ix_name(*name_ptr); + let name = decode_ix_name(name_ptr); let mut cache = LeanBuildCache::new(); build_name(&mut cache, &name) } diff --git a/src/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs index 998a2eeb..f3595cf9 100644 --- a/src/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -6,11 +6,11 @@ use crate::ix::compile::{BlockCache, CompileState, compile_env, compile_expr}; use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; -use crate::lean::LeanIxBlockCompareDetail; +use crate::lean::{LeanIxBlockCompareDetail, LeanIxBlockCompareResult}; use lean_ffi::object::{LeanByteArray, LeanCtor, LeanObject}; use crate::ffi::lean_env::{ - Cache as LeanCache, GlobalCache, lean_ptr_to_expr, lean_ptr_to_name, + Cache as LeanCache, GlobalCache, decode_expr, decode_name, }; /// Rust-side compiled environment for block comparison. @@ -28,7 +28,7 @@ pub extern "C" fn rs_compare_expr_compilation( // Decode Lean.Expr to Rust's representation let global_cache = GlobalCache::default(); let mut cache = LeanCache::new(&global_cache); - let lean_expr = lean_ptr_to_expr(lean_expr_ptr, &mut cache); + let lean_expr = decode_expr(lean_expr_ptr, &mut cache); // Create universe params for de Bruijn indexing (u0, u1, u2, ...) let univ_params: Vec = (0..univ_ctx_size) @@ -68,8 +68,8 @@ fn build_block_compare_result( lean_size: u64, rust_size: u64, first_diff_offset: u64, -) -> LeanObject { - if matched { +) -> LeanIxBlockCompareResult { + let obj = if matched { *LeanCtor::alloc(0, 0, 0) // match } else if not_found { *LeanCtor::alloc(2, 0, 0) // notFound @@ -80,20 +80,21 @@ fn build_block_compare_result( ctor.set_u64(8, rust_size); ctor.set_u64(16, first_diff_offset); *ctor - } + }; + LeanIxBlockCompareResult::new(obj) } /// Build a BlockCompareDetail Lean object. 
fn build_block_compare_detail( - result: LeanObject, + result: LeanIxBlockCompareResult, lean_sharing_len: u64, rust_sharing_len: u64, -) -> LeanObject { +) -> LeanIxBlockCompareDetail { let ctor = LeanCtor::alloc(0, 1, 16); ctor.set(0, result); ctor.set_u64(8, lean_sharing_len); ctor.set_u64(8 + 8, rust_sharing_len); - *ctor + LeanIxBlockCompareDetail::new(*ctor) } /// Compare a single block by lowlink name. @@ -109,7 +110,7 @@ pub unsafe extern "C" fn rs_compare_block_v2( lean_sharing_len: u64, ) -> LeanIxBlockCompareDetail { let global_cache = GlobalCache::default(); - let name = lean_ptr_to_name(lowlink_name, &global_cache); + let name = decode_name(lowlink_name, &global_cache); let rust_env = unsafe { &*rust_env }; let lean_data = lean_bytes.as_bytes(); @@ -121,7 +122,7 @@ pub unsafe extern "C" fn rs_compare_block_v2( // Block not found in Rust compilation let result = build_block_compare_result(false, true, lean_data.len() as u64, 0, 0); - return build_block_compare_detail(result, lean_sharing_len, 0).into(); + return build_block_compare_detail(result, lean_sharing_len, 0); }, }; @@ -139,8 +140,7 @@ pub unsafe extern "C" fn rs_compare_block_v2( result, lean_sharing_len, rust_sharing_len, - ) - .into(); + ); } // Mismatch: find first differing byte @@ -163,7 +163,7 @@ pub unsafe extern "C" fn rs_compare_block_v2( rust_bytes.len() as u64, first_diff_offset, ); - build_block_compare_detail(result, lean_sharing_len, rust_sharing_len).into() + build_block_compare_detail(result, lean_sharing_len, rust_sharing_len) } /// Free a RustBlockEnv pointer. 
@@ -185,10 +185,10 @@ pub unsafe extern "C" fn rs_free_compiled_env(ptr: *mut RustBlockEnv) { pub extern "C" fn rs_build_compiled_env( env_consts_ptr: LeanObject, ) -> *mut RustBlockEnv { - use crate::ffi::lean_env::lean_ptr_to_env; + use crate::ffi::lean_env::decode_env; // Decode Lean environment - let rust_env = lean_ptr_to_env(env_consts_ptr); + let rust_env = decode_env(env_consts_ptr.as_list()); let rust_env = std::sync::Arc::new(rust_env); // Compile diff --git a/src/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs index 5559e4cf..b674b2e6 100644 --- a/src/ffi/ixon/constant.rs +++ b/src/ffi/ixon/constant.rs @@ -18,11 +18,11 @@ use crate::ix::ixon::constant::{ use crate::lean::{ LeanIxAddress, LeanIxonAxiom, LeanIxonConstant, LeanIxonConstantInfo, LeanIxonConstructor, LeanIxonConstructorProj, LeanIxonDefinition, - LeanIxonDefinitionProj, LeanIxonInductive, LeanIxonInductiveProj, - LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, LeanIxonRecursorProj, - LeanIxonRecursorRule, + LeanIxonDefinitionProj, LeanIxonExpr, LeanIxonInductive, + LeanIxonInductiveProj, LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, + LeanIxonRecursorProj, LeanIxonRecursorRule, }; -use lean_ffi::object::{LeanArray, LeanByteArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanByteArray, LeanCtor}; use crate::ffi::ixon::expr::{ build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, @@ -47,7 +47,7 @@ pub fn build_address_array(addrs: &[Address]) -> LeanArray { /// Build Ixon.Definition /// Lean stores scalar fields ordered by size (largest first). 
/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn build_ixon_definition(def: &IxonDefinition) -> LeanObject { +pub fn build_ixon_definition(def: &IxonDefinition) -> LeanIxonDefinition { let typ_obj = build_ixon_expr(&def.typ); let value_obj = build_ixon_expr(&def.value); // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) @@ -68,22 +68,24 @@ pub fn build_ixon_definition(def: &IxonDefinition) -> LeanObject { crate::ix::env::DefinitionSafety::Partial => 2, }; ctor.set_u8(25, safety_val); - *ctor + LeanIxonDefinition::new(*ctor) } /// Build Ixon.RecursorRule -pub fn build_ixon_recursor_rule(rule: &IxonRecursorRule) -> LeanObject { +pub fn build_ixon_recursor_rule( + rule: &IxonRecursorRule, +) -> LeanIxonRecursorRule { let rhs_obj = build_ixon_expr(&rule.rhs); // 1 obj field, 8 scalar bytes let ctor = LeanCtor::alloc(0, 1, 8); ctor.set(0, rhs_obj); ctor.set_u64(8, rule.fields); - *ctor + LeanIxonRecursorRule::new(*ctor) } /// Build Ixon.Recursor /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanObject { +pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanIxonRecursor { let typ_obj = build_ixon_expr(&rec.typ); // Build rules array let rules_arr = LeanArray::alloc(rec.rules.len()); @@ -102,12 +104,12 @@ pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanObject { ctor.set_u64(48, rec.minors); ctor.set_u8(56, if rec.k { 1 } else { 0 }); ctor.set_u8(57, if rec.is_unsafe { 1 } else { 0 }); - *ctor + LeanIxonRecursor::new(*ctor) } /// Build Ixon.Axiom /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanObject { +pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanIxonAxiom { let typ_obj = build_ixon_expr(&ax.typ); // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) let ctor = LeanCtor::alloc(0, 1, 16); @@ 
-115,13 +117,13 @@ pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanObject { // Scalar offsets from obj_cptr: 1*8=8 base ctor.set_u64(8, ax.lvls); ctor.set_u8(16, if ax.is_unsafe { 1 } else { 0 }); - *ctor + LeanIxonAxiom::new(*ctor) } /// Build Ixon.Quotient /// QuotKind is a simple enum stored as scalar u8, not object field. /// Scalars ordered by size: lvls(8) + kind(1) + padding(7) -pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanObject { +pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanIxonQuotient { let typ_obj = build_ixon_expr(&quot.typ); // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7)) let ctor = LeanCtor::alloc(0, 1, 16); @@ -135,12 +137,12 @@ pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanObject { crate::ix::env::QuotKind::Ind => 3, }; ctor.set_u8(16, kind_val); - *ctor + LeanIxonQuotient::new(*ctor) } /// Build Ixon.Constructor /// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_constructor(c: &IxonConstructor) -> LeanObject { +pub fn build_ixon_constructor(c: &IxonConstructor) -> LeanIxonConstructor { let typ_obj = build_ixon_expr(&c.typ); // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) let ctor = LeanCtor::alloc(0, 1, 40); @@ -151,12 +153,12 @@ ctor.set_u64(16, c.cidx); ctor.set_u64(24, c.params); ctor.set_u64(32, c.fields); ctor.set_u8(40, if c.is_unsafe { 1 } else { 0 }); - *ctor + LeanIxonConstructor::new(*ctor) } /// Build Ixon.Inductive /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn build_ixon_inductive(ind: &IxonInductive) -> LeanObject { +pub fn build_ixon_inductive(ind: &IxonInductive) -> LeanIxonInductive { let typ_obj = build_ixon_expr(&ind.typ); // Build ctors array let ctors_arr = LeanArray::alloc(ind.ctors.len()); @@ -175,49 +177,51 @@
ctor.set_u8(48, if ind.recr { 1 } else { 0 }); ctor.set_u8(49, if ind.refl { 1 } else { 0 }); ctor.set_u8(50, if ind.is_unsafe { 1 } else { 0 }); - *ctor + LeanIxonInductive::new(*ctor) } /// Build Ixon.InductiveProj -pub fn build_inductive_proj(proj: &InductiveProj) -> LeanObject { +pub fn build_inductive_proj(proj: &InductiveProj) -> LeanIxonInductiveProj { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 8); ctor.set(0, block_obj); ctor.set_u64(8, proj.idx); - *ctor + LeanIxonInductiveProj::new(*ctor) } /// Build Ixon.ConstructorProj -pub fn build_constructor_proj(proj: &ConstructorProj) -> LeanObject { +pub fn build_constructor_proj( + proj: &ConstructorProj, +) -> LeanIxonConstructorProj { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 16); ctor.set(0, block_obj); ctor.set_u64(8, proj.idx); ctor.set_u64(16, proj.cidx); - *ctor + LeanIxonConstructorProj::new(*ctor) } /// Build Ixon.RecursorProj -pub fn build_recursor_proj(proj: &RecursorProj) -> LeanObject { +pub fn build_recursor_proj(proj: &RecursorProj) -> LeanIxonRecursorProj { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 8); ctor.set(0, block_obj); ctor.set_u64(8, proj.idx); - *ctor + LeanIxonRecursorProj::new(*ctor) } /// Build Ixon.DefinitionProj -pub fn build_definition_proj(proj: &DefinitionProj) -> LeanObject { +pub fn build_definition_proj(proj: &DefinitionProj) -> LeanIxonDefinitionProj { let block_obj = build_address_from_ixon(&proj.block); let ctor = LeanCtor::alloc(0, 1, 8); ctor.set(0, block_obj); ctor.set_u64(8, proj.idx); - *ctor + LeanIxonDefinitionProj::new(*ctor) } /// Build Ixon.MutConst -pub fn build_mut_const(mc: &MutConst) -> LeanObject { - match mc { +pub fn build_mut_const(mc: &MutConst) -> LeanIxonMutConst { + let obj = match mc { MutConst::Defn(def) => { let def_obj = build_ixon_definition(def); let ctor = LeanCtor::alloc(0, 1, 0); @@ -236,12 +240,15 @@ pub fn 
build_mut_const(mc: &MutConst) -> LeanObject { ctor.set(0, rec_obj); *ctor }, - } + }; + LeanIxonMutConst::new(obj) } /// Build Ixon.ConstantInfo (9 constructors) -pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> LeanObject { - match info { +pub fn build_ixon_constant_info( + info: &IxonConstantInfo, +) -> LeanIxonConstantInfo { + let obj = match info { IxonConstantInfo::Defn(def) => { let def_obj = build_ixon_definition(def); let ctor = LeanCtor::alloc(0, 1, 0); @@ -299,11 +306,12 @@ pub fn build_ixon_constant_info(info: &IxonConstantInfo) -> LeanObject { ctor.set(0, arr); *ctor }, - } + }; + LeanIxonConstantInfo::new(obj) } /// Build Ixon.Constant -pub fn build_ixon_constant(constant: &IxonConstant) -> LeanObject { +pub fn build_ixon_constant(constant: &IxonConstant) -> LeanIxonConstant { let info_obj = build_ixon_constant_info(&constant.info); let sharing_obj = build_ixon_expr_array(&constant.sharing); let refs_obj = build_address_array(&constant.refs); @@ -313,7 +321,7 @@ pub fn build_ixon_constant(constant: &IxonConstant) -> LeanObject { ctor.set(1, sharing_obj); ctor.set(2, refs_obj); ctor.set(3, univs_obj); - *ctor + LeanIxonConstant::new(*ctor) } // ============================================================================= @@ -321,24 +329,23 @@ pub fn build_ixon_constant(constant: &IxonConstant) -> LeanObject { // ============================================================================= /// Decode a ByteArray (Address) to Address. -pub fn decode_ixon_address(obj: LeanObject) -> Address { - let ba = obj.as_byte_array(); - Address::from_slice(&ba.as_bytes()[..32]).expect("Address should be 32 bytes") +pub fn decode_ixon_address(obj: LeanIxAddress) -> Address { + Address::from_slice(&obj.as_bytes()[..32]) + .expect("Address should be 32 bytes") } /// Decode Array Address. -pub fn decode_ixon_address_array(obj: LeanObject) -> Vec
<Address> { - let arr = obj.as_array(); - arr.map(decode_ixon_address) +pub fn decode_ixon_address_array(obj: LeanArray) -> Vec<Address>
{ + obj.map(|x| decode_ixon_address(x.as_byte_array())) } /// Decode Ixon.Definition. /// Lean stores scalar fields ordered by size (largest first). /// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn decode_ixon_definition(obj: LeanObject) -> IxonDefinition { +pub fn decode_ixon_definition(obj: LeanIxonDefinition) -> IxonDefinition { let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(ctor.get(0))); - let value = Arc::new(decode_ixon_expr(ctor.get(1))); + let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); + let value = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(1)))); let lvls = ctor.scalar_u64(2, 0); let kind_val = ctor.scalar_u8(2, 8); let kind = match kind_val { @@ -358,20 +365,23 @@ pub fn decode_ixon_definition(obj: LeanObject) -> IxonDefinition { } /// Decode Ixon.RecursorRule. -pub fn decode_ixon_recursor_rule(obj: LeanObject) -> IxonRecursorRule { +pub fn decode_ixon_recursor_rule( + obj: LeanIxonRecursorRule, +) -> IxonRecursorRule { let ctor = obj.as_ctor(); - let rhs = Arc::new(decode_ixon_expr(ctor.get(0))); + let rhs = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); let fields = ctor.scalar_u64(1, 0); IxonRecursorRule { fields, rhs } } /// Decode Ixon.Recursor. 
/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn decode_ixon_recursor(obj: LeanObject) -> IxonRecursor { +pub fn decode_ixon_recursor(obj: LeanIxonRecursor) -> IxonRecursor { let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); let rules_arr = ctor.get(1).as_array(); - let rules = rules_arr.map(decode_ixon_recursor_rule); + let rules = + rules_arr.map(|x| decode_ixon_recursor_rule(LeanIxonRecursorRule::new(x))); let lvls = ctor.scalar_u64(2, 0); let params = ctor.scalar_u64(2, 8); let indices = ctor.scalar_u64(2, 16); @@ -394,9 +404,9 @@ pub fn decode_ixon_recursor(obj: LeanObject) -> IxonRecursor { /// Decode Ixon.Axiom. /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_axiom(obj: LeanObject) -> IxonAxiom { +pub fn decode_ixon_axiom(obj: LeanIxonAxiom) -> IxonAxiom { let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); let lvls = ctor.scalar_u64(1, 0); let is_unsafe = ctor.scalar_u8(1, 8) != 0; IxonAxiom { is_unsafe, lvls, typ } @@ -404,9 +414,9 @@ pub fn decode_ixon_axiom(obj: LeanObject) -> IxonAxiom { /// Decode Ixon.Quotient. /// QuotKind is a scalar (not object field). Scalars: lvls(8) + kind(1) + padding(7) -pub fn decode_ixon_quotient(obj: LeanObject) -> IxonQuotient { +pub fn decode_ixon_quotient(obj: LeanIxonQuotient) -> IxonQuotient { let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); let lvls = ctor.scalar_u64(1, 0); let kind_val = ctor.scalar_u8(1, 8); let kind = match kind_val { @@ -421,9 +431,9 @@ pub fn decode_ixon_quotient(obj: LeanObject) -> IxonQuotient { /// Decode Ixon.Constructor. 
/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_constructor(obj: LeanObject) -> IxonConstructor { +pub fn decode_ixon_constructor(obj: LeanIxonConstructor) -> IxonConstructor { let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); let lvls = ctor.scalar_u64(1, 0); let cidx = ctor.scalar_u64(1, 8); let params = ctor.scalar_u64(1, 16); @@ -434,11 +444,12 @@ pub fn decode_ixon_constructor(obj: LeanObject) -> IxonConstructor { /// Decode Ixon.Inductive. /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn decode_ixon_inductive(obj: LeanObject) -> IxonInductive { +pub fn decode_ixon_inductive(obj: LeanIxonInductive) -> IxonInductive { let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(ctor.get(0))); + let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); let ctors_arr = ctor.get(1).as_array(); - let ctors = ctors_arr.map(decode_ixon_constructor); + let ctors = + ctors_arr.map(|x| decode_ixon_constructor(LeanIxonConstructor::new(x))); let lvls = ctor.scalar_u64(2, 0); let params = ctor.scalar_u64(2, 8); let indices = ctor.scalar_u64(2, 16); @@ -460,66 +471,86 @@ pub fn decode_ixon_inductive(obj: LeanObject) -> IxonInductive { } /// Decode Ixon.InductiveProj. -pub fn decode_ixon_inductive_proj(obj: LeanObject) -> InductiveProj { +pub fn decode_ixon_inductive_proj(obj: LeanIxonInductiveProj) -> InductiveProj { let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0)); + let block = decode_ixon_address(ctor.get(0).as_byte_array()); let idx = ctor.scalar_u64(1, 0); InductiveProj { idx, block } } /// Decode Ixon.ConstructorProj. 
-pub fn decode_ixon_constructor_proj(obj: LeanObject) -> ConstructorProj { +pub fn decode_ixon_constructor_proj( + obj: LeanIxonConstructorProj, +) -> ConstructorProj { let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0)); + let block = decode_ixon_address(ctor.get(0).as_byte_array()); let idx = ctor.scalar_u64(1, 0); let cidx = ctor.scalar_u64(1, 8); ConstructorProj { idx, cidx, block } } /// Decode Ixon.RecursorProj. -pub fn decode_ixon_recursor_proj(obj: LeanObject) -> RecursorProj { +pub fn decode_ixon_recursor_proj(obj: LeanIxonRecursorProj) -> RecursorProj { let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0)); + let block = decode_ixon_address(ctor.get(0).as_byte_array()); let idx = ctor.scalar_u64(1, 0); RecursorProj { idx, block } } /// Decode Ixon.DefinitionProj. -pub fn decode_ixon_definition_proj(obj: LeanObject) -> DefinitionProj { +pub fn decode_ixon_definition_proj( + obj: LeanIxonDefinitionProj, +) -> DefinitionProj { let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0)); + let block = decode_ixon_address(ctor.get(0).as_byte_array()); let idx = ctor.scalar_u64(1, 0); DefinitionProj { idx, block } } /// Decode Ixon.MutConst. -pub fn decode_ixon_mut_const(obj: LeanObject) -> MutConst { +pub fn decode_ixon_mut_const(obj: LeanIxonMutConst) -> MutConst { let ctor = obj.as_ctor(); let inner = ctor.get(0); match ctor.tag() { - 0 => MutConst::Defn(decode_ixon_definition(inner)), - 1 => MutConst::Indc(decode_ixon_inductive(inner)), - 2 => MutConst::Recr(decode_ixon_recursor(inner)), + 0 => MutConst::Defn(decode_ixon_definition(LeanIxonDefinition::new(inner))), + 1 => MutConst::Indc(decode_ixon_inductive(LeanIxonInductive::new(inner))), + 2 => MutConst::Recr(decode_ixon_recursor(LeanIxonRecursor::new(inner))), tag => panic!("Invalid Ixon.MutConst tag: {}", tag), } } /// Decode Ixon.ConstantInfo. 
-pub fn decode_ixon_constant_info(obj: LeanObject) -> IxonConstantInfo { +pub fn decode_ixon_constant_info( + obj: LeanIxonConstantInfo, +) -> IxonConstantInfo { let ctor = obj.as_ctor(); let inner = ctor.get(0); match ctor.tag() { - 0 => IxonConstantInfo::Defn(decode_ixon_definition(inner)), - 1 => IxonConstantInfo::Recr(decode_ixon_recursor(inner)), - 2 => IxonConstantInfo::Axio(decode_ixon_axiom(inner)), - 3 => IxonConstantInfo::Quot(decode_ixon_quotient(inner)), - 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj(inner)), - 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj(inner)), - 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj(inner)), - 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj(inner)), + 0 => IxonConstantInfo::Defn(decode_ixon_definition( + LeanIxonDefinition::new(inner), + )), + 1 => { + IxonConstantInfo::Recr(decode_ixon_recursor(LeanIxonRecursor::new(inner))) + }, + 2 => IxonConstantInfo::Axio(decode_ixon_axiom(LeanIxonAxiom::new(inner))), + 3 => { + IxonConstantInfo::Quot(decode_ixon_quotient(LeanIxonQuotient::new(inner))) + }, + 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj( + LeanIxonConstructorProj::new(inner), + )), + 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj( + LeanIxonRecursorProj::new(inner), + )), + 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj( + LeanIxonInductiveProj::new(inner), + )), + 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj( + LeanIxonDefinitionProj::new(inner), + )), 8 => { let arr = inner.as_array(); - let muts = arr.map(decode_ixon_mut_const); + let muts = arr.map(|x| decode_ixon_mut_const(LeanIxonMutConst::new(x))); IxonConstantInfo::Muts(muts) }, tag => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), @@ -527,13 +558,13 @@ pub fn decode_ixon_constant_info(obj: LeanObject) -> IxonConstantInfo { } /// Decode Ixon.Constant. 
-pub fn decode_ixon_constant(obj: LeanObject) -> IxonConstant { +pub fn decode_ixon_constant(obj: LeanIxonConstant) -> IxonConstant { let ctor = obj.as_ctor(); IxonConstant { - info: decode_ixon_constant_info(ctor.get(0)), - sharing: decode_ixon_expr_array(ctor.get(1)), - refs: decode_ixon_address_array(ctor.get(2)), - univs: decode_ixon_univ_array(ctor.get(3)), + info: decode_ixon_constant_info(LeanIxonConstantInfo::new(ctor.get(0))), + sharing: decode_ixon_expr_array(ctor.get(1).as_array()), + refs: decode_ixon_address_array(ctor.get(2).as_array()), + univs: decode_ixon_univ_array(ctor.get(3).as_array()), } } @@ -546,8 +577,8 @@ pub fn decode_ixon_constant(obj: LeanObject) -> IxonConstant { pub extern "C" fn rs_roundtrip_ixon_definition( obj: LeanIxonDefinition, ) -> LeanIxonDefinition { - let def = decode_ixon_definition(*obj); - build_ixon_definition(&def).into() + let def = decode_ixon_definition(obj); + build_ixon_definition(&def) } /// Round-trip Ixon.Recursor. @@ -555,15 +586,15 @@ pub extern "C" fn rs_roundtrip_ixon_definition( pub extern "C" fn rs_roundtrip_ixon_recursor( obj: LeanIxonRecursor, ) -> LeanIxonRecursor { - let rec = decode_ixon_recursor(*obj); - build_ixon_recursor(&rec).into() + let rec = decode_ixon_recursor(obj); + build_ixon_recursor(&rec) } /// Round-trip Ixon.Axiom. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanIxonAxiom) -> LeanIxonAxiom { - let ax = decode_ixon_axiom(*obj); - build_ixon_axiom(&ax).into() + let ax = decode_ixon_axiom(obj); + build_ixon_axiom(&ax) } /// Round-trip Ixon.Quotient. @@ -571,8 +602,8 @@ pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanIxonAxiom) -> LeanIxonAxiom { pub extern "C" fn rs_roundtrip_ixon_quotient( obj: LeanIxonQuotient, ) -> LeanIxonQuotient { - let quot = decode_ixon_quotient(*obj); - build_ixon_quotient(&quot).into() + let quot = decode_ixon_quotient(obj); + build_ixon_quotient(&quot) } /// Round-trip Ixon.ConstantInfo. 
@@ -580,8 +611,8 @@ pub extern "C" fn rs_roundtrip_ixon_quotient( pub extern "C" fn rs_roundtrip_ixon_constant_info( obj: LeanIxonConstantInfo, ) -> LeanIxonConstantInfo { - let info = decode_ixon_constant_info(*obj); - build_ixon_constant_info(&info).into() + let info = decode_ixon_constant_info(obj); + build_ixon_constant_info(&info) } /// Round-trip Ixon.Constant. @@ -589,8 +620,8 @@ pub extern "C" fn rs_roundtrip_ixon_constant_info( pub extern "C" fn rs_roundtrip_ixon_constant( obj: LeanIxonConstant, ) -> LeanIxonConstant { - let constant = decode_ixon_constant(*obj); - build_ixon_constant(&constant).into() + let constant = decode_ixon_constant(obj); + build_ixon_constant(&constant) } /// Round-trip Ixon.RecursorRule. @@ -598,8 +629,8 @@ pub extern "C" fn rs_roundtrip_ixon_constant( pub extern "C" fn rs_roundtrip_ixon_recursor_rule( obj: LeanIxonRecursorRule, ) -> LeanIxonRecursorRule { - let rule = decode_ixon_recursor_rule(*obj); - build_ixon_recursor_rule(&rule).into() + let rule = decode_ixon_recursor_rule(obj); + build_ixon_recursor_rule(&rule) } /// Round-trip Ixon.Constructor. @@ -607,8 +638,8 @@ pub extern "C" fn rs_roundtrip_ixon_recursor_rule( pub extern "C" fn rs_roundtrip_ixon_constructor( obj: LeanIxonConstructor, ) -> LeanIxonConstructor { - let c = decode_ixon_constructor(*obj); - build_ixon_constructor(&c).into() + let c = decode_ixon_constructor(obj); + build_ixon_constructor(&c) } /// Round-trip Ixon.Inductive. @@ -616,8 +647,8 @@ pub extern "C" fn rs_roundtrip_ixon_constructor( pub extern "C" fn rs_roundtrip_ixon_inductive( obj: LeanIxonInductive, ) -> LeanIxonInductive { - let ind = decode_ixon_inductive(*obj); - build_ixon_inductive(&ind).into() + let ind = decode_ixon_inductive(obj); + build_ixon_inductive(&ind) } /// Round-trip Ixon.InductiveProj. 
@@ -625,8 +656,8 @@ pub extern "C" fn rs_roundtrip_ixon_inductive( pub extern "C" fn rs_roundtrip_ixon_inductive_proj( obj: LeanIxonInductiveProj, ) -> LeanIxonInductiveProj { - let proj = decode_ixon_inductive_proj(*obj); - build_inductive_proj(&proj).into() + let proj = decode_ixon_inductive_proj(obj); + build_inductive_proj(&proj) } /// Round-trip Ixon.ConstructorProj. @@ -634,8 +665,8 @@ pub extern "C" fn rs_roundtrip_ixon_inductive_proj( pub extern "C" fn rs_roundtrip_ixon_constructor_proj( obj: LeanIxonConstructorProj, ) -> LeanIxonConstructorProj { - let proj = decode_ixon_constructor_proj(*obj); - build_constructor_proj(&proj).into() + let proj = decode_ixon_constructor_proj(obj); + build_constructor_proj(&proj) } /// Round-trip Ixon.RecursorProj. @@ -643,8 +674,8 @@ pub extern "C" fn rs_roundtrip_ixon_constructor_proj( pub extern "C" fn rs_roundtrip_ixon_recursor_proj( obj: LeanIxonRecursorProj, ) -> LeanIxonRecursorProj { - let proj = decode_ixon_recursor_proj(*obj); - build_recursor_proj(&proj).into() + let proj = decode_ixon_recursor_proj(obj); + build_recursor_proj(&proj) } /// Round-trip Ixon.DefinitionProj. @@ -652,8 +683,8 @@ pub extern "C" fn rs_roundtrip_ixon_recursor_proj( pub extern "C" fn rs_roundtrip_ixon_definition_proj( obj: LeanIxonDefinitionProj, ) -> LeanIxonDefinitionProj { - let proj = decode_ixon_definition_proj(*obj); - build_definition_proj(&proj).into() + let proj = decode_ixon_definition_proj(obj); + build_definition_proj(&proj) } /// Round-trip Ixon.MutConst. @@ -661,6 +692,6 @@ pub extern "C" fn rs_roundtrip_ixon_definition_proj( pub extern "C" fn rs_roundtrip_ixon_mut_const( obj: LeanIxonMutConst, ) -> LeanIxonMutConst { - let mc = decode_ixon_mut_const(*obj); - build_mut_const(&mc).into() + let mc = decode_ixon_mut_const(obj); + build_mut_const(&mc) } diff --git a/src/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs index fce3f393..a0b5f592 100644 --- a/src/ffi/ixon/enums.rs +++ b/src/ffi/ixon/enums.rs @@ -1,39 +1,38 @@ //! 
Ixon enum types: DefKind, DefinitionSafety, QuotKind build/decode/roundtrip FFI. -use std::ffi::c_void; - use crate::ix::env::{DefinitionSafety, QuotKind}; use crate::ix::ixon::constant::DefKind; use crate::lean::{ LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, }; use lean_ffi::object::LeanObject; - /// Build Ixon.DefKind /// | defn -- tag 0 /// | opaq -- tag 1 /// | thm -- tag 2 /// Simple enums are passed as raw (unboxed) tag values across Lean FFI. -pub fn build_def_kind(kind: &DefKind) -> LeanObject { +pub fn build_def_kind(kind: &DefKind) -> LeanIxonDefKind { let tag = match kind { DefKind::Definition => 0, DefKind::Opaque => 1, DefKind::Theorem => 2, }; - unsafe { LeanObject::from_raw(tag as *const c_void) } + LeanIxonDefKind::new(LeanObject::from_enum_tag(tag)) } /// Build Ixon.DefinitionSafety /// | unsaf -- tag 0 /// | safe -- tag 1 /// | part -- tag 2 -pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> LeanObject { +pub fn build_ixon_definition_safety( + safety: &DefinitionSafety, +) -> LeanIxonDefinitionSafety { let tag = match safety { DefinitionSafety::Unsafe => 0, DefinitionSafety::Safe => 1, DefinitionSafety::Partial => 2, }; - unsafe { LeanObject::from_raw(tag as *const c_void) } + LeanIxonDefinitionSafety::new(LeanObject::from_enum_tag(tag)) } /// Build Ixon.QuotKind @@ -41,14 +40,14 @@ pub fn build_ixon_definition_safety(safety: &DefinitionSafety) -> LeanObject { /// | ctor -- tag 1 /// | lift -- tag 2 /// | ind -- tag 3 -pub fn build_ixon_quot_kind(kind: &QuotKind) -> LeanObject { +pub fn build_ixon_quot_kind(kind: &QuotKind) -> LeanIxonQuotKind { let tag = match kind { QuotKind::Type => 0, QuotKind::Ctor => 1, QuotKind::Lift => 2, QuotKind::Ind => 3, }; - unsafe { LeanObject::from_raw(tag as *const c_void) } + LeanIxonQuotKind::new(LeanObject::from_enum_tag(tag)) } // ============================================================================= @@ -56,8 +55,8 @@ pub fn build_ixon_quot_kind(kind: &QuotKind) -> 
LeanObject { // ============================================================================= /// Decode Ixon.DefKind (simple enum, raw unboxed tag value). -pub fn decode_ixon_def_kind(obj: LeanObject) -> DefKind { - let tag = obj.as_ptr() as usize; +pub fn decode_ixon_def_kind(obj: LeanIxonDefKind) -> DefKind { + let tag = obj.as_enum_tag(); match tag { 0 => DefKind::Definition, 1 => DefKind::Opaque, @@ -67,8 +66,10 @@ pub fn decode_ixon_def_kind(obj: LeanObject) -> DefKind { } /// Decode Ixon.DefinitionSafety (simple enum, raw unboxed tag value). -pub fn decode_ixon_definition_safety(obj: LeanObject) -> DefinitionSafety { - let tag = obj.as_ptr() as usize; +pub fn decode_ixon_definition_safety( + obj: LeanIxonDefinitionSafety, +) -> DefinitionSafety { + let tag = obj.as_enum_tag(); match tag { 0 => DefinitionSafety::Unsafe, 1 => DefinitionSafety::Safe, @@ -78,8 +79,8 @@ pub fn decode_ixon_definition_safety(obj: LeanObject) -> DefinitionSafety { } /// Decode Ixon.QuotKind (simple enum, raw unboxed tag value). -pub fn decode_ixon_quot_kind(obj: LeanObject) -> QuotKind { - let tag = obj.as_ptr() as usize; +pub fn decode_ixon_quot_kind(obj: LeanIxonQuotKind) -> QuotKind { + let tag = obj.as_enum_tag(); match tag { 0 => QuotKind::Type, 1 => QuotKind::Ctor, @@ -98,8 +99,8 @@ pub fn decode_ixon_quot_kind(obj: LeanObject) -> QuotKind { pub extern "C" fn rs_roundtrip_ixon_def_kind( obj: LeanIxonDefKind, ) -> LeanIxonDefKind { - let kind = decode_ixon_def_kind(*obj); - build_def_kind(&kind).into() + let kind = decode_ixon_def_kind(obj); + build_def_kind(&kind) } /// Round-trip Ixon.DefinitionSafety. 
@@ -107,8 +108,8 @@ pub extern "C" fn rs_roundtrip_ixon_def_kind( pub extern "C" fn rs_roundtrip_ixon_definition_safety( obj: LeanIxonDefinitionSafety, ) -> LeanIxonDefinitionSafety { - let safety = decode_ixon_definition_safety(*obj); - build_ixon_definition_safety(&safety).into() + let safety = decode_ixon_definition_safety(obj); + build_ixon_definition_safety(&safety) } /// Round-trip Ixon.QuotKind. @@ -116,6 +117,6 @@ pub extern "C" fn rs_roundtrip_ixon_definition_safety( pub extern "C" fn rs_roundtrip_ixon_quot_kind( obj: LeanIxonQuotKind, ) -> LeanIxonQuotKind { - let kind = decode_ixon_quot_kind(*obj); - build_ixon_quot_kind(&kind).into() + let kind = decode_ixon_quot_kind(obj); + build_ixon_quot_kind(&kind) } diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs index 879a05f0..e45c08ba 100644 --- a/src/ffi/ixon/env.rs +++ b/src/ffi/ixon/env.rs @@ -9,10 +9,12 @@ use crate::ix::ixon::comm::Comm; use crate::ix::ixon::constant::Constant as IxonConstant; use crate::ix::ixon::env::{Env as IxonEnv, Named as IxonNamed}; use crate::ix::ixon::metadata::ConstantMeta; -use crate::lean::LeanIxonRawEnv; -use lean_ffi::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, +use crate::lean::{ + LeanIxName, LeanIxonComm, LeanIxonConstant, LeanIxonConstantMeta, + LeanIxonRawBlob, LeanIxonRawComm, LeanIxonRawConst, LeanIxonRawEnv, + LeanIxonRawNameEntry, LeanIxonRawNamed, }; +use lean_ffi::object::{LeanArray, LeanByteArray, LeanCtor, LeanExcept}; use crate::ffi::builder::LeanBuildCache; use crate::ffi::ix::name::{build_name, decode_ix_name}; @@ -34,20 +36,20 @@ pub struct DecodedComm { /// Decode Ixon.Comm from Lean pointer. 
/// Comm = { secret : Address, payload : Address } -pub fn decode_comm(obj: LeanObject) -> DecodedComm { +pub fn decode_comm(obj: LeanIxonComm) -> DecodedComm { let ctor = obj.as_ctor(); DecodedComm { - secret: decode_ixon_address(ctor.get(0)), - payload: decode_ixon_address(ctor.get(1)), + secret: decode_ixon_address(ctor.get(0).as_byte_array()), + payload: decode_ixon_address(ctor.get(1).as_byte_array()), } } /// Build Ixon.Comm Lean object. -pub fn build_comm(comm: &DecodedComm) -> LeanObject { +pub fn build_comm(comm: &DecodedComm) -> LeanIxonComm { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&comm.secret)); ctor.set(1, build_address_from_ixon(&comm.payload)); - *ctor + LeanIxonComm::new(*ctor) } // ============================================================================= @@ -61,20 +63,20 @@ pub struct DecodedRawConst { } /// Decode Ixon.RawConst from Lean pointer. -pub fn decode_raw_const(obj: LeanObject) -> DecodedRawConst { +pub fn decode_raw_const(obj: LeanIxonRawConst) -> DecodedRawConst { let ctor = obj.as_ctor(); DecodedRawConst { - addr: decode_ixon_address(ctor.get(0)), - constant: decode_ixon_constant(ctor.get(1)), + addr: decode_ixon_address(ctor.get(0).as_byte_array()), + constant: decode_ixon_constant(LeanIxonConstant::new(ctor.get(1))), } } /// Build Ixon.RawConst Lean object. -pub fn build_raw_const(rc: &DecodedRawConst) -> LeanObject { +pub fn build_raw_const(rc: &DecodedRawConst) -> LeanIxonRawConst { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&rc.addr)); ctor.set(1, build_ixon_constant(&rc.constant)); - *ctor + LeanIxonRawConst::new(*ctor) } // ============================================================================= @@ -89,12 +91,12 @@ pub struct DecodedRawNamed { } /// Decode Ixon.RawNamed from Lean pointer. 
-pub fn decode_raw_named(obj: LeanObject) -> DecodedRawNamed { +pub fn decode_raw_named(obj: LeanIxonRawNamed) -> DecodedRawNamed { let ctor = obj.as_ctor(); DecodedRawNamed { - name: decode_ix_name(ctor.get(0)), - addr: decode_ixon_address(ctor.get(1)), - const_meta: decode_constant_meta(ctor.get(2)), + name: decode_ix_name(LeanIxName::new(ctor.get(0))), + addr: decode_ixon_address(ctor.get(1).as_byte_array()), + const_meta: decode_constant_meta(LeanIxonConstantMeta::new(ctor.get(2))), } } @@ -102,12 +104,12 @@ pub fn decode_raw_named(obj: LeanObject) -> DecodedRawNamed { pub fn build_raw_named( cache: &mut LeanBuildCache, rn: &DecodedRawNamed, -) -> LeanObject { +) -> LeanIxonRawNamed { let ctor = LeanCtor::alloc(0, 3, 0); ctor.set(0, build_name(cache, &rn.name)); ctor.set(1, build_address_from_ixon(&rn.addr)); ctor.set(2, build_constant_meta(&rn.const_meta)); - *ctor + LeanIxonRawNamed::new(*ctor) } // ============================================================================= @@ -121,21 +123,21 @@ pub struct DecodedRawBlob { } /// Decode Ixon.RawBlob from Lean pointer. -pub fn decode_raw_blob(obj: LeanObject) -> DecodedRawBlob { +pub fn decode_raw_blob(obj: LeanIxonRawBlob) -> DecodedRawBlob { let ctor = obj.as_ctor(); let ba = ctor.get(1).as_byte_array(); DecodedRawBlob { - addr: decode_ixon_address(ctor.get(0)), + addr: decode_ixon_address(ctor.get(0).as_byte_array()), bytes: ba.as_bytes().to_vec(), } } /// Build Ixon.RawBlob Lean object. -pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanObject { +pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanIxonRawBlob { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&rb.addr)); ctor.set(1, LeanByteArray::from_bytes(&rb.bytes)); - *ctor + LeanIxonRawBlob::new(*ctor) } // ============================================================================= @@ -149,20 +151,20 @@ pub struct DecodedRawComm { } /// Decode Ixon.RawComm from Lean pointer. 
-pub fn decode_raw_comm(obj: LeanObject) -> DecodedRawComm { +pub fn decode_raw_comm(obj: LeanIxonRawComm) -> DecodedRawComm { let ctor = obj.as_ctor(); DecodedRawComm { - addr: decode_ixon_address(ctor.get(0)), - comm: decode_comm(ctor.get(1)), + addr: decode_ixon_address(ctor.get(0).as_byte_array()), + comm: decode_comm(LeanIxonComm::new(ctor.get(1))), } } /// Build Ixon.RawComm Lean object. -pub fn build_raw_comm(rc: &DecodedRawComm) -> LeanObject { +pub fn build_raw_comm(rc: &DecodedRawComm) -> LeanIxonRawComm { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(&rc.addr)); ctor.set(1, build_comm(&rc.comm)); - *ctor + LeanIxonRawComm::new(*ctor) } // ============================================================================= @@ -176,11 +178,11 @@ pub struct DecodedRawNameEntry { } /// Decode Ixon.RawNameEntry from Lean pointer. -pub fn decode_raw_name_entry(obj: LeanObject) -> DecodedRawNameEntry { +pub fn decode_raw_name_entry(obj: LeanIxonRawNameEntry) -> DecodedRawNameEntry { let ctor = obj.as_ctor(); DecodedRawNameEntry { - addr: decode_ixon_address(ctor.get(0)), - name: decode_ix_name(ctor.get(1)), + addr: decode_ixon_address(ctor.get(0).as_byte_array()), + name: decode_ix_name(LeanIxName::new(ctor.get(1))), } } @@ -189,11 +191,11 @@ pub fn build_raw_name_entry( cache: &mut LeanBuildCache, addr: &Address, name: &Name, -) -> LeanObject { +) -> LeanIxonRawNameEntry { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, build_address_from_ixon(addr)); ctor.set(1, build_name(cache, name)); - *ctor + LeanIxonRawNameEntry::new(*ctor) } // ============================================================================= @@ -210,7 +212,7 @@ pub struct DecodedRawEnv { } /// Decode Ixon.RawEnv from Lean pointer. 
-pub fn decode_raw_env(obj: LeanObject) -> DecodedRawEnv { +pub fn decode_raw_env(obj: LeanIxonRawEnv) -> DecodedRawEnv { let ctor = obj.as_ctor(); let consts_arr = ctor.get(0).as_array(); let named_arr = ctor.get(1).as_array(); @@ -219,16 +221,17 @@ pub fn decode_raw_env(obj: LeanObject) -> DecodedRawEnv { let names_arr = ctor.get(4).as_array(); DecodedRawEnv { - consts: consts_arr.map(decode_raw_const), - named: named_arr.map(decode_raw_named), - blobs: blobs_arr.map(decode_raw_blob), - comms: comms_arr.map(decode_raw_comm), - names: names_arr.map(decode_raw_name_entry), + consts: consts_arr.map(|x| decode_raw_const(LeanIxonRawConst::new(x))), + named: named_arr.map(|x| decode_raw_named(LeanIxonRawNamed::new(x))), + blobs: blobs_arr.map(|x| decode_raw_blob(LeanIxonRawBlob::new(x))), + comms: comms_arr.map(|x| decode_raw_comm(LeanIxonRawComm::new(x))), + names: names_arr + .map(|x| decode_raw_name_entry(LeanIxonRawNameEntry::new(x))), } } /// Build Ixon.RawEnv Lean object. -pub fn build_raw_env(env: &DecodedRawEnv) -> LeanObject { +pub fn build_raw_env(env: &DecodedRawEnv) -> LeanIxonRawEnv { let mut cache = LeanBuildCache::new(); // Build consts array @@ -268,7 +271,7 @@ pub fn build_raw_env(env: &DecodedRawEnv) -> LeanObject { ctor.set(2, blobs_arr); ctor.set(3, comms_arr); ctor.set(4, names_arr); - *ctor + LeanIxonRawEnv::new(*ctor) } // ============================================================================= @@ -352,7 +355,7 @@ pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { /// FFI: Serialize an Ixon.RawEnv -> ByteArray via Rust's Env.put. Pure. 
#[unsafe(no_mangle)] pub extern "C" fn rs_ser_env(obj: LeanIxonRawEnv) -> LeanByteArray { - let decoded = decode_raw_env(*obj); + let decoded = decode_raw_env(obj); let env = decoded_to_ixon_env(&decoded); let mut buf = Vec::new(); env.put(&mut buf).expect("Env serialization failed"); diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs index b984cb1a..b5ee586b 100644 --- a/src/ffi/ixon/expr.rs +++ b/src/ffi/ixon/expr.rs @@ -4,11 +4,11 @@ use std::sync::Arc; use crate::ix::ixon::expr::Expr as IxonExpr; use crate::lean::LeanIxonExpr; -use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor}; /// Build Ixon.Expr (12 constructors). -pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObject { - match expr { +pub fn build_ixon_expr(expr: &IxonExpr) -> LeanIxonExpr { + let obj = match expr { IxonExpr::Sort(idx) => { let ctor = LeanCtor::alloc(0, 0, 8); ctor.set_u64(0, *idx); @@ -101,7 +101,8 @@ pub fn build_ixon_expr(expr: &IxonExpr) -> LeanObject { ctor.set_u64(0, *idx); *ctor }, - } + }; + LeanIxonExpr::new(obj) } /// Build an Array of Ixon.Expr. @@ -118,9 +119,8 @@ pub fn build_ixon_expr_array(exprs: &[Arc]) -> LeanArray { // ============================================================================= /// Decode Array UInt64 from Lean. -fn decode_u64_array(obj: LeanObject) -> Vec { - let arr = obj.as_array(); - arr +fn decode_u64_array(obj: LeanArray) -> Vec { + obj .iter() .map(|elem| { if elem.is_scalar() { @@ -134,7 +134,7 @@ fn decode_u64_array(obj: LeanObject) -> Vec { } /// Decode Ixon.Expr (12 constructors). 
-pub fn decode_ixon_expr(obj: LeanObject) -> IxonExpr { +pub fn decode_ixon_expr(obj: LeanIxonExpr) -> IxonExpr { let ctor = obj.as_ctor(); let tag = ctor.tag(); match tag { @@ -147,19 +147,17 @@ pub fn decode_ixon_expr(obj: LeanObject) -> IxonExpr { IxonExpr::Var(idx) }, 2 => { - let arr_obj = ctor.get(0); let ref_idx = ctor.scalar_u64(1, 0); - let univ_idxs = decode_u64_array(arr_obj); + let univ_idxs = decode_u64_array(ctor.get(0).as_array()); IxonExpr::Ref(ref_idx, univ_idxs) }, 3 => { - let arr_obj = ctor.get(0); let rec_idx = ctor.scalar_u64(1, 0); - let univ_idxs = decode_u64_array(arr_obj); + let univ_idxs = decode_u64_array(ctor.get(0).as_array()); IxonExpr::Rec(rec_idx, univ_idxs) }, 4 => { - let val_obj = ctor.get(0); + let val_obj = LeanIxonExpr::new(ctor.get(0)); let type_ref_idx = ctor.scalar_u64(1, 0); let field_idx = ctor.scalar_u64(1, 8); IxonExpr::Prj( @@ -177,33 +175,33 @@ pub fn decode_ixon_expr(obj: LeanObject) -> IxonExpr { IxonExpr::Nat(ref_idx) }, 7 => { - let f_obj = ctor.get(0); - let a_obj = ctor.get(1); + let f_obj = LeanIxonExpr::new(ctor.get(0)); + let a_obj = LeanIxonExpr::new(ctor.get(1)); IxonExpr::App( Arc::new(decode_ixon_expr(f_obj)), Arc::new(decode_ixon_expr(a_obj)), ) }, 8 => { - let ty_obj = ctor.get(0); - let body_obj = ctor.get(1); + let ty_obj = LeanIxonExpr::new(ctor.get(0)); + let body_obj = LeanIxonExpr::new(ctor.get(1)); IxonExpr::Lam( Arc::new(decode_ixon_expr(ty_obj)), Arc::new(decode_ixon_expr(body_obj)), ) }, 9 => { - let ty_obj = ctor.get(0); - let body_obj = ctor.get(1); + let ty_obj = LeanIxonExpr::new(ctor.get(0)); + let body_obj = LeanIxonExpr::new(ctor.get(1)); IxonExpr::All( Arc::new(decode_ixon_expr(ty_obj)), Arc::new(decode_ixon_expr(body_obj)), ) }, 10 => { - let ty_obj = ctor.get(0); - let val_obj = ctor.get(1); - let body_obj = ctor.get(2); + let ty_obj = LeanIxonExpr::new(ctor.get(0)); + let val_obj = LeanIxonExpr::new(ctor.get(1)); + let body_obj = LeanIxonExpr::new(ctor.get(2)); let non_dep = 
ctor.scalar_u8(3, 0) != 0; IxonExpr::Let( non_dep, @@ -221,9 +219,8 @@ pub fn decode_ixon_expr(obj: LeanObject) -> IxonExpr { } /// Decode Array Ixon.Expr. -pub fn decode_ixon_expr_array(obj: LeanObject) -> Vec> { - let arr = obj.as_array(); - arr.map(|e| Arc::new(decode_ixon_expr(e))) +pub fn decode_ixon_expr_array(obj: LeanArray) -> Vec> { + obj.map(|e| Arc::new(decode_ixon_expr(LeanIxonExpr::new(e)))) } // ============================================================================= @@ -233,6 +230,6 @@ pub fn decode_ixon_expr_array(obj: LeanObject) -> Vec> { /// Round-trip Ixon.Expr. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_expr(obj: LeanIxonExpr) -> LeanIxonExpr { - let expr = decode_ixon_expr(*obj); - build_ixon_expr(&expr).into() + let expr = decode_ixon_expr(obj); + build_ixon_expr(&expr) } diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs index 630cf09c..7aa3cfaa 100644 --- a/src/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -10,8 +10,9 @@ use crate::ix::ixon::metadata::{ ConstantMeta, DataValue as IxonDataValue, ExprMeta, ExprMetaData, KVMap, }; use crate::lean::{ - LeanIxonComm, LeanIxonConstantMeta, LeanIxonDataValue, LeanIxonExprMetaArena, - LeanIxonExprMetaData, LeanIxonNamed, + LeanIxReducibilityHints, LeanIxonComm, LeanIxonConstantMeta, + LeanIxonDataValue, LeanIxonExprMetaArena, LeanIxonExprMetaData, + LeanIxonNamed, }; use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; @@ -29,8 +30,8 @@ use crate::ffi::ixon::constant::{ // ============================================================================= /// Build Ixon.DataValue (for metadata) -pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanObject { - match dv { +pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanIxonDataValue { + let obj = match dv { IxonDataValue::OfString(addr) => { let ctor = LeanCtor::alloc(0, 1, 0); ctor.set(0, build_address_from_ixon(addr)); @@ -61,22 +62,29 @@ pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanObject { ctor.set(0, 
build_address_from_ixon(addr)); *ctor }, - } + }; + LeanIxonDataValue::new(obj) } /// Decode Ixon.DataValue. -pub fn decode_ixon_data_value(obj: LeanObject) -> IxonDataValue { +pub fn decode_ixon_data_value(obj: LeanIxonDataValue) -> IxonDataValue { let ctor = obj.as_ctor(); match ctor.tag() { - 0 => IxonDataValue::OfString(decode_ixon_address(ctor.get(0))), + 0 => { + IxonDataValue::OfString(decode_ixon_address(ctor.get(0).as_byte_array())) + }, 1 => { let b = ctor.scalar_u8(0, 0) != 0; IxonDataValue::OfBool(b) }, - 2 => IxonDataValue::OfName(decode_ixon_address(ctor.get(0))), - 3 => IxonDataValue::OfNat(decode_ixon_address(ctor.get(0))), - 4 => IxonDataValue::OfInt(decode_ixon_address(ctor.get(0))), - 5 => IxonDataValue::OfSyntax(decode_ixon_address(ctor.get(0))), + 2 => { + IxonDataValue::OfName(decode_ixon_address(ctor.get(0).as_byte_array())) + }, + 3 => IxonDataValue::OfNat(decode_ixon_address(ctor.get(0).as_byte_array())), + 4 => IxonDataValue::OfInt(decode_ixon_address(ctor.get(0).as_byte_array())), + 5 => { + IxonDataValue::OfSyntax(decode_ixon_address(ctor.get(0).as_byte_array())) + }, tag => panic!("Invalid Ixon.DataValue tag: {}", tag), } } @@ -107,24 +115,22 @@ pub fn build_kvmap_array(kvmaps: &[KVMap]) -> LeanArray { } /// Decode KVMap (Array (Address × DataValue)). -pub fn decode_ixon_kvmap(obj: LeanObject) -> KVMap { - let arr = obj.as_array(); - arr +pub fn decode_ixon_kvmap(obj: LeanArray) -> KVMap { + obj .iter() .map(|pair| { let pair_ctor = pair.as_ctor(); ( - decode_ixon_address(pair_ctor.get(0)), - decode_ixon_data_value(pair_ctor.get(1)), + decode_ixon_address(pair_ctor.get(0).as_byte_array()), + decode_ixon_data_value(LeanIxonDataValue::new(pair_ctor.get(1))), ) }) .collect() } /// Decode Array KVMap. 
-fn decode_kvmap_array(obj: LeanObject) -> Vec { - let arr = obj.as_array(); - arr.map(decode_ixon_kvmap) +fn decode_kvmap_array(obj: LeanArray) -> Vec { + obj.map(|x| decode_ixon_kvmap(x.as_array())) } // ============================================================================= @@ -132,7 +138,7 @@ fn decode_kvmap_array(obj: LeanObject) -> Vec { // ============================================================================= /// Decode Array Address. -fn decode_address_array(obj: LeanObject) -> Vec
{ +fn decode_address_array(obj: LeanArray) -> Vec
{ decode_ixon_address_array(obj) } @@ -146,9 +152,8 @@ fn build_u64_array(vals: &[u64]) -> LeanArray { } /// Decode Array UInt64. -fn decode_u64_array(obj: LeanObject) -> Vec { - let arr = obj.as_array(); - arr.iter().map(|elem| elem.unbox_u64()).collect() +fn decode_u64_array(obj: LeanArray) -> Vec { + obj.iter().map(|elem| elem.unbox_u64()).collect() } // ============================================================================= @@ -166,8 +171,8 @@ fn decode_u64_array(obj: LeanObject) -> Vec { /// | ref | 4 | 1 (name: Address) | 0 | /// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | /// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | -pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanObject { - match node { +pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanIxonExprMetaData { + let obj = match node { ExprMetaData::Leaf => LeanObject::box_usize(0), ExprMetaData::App { children } => { @@ -223,11 +228,12 @@ pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanObject { ctor.set_u64(8, *child); *ctor }, - } + }; + LeanIxonExprMetaData::new(obj) } /// Decode Ixon.ExprMetaData from Lean pointer. 
-pub fn decode_expr_meta_data(obj: LeanObject) -> ExprMetaData { +pub fn decode_expr_meta_data(obj: LeanIxonExprMetaData) -> ExprMetaData { // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) if obj.is_scalar() { let tag = obj.as_ptr() as usize >> 1; @@ -246,7 +252,7 @@ pub fn decode_expr_meta_data(obj: LeanObject) -> ExprMetaData { 2 => { // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] - let name = decode_ixon_address(ctor.get(0)); + let name = decode_ixon_address(ctor.get(0).as_byte_array()); let ty_child = ctor.scalar_u64(1, 0); let body_child = ctor.scalar_u64(1, 8); let info_byte = ctor.scalar_u8(1, 16); @@ -262,7 +268,7 @@ pub fn decode_expr_meta_data(obj: LeanObject) -> ExprMetaData { 3 => { // letBinder: 1 obj field (name), 3× u64 scalar - let name = decode_ixon_address(ctor.get(0)); + let name = decode_ixon_address(ctor.get(0).as_byte_array()); let ty_child = ctor.scalar_u64(1, 0); let val_child = ctor.scalar_u64(1, 8); let body_child = ctor.scalar_u64(1, 16); @@ -274,19 +280,21 @@ pub fn decode_expr_meta_data(obj: LeanObject) -> ExprMetaData { 4 => { // ref: 1 obj field (name), 0 scalar - ExprMetaData::Ref { name: decode_ixon_address(ctor.get(0)) } + ExprMetaData::Ref { + name: decode_ixon_address(ctor.get(0).as_byte_array()), + } }, 5 => { // prj: 1 obj field (structName), 1× u64 scalar - let struct_name = decode_ixon_address(ctor.get(0)); + let struct_name = decode_ixon_address(ctor.get(0).as_byte_array()); let child = ctor.scalar_u64(1, 0); ExprMetaData::Prj { struct_name, child } }, 6 => { // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar - let mdata = decode_kvmap_array(ctor.get(0)); + let mdata = decode_kvmap_array(ctor.get(0).as_array()); let child = ctor.scalar_u64(1, 0); ExprMetaData::Mdata { mdata, child } }, @@ -312,9 +320,11 @@ pub fn build_expr_meta_arena(arena: &ExprMeta) -> LeanArray { /// Decode Ixon.ExprMetaArena from Lean pointer. 
/// Single-field struct is unboxed — obj IS the Array directly. -pub fn decode_expr_meta_arena(obj: LeanObject) -> ExprMeta { +pub fn decode_expr_meta_arena(obj: LeanIxonExprMetaArena) -> ExprMeta { let arr = obj.as_array(); - ExprMeta { nodes: arr.map(decode_expr_meta_data) } + ExprMeta { + nodes: arr.map(|x| decode_expr_meta_data(LeanIxonExprMetaData::new(x))), + } } // ============================================================================= @@ -332,8 +342,8 @@ pub fn decode_expr_meta_arena(obj: LeanObject) -> ExprMeta { /// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | /// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | /// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | -pub fn build_constant_meta(meta: &ConstantMeta) -> LeanObject { - match meta { +pub fn build_constant_meta(meta: &ConstantMeta) -> LeanIxonConstantMeta { + let obj = match meta { ConstantMeta::Empty => LeanObject::box_usize(0), ConstantMeta::Def { @@ -419,11 +429,12 @@ pub fn build_constant_meta(meta: &ConstantMeta) -> LeanObject { ctor.set_u64(7 * 8, *type_root); *ctor }, - } + }; + LeanIxonConstantMeta::new(obj) } /// Decode Ixon.ConstantMeta from Lean pointer. 
-pub fn decode_constant_meta(obj: LeanObject) -> ConstantMeta { +pub fn decode_constant_meta(obj: LeanIxonConstantMeta) -> ConstantMeta { // Empty (tag 0, no fields) is represented as a scalar lean_box(0) if obj.is_scalar() { let tag = obj.as_ptr() as usize >> 1; @@ -434,12 +445,14 @@ pub fn decode_constant_meta(obj: LeanObject) -> ConstantMeta { match ctor.tag() { 1 => { // defn: 6 obj fields, 2× u64 scalar - let name = decode_ixon_address(ctor.get(0)); - let lvls = decode_address_array(ctor.get(1)); - let hints = decode_reducibility_hints(ctor.get(2)); - let all = decode_address_array(ctor.get(3)); - let ctx = decode_address_array(ctor.get(4)); - let arena = decode_expr_meta_arena(ctor.get(5)); + let name = decode_ixon_address(ctor.get(0).as_byte_array()); + let lvls = decode_address_array(ctor.get(1).as_array()); + let hints = + decode_reducibility_hints(LeanIxReducibilityHints::new(ctor.get(2))); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = + decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(5))); let type_root = ctor.scalar_u64(6, 0); let value_root = ctor.scalar_u64(6, 8); ConstantMeta::Def { @@ -456,53 +469,58 @@ pub fn decode_constant_meta(obj: LeanObject) -> ConstantMeta { 2 => { // axio: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0)); - let lvls = decode_address_array(ctor.get(1)); - let arena = decode_expr_meta_arena(ctor.get(2)); + let name = decode_ixon_address(ctor.get(0).as_byte_array()); + let lvls = decode_address_array(ctor.get(1).as_array()); + let arena = + decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(2))); let type_root = ctor.scalar_u64(3, 0); ConstantMeta::Axio { name, lvls, arena, type_root } }, 3 => { // quot: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0)); - let lvls = decode_address_array(ctor.get(1)); - let arena = decode_expr_meta_arena(ctor.get(2)); + let name = 
decode_ixon_address(ctor.get(0).as_byte_array()); + let lvls = decode_address_array(ctor.get(1).as_array()); + let arena = + decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(2))); let type_root = ctor.scalar_u64(3, 0); ConstantMeta::Quot { name, lvls, arena, type_root } }, 4 => { // indc: 6 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0)); - let lvls = decode_address_array(ctor.get(1)); - let ctors = decode_address_array(ctor.get(2)); - let all = decode_address_array(ctor.get(3)); - let ctx = decode_address_array(ctor.get(4)); - let arena = decode_expr_meta_arena(ctor.get(5)); + let name = decode_ixon_address(ctor.get(0).as_byte_array()); + let lvls = decode_address_array(ctor.get(1).as_array()); + let ctors = decode_address_array(ctor.get(2).as_array()); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = + decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(5))); let type_root = ctor.scalar_u64(6, 0); ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } }, 5 => { // ctor: 4 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0)); - let lvls = decode_address_array(ctor.get(1)); - let induct = decode_ixon_address(ctor.get(2)); - let arena = decode_expr_meta_arena(ctor.get(3)); + let name = decode_ixon_address(ctor.get(0).as_byte_array()); + let lvls = decode_address_array(ctor.get(1).as_array()); + let induct = decode_ixon_address(ctor.get(2).as_byte_array()); + let arena = + decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(3))); let type_root = ctor.scalar_u64(4, 0); ConstantMeta::Ctor { name, lvls, induct, arena, type_root } }, 6 => { // recr: 7 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0)); - let lvls = decode_address_array(ctor.get(1)); - let rules = decode_address_array(ctor.get(2)); - let all = decode_address_array(ctor.get(3)); - let ctx = decode_address_array(ctor.get(4)); - 
let arena = decode_expr_meta_arena(ctor.get(5)); - let rule_roots = decode_u64_array(ctor.get(6)); + let name = decode_ixon_address(ctor.get(0).as_byte_array()); + let lvls = decode_address_array(ctor.get(1).as_array()); + let rules = decode_address_array(ctor.get(2).as_array()); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = + decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(5))); + let rule_roots = decode_u64_array(ctor.get(6).as_array()); let type_root = ctor.scalar_u64(7, 0); ConstantMeta::Rec { name, @@ -525,40 +543,40 @@ pub fn decode_constant_meta(obj: LeanObject) -> ConstantMeta { // ============================================================================= /// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } -pub fn build_named(addr: &Address, meta: &ConstantMeta) -> LeanObject { +pub fn build_named(addr: &Address, meta: &ConstantMeta) -> LeanIxonNamed { let addr_obj = build_address_from_ixon(addr); let meta_obj = build_constant_meta(meta); let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, addr_obj); ctor.set(1, meta_obj); - *ctor + LeanIxonNamed::new(*ctor) } /// Decode Ixon.Named. -pub fn decode_named(obj: LeanObject) -> Named { +pub fn decode_named(obj: LeanIxonNamed) -> Named { let ctor = obj.as_ctor(); Named { - addr: decode_ixon_address(ctor.get(0)), - meta: decode_constant_meta(ctor.get(1)), + addr: decode_ixon_address(ctor.get(0).as_byte_array()), + meta: decode_constant_meta(LeanIxonConstantMeta::new(ctor.get(1))), } } /// Build Ixon.Comm { secret : Address, payload : Address } -pub fn build_ixon_comm(comm: &Comm) -> LeanObject { +pub fn build_ixon_comm(comm: &Comm) -> LeanIxonComm { let secret_obj = build_address_from_ixon(&comm.secret); let payload_obj = build_address_from_ixon(&comm.payload); let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, secret_obj); ctor.set(1, payload_obj); - *ctor + LeanIxonComm::new(*ctor) } /// Decode Ixon.Comm. 
-pub fn decode_ixon_comm(obj: LeanObject) -> Comm { +pub fn decode_ixon_comm(obj: LeanIxonComm) -> Comm { let ctor = obj.as_ctor(); Comm { - secret: decode_ixon_address(ctor.get(0)), - payload: decode_ixon_address(ctor.get(1)), + secret: decode_ixon_address(ctor.get(0).as_byte_array()), + payload: decode_ixon_address(ctor.get(1).as_byte_array()), } } @@ -571,15 +589,15 @@ pub fn decode_ixon_comm(obj: LeanObject) -> Comm { pub extern "C" fn rs_roundtrip_ixon_data_value( obj: LeanIxonDataValue, ) -> LeanIxonDataValue { - let dv = decode_ixon_data_value(*obj); - build_ixon_data_value(&dv).into() + let dv = decode_ixon_data_value(obj); + build_ixon_data_value(&dv) } /// Round-trip Ixon.Comm. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanIxonComm) -> LeanIxonComm { - let comm = decode_ixon_comm(*obj); - build_ixon_comm(&comm).into() + let comm = decode_ixon_comm(obj); + build_ixon_comm(&comm) } /// Round-trip Ixon.ExprMetaData. @@ -587,8 +605,8 @@ pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanIxonComm) -> LeanIxonComm { pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( obj: LeanIxonExprMetaData, ) -> LeanIxonExprMetaData { - let node = decode_expr_meta_data(*obj); - build_expr_meta_data(&node).into() + let node = decode_expr_meta_data(obj); + build_expr_meta_data(&node) } /// Round-trip Ixon.ExprMetaArena. @@ -596,8 +614,8 @@ pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( obj: LeanIxonExprMetaArena, ) -> LeanIxonExprMetaArena { - let arena = decode_expr_meta_arena(*obj); - (*build_expr_meta_arena(&arena)).into() + let arena = decode_expr_meta_arena(obj); + LeanIxonExprMetaArena::new(*build_expr_meta_arena(&arena)) } /// Round-trip Ixon.ConstantMeta (full arena-based). 
@@ -605,13 +623,13 @@ pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( pub extern "C" fn rs_roundtrip_ixon_constant_meta( obj: LeanIxonConstantMeta, ) -> LeanIxonConstantMeta { - let meta = decode_constant_meta(*obj); - build_constant_meta(&meta).into() + let meta = decode_constant_meta(obj); + build_constant_meta(&meta) } /// Round-trip Ixon.Named (with real metadata). #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_named(obj: LeanIxonNamed) -> LeanIxonNamed { - let named = decode_named(*obj); - build_named(&named.addr, &named.meta).into() + let named = decode_named(obj); + build_named(&named.addr, &named.meta) } diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs index 7ed48d9f..3b9b6261 100644 --- a/src/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -6,94 +6,16 @@ use std::sync::Arc; use crate::ix::address::Address; -use crate::ix::ixon::expr::Expr as IxonExpr; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; use crate::lean::{ LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, LeanIxonUniv, }; -use lean_ffi::object::{LeanByteArray, LeanObject}; +use lean_ffi::object::LeanByteArray; use crate::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; - -/// Unbox a Lean UInt64, handling both scalar and boxed representations. -fn lean_ptr_to_u64(obj: LeanObject) -> u64 { - if obj.is_scalar() { obj.unbox_usize() as u64 } else { obj.unbox_u64() } -} - -/// Decode a Lean `Ixon.Expr` to a Rust `IxonExpr`. 
-pub fn lean_ptr_to_ixon_expr(obj: LeanObject) -> Arc { - assert!(!obj.is_scalar(), "Ixon.Expr should not be scalar"); - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - let idx = ctor.scalar_u64(0, 0); - Arc::new(IxonExpr::Sort(idx)) - }, - 1 => { - let idx = ctor.scalar_u64(0, 0); - Arc::new(IxonExpr::Var(idx)) - }, - 2 => { - let ref_idx = ctor.scalar_u64(1, 0); - let univs_arr = ctor.get(0).as_array(); - let univs = univs_arr.map(lean_ptr_to_u64); - Arc::new(IxonExpr::Ref(ref_idx, univs)) - }, - 3 => { - let rec_idx = ctor.scalar_u64(1, 0); - let univs_arr = ctor.get(0).as_array(); - let univs = univs_arr.map(lean_ptr_to_u64); - Arc::new(IxonExpr::Rec(rec_idx, univs)) - }, - 4 => { - let type_idx = ctor.scalar_u64(1, 0); - let field_idx = ctor.scalar_u64(1, 8); - let val = lean_ptr_to_ixon_expr(ctor.get(0)); - Arc::new(IxonExpr::Prj(type_idx, field_idx, val)) - }, - 5 => { - let idx = ctor.scalar_u64(0, 0); - Arc::new(IxonExpr::Str(idx)) - }, - 6 => { - let idx = ctor.scalar_u64(0, 0); - Arc::new(IxonExpr::Nat(idx)) - }, - 7 => { - let [fun_obj, arg_obj] = ctor.objs::<2>(); - let fun_ = lean_ptr_to_ixon_expr(fun_obj); - let arg = lean_ptr_to_ixon_expr(arg_obj); - Arc::new(IxonExpr::App(fun_, arg)) - }, - 8 => { - let [ty_obj, body_obj] = ctor.objs::<2>(); - let ty = lean_ptr_to_ixon_expr(ty_obj); - let body = lean_ptr_to_ixon_expr(body_obj); - Arc::new(IxonExpr::Lam(ty, body)) - }, - 9 => { - let [ty_obj, body_obj] = ctor.objs::<2>(); - let ty = lean_ptr_to_ixon_expr(ty_obj); - let body = lean_ptr_to_ixon_expr(body_obj); - Arc::new(IxonExpr::All(ty, body)) - }, - 10 => { - let [ty_obj, val_obj, body_obj] = ctor.objs::<3>(); - let non_dep = ctor.scalar_bool(3, 0); - let ty = lean_ptr_to_ixon_expr(ty_obj); - let val = lean_ptr_to_ixon_expr(val_obj); - let body = lean_ptr_to_ixon_expr(body_obj); - Arc::new(IxonExpr::Let(non_dep, ty, val, body)) - }, - 11 => { - let idx = ctor.scalar_u64(0, 0); - Arc::new(IxonExpr::Share(idx)) - }, - tag => panic!("Unknown 
Ixon.Expr tag: {tag}"), - } -} +use crate::ffi::ixon::expr::decode_ixon_expr; /// Check if Lean's computed hash matches Rust's computed hash. #[unsafe(no_mangle)] @@ -101,14 +23,14 @@ pub extern "C" fn rs_expr_hash_matches( expr_obj: LeanIxonExpr, expected_hash: LeanIxAddress, ) -> bool { - let expr = lean_ptr_to_ixon_expr(*expr_obj); + let expr = Arc::new(decode_ixon_expr(expr_obj)); let hash = hash_expr(&expr); - let expected = decode_ixon_address(*expected_hash); + let expected = decode_ixon_address(expected_hash); Address::from_slice(hash.as_bytes()).is_ok_and(|h| h == expected) } /// Decode a Lean `Ixon.Univ` to a Rust `IxonUniv`. -fn lean_ptr_to_ixon_univ(obj: LeanObject) -> Arc { +fn decode_ixon_univ(obj: LeanIxonUniv) -> Arc { if obj.is_scalar() { return IxonUniv::zero(); } @@ -116,15 +38,21 @@ fn lean_ptr_to_ixon_univ(obj: LeanObject) -> Arc { match ctor.tag() { 1 => { let [inner] = ctor.objs::<1>(); - IxonUniv::succ(lean_ptr_to_ixon_univ(inner)) + IxonUniv::succ(decode_ixon_univ(LeanIxonUniv::new(inner))) }, 2 => { let [a, b] = ctor.objs::<2>(); - IxonUniv::max(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) + IxonUniv::max( + decode_ixon_univ(LeanIxonUniv::new(a)), + decode_ixon_univ(LeanIxonUniv::new(b)), + ) }, 3 => { let [a, b] = ctor.objs::<2>(); - IxonUniv::imax(lean_ptr_to_ixon_univ(a), lean_ptr_to_ixon_univ(b)) + IxonUniv::imax( + decode_ixon_univ(LeanIxonUniv::new(a)), + decode_ixon_univ(LeanIxonUniv::new(b)), + ) }, 4 => IxonUniv::var(ctor.scalar_u64(0, 0)), tag => panic!("Unknown Ixon.Univ tag: {tag}"), @@ -137,7 +65,7 @@ pub extern "C" fn rs_eq_univ_serialization( univ_obj: LeanIxonUniv, bytes_obj: LeanByteArray, ) -> bool { - let univ = lean_ptr_to_ixon_univ(*univ_obj); + let univ = decode_ixon_univ(univ_obj); let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_univ(&univ, &mut buf); @@ -150,7 +78,7 @@ pub extern "C" fn rs_eq_expr_serialization( expr_obj: LeanIxonExpr, bytes_obj: LeanByteArray, ) 
-> bool { - let expr = lean_ptr_to_ixon_expr(*expr_obj); + let expr = decode_ixon_expr(expr_obj); let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_expr(&expr, &mut buf); @@ -163,7 +91,7 @@ pub extern "C" fn rs_eq_constant_serialization( constant_obj: LeanIxonConstant, bytes_obj: LeanByteArray, ) -> bool { - let constant = decode_ixon_constant(*constant_obj); + let constant = decode_ixon_constant(constant_obj); let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); constant.put(&mut buf); @@ -180,7 +108,7 @@ pub extern "C" fn rs_eq_env_serialization( use crate::ffi::ixon::env::decode_raw_env; use crate::ix::ixon::env::Env; - let decoded = decode_raw_env(*raw_env_obj); + let decoded = decode_raw_env(raw_env_obj); let bytes_data = bytes_obj.as_bytes(); // Deserialize Lean's bytes using Rust's deserializer diff --git a/src/ffi/ixon/sharing.rs b/src/ffi/ixon/sharing.rs index 34b26c85..fdb259be 100644 --- a/src/ffi/ixon/sharing.rs +++ b/src/ffi/ixon/sharing.rs @@ -7,17 +7,19 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::{ analyze_block, build_sharing_vec, decide_sharing, }; +use crate::lean::LeanIxonExpr; use lean_ffi::object::{LeanArray, LeanByteArray}; use crate::ffi::ixon::expr::decode_ixon_expr_array; -use crate::ffi::ixon::serialize::lean_ptr_to_ixon_expr; +use crate::ffi::ixon::expr::decode_ixon_expr; /// FFI: Debug sharing analysis - print usage counts for subterms with usage >= 2. /// This helps diagnose why Lean and Rust make different sharing decisions. 
#[unsafe(no_mangle)] pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanArray) { let arr = exprs_obj; - let exprs: Vec> = arr.map(lean_ptr_to_ixon_expr); + let exprs: Vec> = + arr.map(|x| Arc::new(decode_ixon_expr(LeanIxonExpr::new(x)))); println!("[Rust] Analyzing {} input expressions", exprs.len()); @@ -58,7 +60,7 @@ pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanArray) { /// Returns the number of shared items Rust would produce. #[unsafe(no_mangle)] extern "C" fn rs_analyze_sharing_count(exprs_obj: LeanArray) -> u64 { - let exprs = decode_ixon_expr_array(*exprs_obj); + let exprs = decode_ixon_expr_array(exprs_obj); let (info_map, _ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -75,7 +77,7 @@ extern "C" fn rs_run_sharing_analysis( out_sharing_vec: LeanByteArray, out_rewritten: LeanByteArray, ) -> u64 { - let exprs = decode_ixon_expr_array(*exprs_obj); + let exprs = decode_ixon_expr_array(exprs_obj); let (info_map, ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -114,10 +116,10 @@ extern "C" fn rs_compare_sharing_analysis( _lean_rewritten_obj: LeanArray, ) -> u64 { // Decode input expressions - let exprs = decode_ixon_expr_array(*exprs_obj); + let exprs = decode_ixon_expr_array(exprs_obj); // Decode Lean's sharing vector - let lean_sharing = decode_ixon_expr_array(*lean_sharing_obj); + let lean_sharing = decode_ixon_expr_array(lean_sharing_obj); // Run Rust's sharing analysis let (info_map, ptr_to_hash) = analyze_block(&exprs, false); diff --git a/src/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs index fa72a958..b4bad24e 100644 --- a/src/ffi/ixon/univ.rs +++ b/src/ffi/ixon/univ.rs @@ -70,9 +70,8 @@ impl LeanIxonUniv { } /// Decode Array Ixon.Univ. 
- pub fn decode_array(obj: LeanObject) -> Vec> { - let arr = obj.as_array(); - arr.map(|elem| Arc::new(Self::new(elem).decode())) + pub fn decode_array(obj: LeanArray) -> Vec> { + obj.map(|elem| Arc::new(Self::new(elem).decode())) } } @@ -82,7 +81,7 @@ pub fn build_ixon_univ_array(univs: &[Arc]) -> LeanArray { } /// Decode Array Ixon.Univ (standalone wrapper). -pub fn decode_ixon_univ_array(obj: LeanObject) -> Vec> { +pub fn decode_ixon_univ_array(obj: LeanArray) -> Vec> { LeanIxonUniv::decode_array(obj) } diff --git a/src/ffi/lean_env.rs b/src/ffi/lean_env.rs index 62c1454b..dc38df09 100644 --- a/src/ffi/lean_env.rs +++ b/src/ffi/lean_env.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use rustc_hash::FxHashMap; use lean_ffi::nat::Nat; -use lean_ffi::object::LeanObject; +use lean_ffi::object::{LeanList, LeanObject}; use crate::{ ix::compile::compile_env, @@ -94,12 +94,12 @@ impl<'g> Cache<'g> { } } -fn collect_list_objs(obj: LeanObject) -> Vec { - obj.as_list().iter().collect() +fn collect_list_objs(list: LeanList) -> Vec { + list.iter().collect() } // Name decoding with global cache -pub fn lean_ptr_to_name(obj: LeanObject, global: &GlobalCache) -> Name { +pub fn decode_name(obj: LeanObject, global: &GlobalCache) -> Name { let ptr = obj.as_ptr(); // Fast path: check if already cached if let Some(name) = global.names.get(&ptr) { @@ -113,7 +113,7 @@ pub fn lean_ptr_to_name(obj: LeanObject, global: &GlobalCache) -> Name { let ctor = obj.as_ctor(); let [pre, pos] = ctor.objs(); // Recursive call - will also use global cache - let pre = lean_ptr_to_name(pre, global); + let pre = decode_name(pre, global); match ctor.tag() { 1 => Name::str(pre, pos.as_string().to_string()), 2 => Name::num(pre, Nat::from_obj(pos)), @@ -125,7 +125,7 @@ pub fn lean_ptr_to_name(obj: LeanObject, global: &GlobalCache) -> Name { global.names.entry(ptr).or_insert(name).clone() } -fn lean_ptr_to_level(obj: LeanObject, cache: &mut Cache<'_>) -> Level { +fn decode_level(obj: LeanObject, cache: &mut 
Cache<'_>) -> Level { let ptr = obj.as_ptr(); if let Some(cached) = cache.local.univs.get(&ptr) { return cached.clone(); @@ -136,25 +136,25 @@ fn lean_ptr_to_level(obj: LeanObject, cache: &mut Cache<'_>) -> Level { let ctor = obj.as_ctor(); match ctor.tag() { 1 => { - let [u] = ctor.objs::<1>().map(|o| lean_ptr_to_level(o, cache)); + let [u] = ctor.objs::<1>().map(|o| decode_level(o, cache)); Level::succ(u) }, 2 => { - let [u, v] = ctor.objs::<2>().map(|o| lean_ptr_to_level(o, cache)); + let [u, v] = ctor.objs::<2>().map(|o| decode_level(o, cache)); Level::max(u, v) }, 3 => { - let [u, v] = ctor.objs::<2>().map(|o| lean_ptr_to_level(o, cache)); + let [u, v] = ctor.objs::<2>().map(|o| decode_level(o, cache)); Level::imax(u, v) }, 4 => { let [name] = - ctor.objs::<1>().map(|o| lean_ptr_to_name(o, cache.global)); + ctor.objs::<1>().map(|o| decode_name(o, cache.global)); Level::param(name) }, 5 => { let [name] = - ctor.objs::<1>().map(|o| lean_ptr_to_name(o, cache.global)); + ctor.objs::<1>().map(|o| decode_name(o, cache.global)); Level::mvar(name) }, _ => unreachable!(), @@ -164,7 +164,7 @@ fn lean_ptr_to_level(obj: LeanObject, cache: &mut Cache<'_>) -> Level { level } -fn lean_ptr_to_substring(obj: LeanObject) -> Substring { +fn decode_substring(obj: LeanObject) -> Substring { let ctor = obj.as_ctor(); let [str_obj, start_pos, stop_pos] = ctor.objs(); let str = str_obj.as_string().to_string(); @@ -173,7 +173,7 @@ fn lean_ptr_to_substring(obj: LeanObject) -> Substring { Substring { str, start_pos, stop_pos } } -fn lean_ptr_to_source_info(obj: LeanObject) -> SourceInfo { +fn decode_source_info(obj: LeanObject) -> SourceInfo { if obj.is_scalar() { return SourceInfo::None; } @@ -181,9 +181,9 @@ fn lean_ptr_to_source_info(obj: LeanObject) -> SourceInfo { match ctor.tag() { 0 => { let [leading, pos, trailing, end_pos] = ctor.objs(); - let leading = lean_ptr_to_substring(leading); + let leading = decode_substring(leading); let pos = Nat::from_obj(pos); - let trailing = 
lean_ptr_to_substring(trailing); + let trailing = decode_substring(trailing); let end_pos = Nat::from_obj(end_pos); SourceInfo::Original(leading, pos, trailing, end_pos) }, @@ -198,7 +198,7 @@ fn lean_ptr_to_source_info(obj: LeanObject) -> SourceInfo { } } -fn lean_ptr_to_syntax_preresolved( +fn decode_syntax_preresolved( obj: LeanObject, cache: &mut Cache<'_>, ) -> SyntaxPreresolved { @@ -206,12 +206,12 @@ fn lean_ptr_to_syntax_preresolved( match ctor.tag() { 0 => { let [name_obj] = ctor.objs::<1>(); - let name = lean_ptr_to_name(name_obj, cache.global); + let name = decode_name(name_obj, cache.global); SyntaxPreresolved::Namespace(name) }, 1 => { let [name_obj, fields_obj] = ctor.objs(); - let name = lean_ptr_to_name(name_obj, cache.global); + let name = decode_name(name_obj, cache.global); let fields: Vec = fields_obj .as_list() .iter() @@ -223,7 +223,7 @@ fn lean_ptr_to_syntax_preresolved( } } -fn lean_ptr_to_syntax(obj: LeanObject, cache: &mut Cache<'_>) -> Syntax { +fn decode_syntax(obj: LeanObject, cache: &mut Cache<'_>) -> Syntax { if obj.is_scalar() { return Syntax::Missing; } @@ -231,25 +231,25 @@ fn lean_ptr_to_syntax(obj: LeanObject, cache: &mut Cache<'_>) -> Syntax { match ctor.tag() { 1 => { let [info, kind, args] = ctor.objs(); - let info = lean_ptr_to_source_info(info); - let kind = lean_ptr_to_name(kind, cache.global); + let info = decode_source_info(info); + let kind = decode_name(kind, cache.global); let args: Vec<_> = - args.as_array().iter().map(|o| lean_ptr_to_syntax(o, cache)).collect(); + args.as_array().iter().map(|o| decode_syntax(o, cache)).collect(); Syntax::Node(info, kind, args) }, 2 => { let [info, val] = ctor.objs(); - let info = lean_ptr_to_source_info(info); + let info = decode_source_info(info); Syntax::Atom(info, val.as_string().to_string()) }, 3 => { let [info, raw_val, val, preresolved] = ctor.objs(); - let info = lean_ptr_to_source_info(info); - let raw_val = lean_ptr_to_substring(raw_val); - let val = lean_ptr_to_name(val, 
cache.global); - let preresolved = collect_list_objs(preresolved) + let info = decode_source_info(info); + let raw_val = decode_substring(raw_val); + let val = decode_name(val, cache.global); + let preresolved = collect_list_objs(preresolved.as_list()) .into_iter() - .map(|o| lean_ptr_to_syntax_preresolved(o, cache)) + .map(|o| decode_syntax_preresolved(o, cache)) .collect(); Syntax::Ident(info, raw_val, val, preresolved) }, @@ -257,19 +257,19 @@ fn lean_ptr_to_syntax(obj: LeanObject, cache: &mut Cache<'_>) -> Syntax { } } -fn lean_ptr_to_name_data_value( +fn decode_name_data_value( obj: LeanObject, cache: &mut Cache<'_>, ) -> (Name, DataValue) { let ctor = obj.as_ctor(); let [name_obj, data_value_obj] = ctor.objs(); - let name = lean_ptr_to_name(name_obj, cache.global); + let name = decode_name(name_obj, cache.global); let dv_ctor = data_value_obj.as_ctor(); let [inner] = dv_ctor.objs::<1>(); let data_value = match dv_ctor.tag() { 0 => DataValue::OfString(inner.as_string().to_string()), 1 => DataValue::OfBool(inner.as_ptr() as usize == 1), - 2 => DataValue::OfName(lean_ptr_to_name(inner, cache.global)), + 2 => DataValue::OfName(decode_name(inner, cache.global)), 3 => DataValue::OfNat(Nat::from_obj(inner)), 4 => { let inner_ctor = inner.as_ctor(); @@ -282,13 +282,13 @@ fn lean_ptr_to_name_data_value( }; DataValue::OfInt(int) }, - 5 => DataValue::OfSyntax(lean_ptr_to_syntax(inner, cache).into()), + 5 => DataValue::OfSyntax(decode_syntax(inner, cache).into()), _ => unreachable!(), }; (name, data_value) } -pub fn lean_ptr_to_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { +pub fn decode_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { let ptr = obj.as_ptr(); if let Some(cached) = cache.local.exprs.get(&ptr) { return cached.clone(); @@ -301,39 +301,39 @@ pub fn lean_ptr_to_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { }, 1 => { let [name_obj, _hash] = ctor.objs(); - let name = lean_ptr_to_name(name_obj, cache.global); + let name = 
decode_name(name_obj, cache.global); Expr::fvar(name) }, 2 => { let [name_obj, _hash] = ctor.objs(); - let name = lean_ptr_to_name(name_obj, cache.global); + let name = decode_name(name_obj, cache.global); Expr::mvar(name) }, 3 => { let [u, _hash] = ctor.objs(); - let u = lean_ptr_to_level(u, cache); + let u = decode_level(u, cache); Expr::sort(u) }, 4 => { let [name_obj, levels, _hash] = ctor.objs(); - let name = lean_ptr_to_name(name_obj, cache.global); - let levels = collect_list_objs(levels) + let name = decode_name(name_obj, cache.global); + let levels = collect_list_objs(levels.as_list()) .into_iter() - .map(|o| lean_ptr_to_level(o, cache)) + .map(|o| decode_level(o, cache)) .collect(); Expr::cnst(name, levels) }, 5 => { let [f, a, _hash] = ctor.objs(); - let f = lean_ptr_to_expr(f, cache); - let a = lean_ptr_to_expr(a, cache); + let f = decode_expr(f, cache); + let a = decode_expr(a, cache); Expr::app(f, a) }, 6 => { let [binder_name, binder_typ, body, _hash, binder_info] = ctor.objs(); - let binder_name = lean_ptr_to_name(binder_name, cache.global); - let binder_typ = lean_ptr_to_expr(binder_typ, cache); - let body = lean_ptr_to_expr(body, cache); + let binder_name = decode_name(binder_name, cache.global); + let binder_typ = decode_expr(binder_typ, cache); + let body = decode_expr(body, cache); let binder_info = match binder_info.as_ptr() as usize { 0 => BinderInfo::Default, 1 => BinderInfo::Implicit, @@ -345,9 +345,9 @@ pub fn lean_ptr_to_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { }, 7 => { let [binder_name, binder_typ, body, _hash, binder_info] = ctor.objs(); - let binder_name = lean_ptr_to_name(binder_name, cache.global); - let binder_typ = lean_ptr_to_expr(binder_typ, cache); - let body = lean_ptr_to_expr(body, cache); + let binder_name = decode_name(binder_name, cache.global); + let binder_typ = decode_expr(binder_typ, cache); + let body = decode_expr(body, cache); let binder_info = match binder_info.as_ptr() as usize { 0 => 
BinderInfo::Default, 1 => BinderInfo::Implicit, @@ -359,10 +359,10 @@ pub fn lean_ptr_to_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { }, 8 => { let [decl_name, typ, value, body, _hash, nondep] = ctor.objs(); - let decl_name = lean_ptr_to_name(decl_name, cache.global); - let typ = lean_ptr_to_expr(typ, cache); - let value = lean_ptr_to_expr(value, cache); - let body = lean_ptr_to_expr(body, cache); + let decl_name = decode_name(decl_name, cache.global); + let typ = decode_expr(typ, cache); + let value = decode_expr(value, cache); + let body = decode_expr(body, cache); let nondep = nondep.as_ptr() as usize == 1; Expr::letE(decl_name, typ, value, body, nondep) }, @@ -378,18 +378,18 @@ pub fn lean_ptr_to_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { }, 10 => { let [data, expr_obj] = ctor.objs(); - let kv_map: Vec<_> = collect_list_objs(data) + let kv_map: Vec<_> = collect_list_objs(data.as_list()) .into_iter() - .map(|o| lean_ptr_to_name_data_value(o, cache)) + .map(|o| decode_name_data_value(o, cache)) .collect(); - let expr = lean_ptr_to_expr(expr_obj, cache); + let expr = decode_expr(expr_obj, cache); Expr::mdata(kv_map, expr) }, 11 => { let [typ_name, idx, struct_expr] = ctor.objs(); - let typ_name = lean_ptr_to_name(typ_name, cache.global); + let typ_name = decode_name(typ_name, cache.global); let idx = Nat::from_obj(idx); - let struct_expr = lean_ptr_to_expr(struct_expr, cache); + let struct_expr = decode_expr(struct_expr, cache); Expr::proj(typ_name, idx, struct_expr) }, _ => unreachable!(), @@ -398,34 +398,34 @@ pub fn lean_ptr_to_expr(obj: LeanObject, cache: &mut Cache<'_>) -> Expr { expr } -fn lean_ptr_to_recursor_rule( +fn decode_recursor_rule( obj: LeanObject, cache: &mut Cache<'_>, ) -> RecursorRule { let ctor = obj.as_ctor(); let [ctor_name, n_fields, rhs] = ctor.objs(); - let ctor_name = lean_ptr_to_name(ctor_name, cache.global); + let ctor_name = decode_name(ctor_name, cache.global); let n_fields = Nat::from_obj(n_fields); - let rhs = 
lean_ptr_to_expr(rhs, cache); + let rhs = decode_expr(rhs, cache); RecursorRule { ctor: ctor_name, n_fields, rhs } } -fn lean_ptr_to_constant_val( +fn decode_constant_val( obj: LeanObject, cache: &mut Cache<'_>, ) -> ConstantVal { let ctor = obj.as_ctor(); let [name_obj, level_params, typ] = ctor.objs(); - let name = lean_ptr_to_name(name_obj, cache.global); - let level_params: Vec<_> = collect_list_objs(level_params) + let name = decode_name(name_obj, cache.global); + let level_params: Vec<_> = collect_list_objs(level_params.as_list()) .into_iter() - .map(|o| lean_ptr_to_name(o, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let typ = lean_ptr_to_expr(typ, cache); + let typ = decode_expr(typ, cache); ConstantVal { name, level_params, typ } } -pub fn lean_ptr_to_constant_info( +pub fn decode_constant_info( obj: LeanObject, cache: &mut Cache<'_>, ) -> ConstantInfo { @@ -436,14 +436,14 @@ pub fn lean_ptr_to_constant_info( match ctor.tag() { 0 => { let [constant_val, is_unsafe] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let constant_val = decode_constant_val(constant_val, cache); let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::AxiomInfo(AxiomVal { cnst: constant_val, is_unsafe }) }, 1 => { let [constant_val, value, hints, all, safety] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); - let value = lean_ptr_to_expr(value, cache); + let constant_val = decode_constant_val(constant_val, cache); + let value = decode_expr(value, cache); let hints = if hints.is_scalar() { match hints.unbox_usize() { 0 => ReducibilityHints::Opaque, @@ -455,9 +455,9 @@ pub fn lean_ptr_to_constant_info( let [height] = hints_ctor.objs::<1>(); ReducibilityHints::Regular(height.as_ptr() as u32) }; - let all: Vec<_> = collect_list_objs(all) + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|o| lean_ptr_to_name(o, cache.global)) + .map(|o| decode_name(o, 
cache.global)) .collect(); let safety = match safety.as_ptr() as usize { 0 => DefinitionSafety::Unsafe, @@ -475,21 +475,21 @@ pub fn lean_ptr_to_constant_info( }, 2 => { let [constant_val, value, all] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); - let value = lean_ptr_to_expr(value, cache); - let all: Vec<_> = collect_list_objs(all) + let constant_val = decode_constant_val(constant_val, cache); + let value = decode_expr(value, cache); + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|o| lean_ptr_to_name(o, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); ConstantInfo::ThmInfo(TheoremVal { cnst: constant_val, value, all }) }, 3 => { let [constant_val, value, all, is_unsafe] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); - let value = lean_ptr_to_expr(value, cache); - let all: Vec<_> = collect_list_objs(all) + let constant_val = decode_constant_val(constant_val, cache); + let value = decode_expr(value, cache); + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|o| lean_ptr_to_name(o, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); let is_unsafe = is_unsafe.as_ptr() as usize == 1; ConstantInfo::OpaqueInfo(OpaqueVal { @@ -501,7 +501,7 @@ pub fn lean_ptr_to_constant_info( }, 4 => { let [constant_val, kind] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let constant_val = decode_constant_val(constant_val, cache); let kind = match kind.as_ptr() as usize { 0 => QuotKind::Type, 1 => QuotKind::Ctor, @@ -521,16 +521,16 @@ pub fn lean_ptr_to_constant_info( num_nested, bools, ] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); + let constant_val = decode_constant_val(constant_val, cache); let num_params = Nat::from_obj(num_params); let num_indices = Nat::from_obj(num_indices); - let all: Vec<_> = collect_list_objs(all) + let all: Vec<_> = 
collect_list_objs(all.as_list()) .into_iter() - .map(|o| lean_ptr_to_name(o, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); - let ctors: Vec<_> = collect_list_objs(ctors) + let ctors: Vec<_> = collect_list_objs(ctors.as_list()) .into_iter() - .map(|o| lean_ptr_to_name(o, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); let num_nested = Nat::from_obj(num_nested); let [is_rec, is_unsafe, is_reflexive, ..] = @@ -550,8 +550,8 @@ pub fn lean_ptr_to_constant_info( 6 => { let [constant_val, induct, cidx, num_params, num_fields, is_unsafe] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); - let induct = lean_ptr_to_name(induct, cache.global); + let constant_val = decode_constant_val(constant_val, cache); + let induct = decode_name(induct, cache.global); let cidx = Nat::from_obj(cidx); let num_params = Nat::from_obj(num_params); let num_fields = Nat::from_obj(num_fields); @@ -576,18 +576,18 @@ pub fn lean_ptr_to_constant_info( rules, bools, ] = inner.objs(); - let constant_val = lean_ptr_to_constant_val(constant_val, cache); - let all: Vec<_> = collect_list_objs(all) + let constant_val = decode_constant_val(constant_val, cache); + let all: Vec<_> = collect_list_objs(all.as_list()) .into_iter() - .map(|o| lean_ptr_to_name(o, cache.global)) + .map(|o| decode_name(o, cache.global)) .collect(); let num_params = Nat::from_obj(num_params); let num_indices = Nat::from_obj(num_indices); let num_motives = Nat::from_obj(num_motives); let num_minors = Nat::from_obj(num_minors); - let rules: Vec<_> = collect_list_objs(rules) + let rules: Vec<_> = collect_list_objs(rules.as_list()) .into_iter() - .map(|o| lean_ptr_to_recursor_rule(o, cache)) + .map(|o| decode_recursor_rule(o, cache)) .collect(); let [k, is_unsafe, ..] 
= (bools.as_ptr() as usize).to_le_bytes().map(|b| b == 1); @@ -615,18 +615,18 @@ fn decode_name_constant_info( let mut cache = Cache::new(global); let ctor = obj.as_ctor(); let [name_obj, constant_info] = ctor.objs(); - let name = lean_ptr_to_name(name_obj, global); - let constant_info = lean_ptr_to_constant_info(constant_info, &mut cache); + let name = decode_name(name_obj, global); + let constant_info = decode_constant_info(constant_info, &mut cache); (name, constant_info) } // Decode a Lean environment in parallel with hybrid caching. -pub fn lean_ptr_to_env(obj: LeanObject) -> Env { +pub fn decode_env(obj: LeanList) -> Env { // Phase 1: Collect pointers (sequential) let objs = collect_list_objs(obj); if objs.len() < PARALLEL_THRESHOLD { - return lean_ptr_to_env_sequential(obj); + return decode_env_sequential(obj); } // Estimate: ~3 unique names per constant on average @@ -651,7 +651,7 @@ pub fn lean_ptr_to_env(obj: LeanObject) -> Env { } /// Sequential fallback for small environments. -pub fn lean_ptr_to_env_sequential(obj: LeanObject) -> Env { +pub fn decode_env_sequential(obj: LeanList) -> Env { let objs = collect_list_objs(obj); let global = GlobalCache::new(); let mut env = Env::default(); @@ -669,7 +669,7 @@ pub fn lean_ptr_to_env_sequential(obj: LeanObject) -> Env { // roundtrip and size analysis. Output is intentionally suppressed; re-enable // individual `eprintln!` lines when debugging locally. 
#[unsafe(no_mangle)] -extern "C" fn rs_tmp_decode_const_map(obj: LeanObject) -> usize { +extern "C" fn rs_tmp_decode_const_map(obj: LeanList) -> usize { // Enable hash-consed size tracking for debugging // TODO: Make this configurable via CLI instead of hardcoded crate::ix::compile::TRACK_HASH_CONSED_SIZE @@ -680,7 +680,7 @@ extern "C" fn rs_tmp_decode_const_map(obj: LeanObject) -> usize { crate::ix::compile::ANALYZE_SHARING .store(false, std::sync::atomic::Ordering::Relaxed); - let env = lean_ptr_to_env(obj); + let env = decode_env(obj); let env = Arc::new(env); if let Ok(stt) = compile_env(&env) { if let Ok(dstt) = decompile_env(&stt) { diff --git a/src/ffi/primitives.rs b/src/ffi/primitives.rs index f50ba36c..63c5f84d 100644 --- a/src/ffi/primitives.rs +++ b/src/ffi/primitives.rs @@ -8,7 +8,8 @@ use lean_ffi::nat::Nat; use lean_ffi::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanList, LeanObject, LeanString, + LeanArray, LeanBool, LeanByteArray, LeanCtor, LeanList, LeanNat, LeanObject, + LeanString, }; // ============================================================================= @@ -34,12 +35,7 @@ pub fn build_nat(n: &Nat) -> LeanObject { arr[..chunk.len()].copy_from_slice(chunk); limbs.push(u64::from_le_bytes(arr)); } - unsafe { - LeanObject::from_raw(lean_ffi::nat::lean_nat_from_limbs( - limbs.len(), - limbs.as_ptr(), - )) - } + unsafe { lean_ffi::nat::lean_nat_from_limbs(limbs.len(), limbs.as_ptr()) } } // ============================================================================= @@ -48,8 +44,8 @@ pub fn build_nat(n: &Nat) -> LeanObject { /// Round-trip a Nat: decode from Lean, re-encode to Lean. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanObject) -> LeanObject { - let nat = Nat::from_obj(nat_ptr); +pub extern "C" fn rs_roundtrip_nat(nat_ptr: LeanNat) -> LeanObject { + let nat = Nat::from_obj(*nat_ptr); build_nat(&nat) } @@ -83,8 +79,7 @@ pub extern "C" fn rs_roundtrip_bytearray(ba: LeanByteArray) -> LeanByteArray { /// Round-trip a Bool: decode from Lean, re-encode. /// Bool in Lean is passed as unboxed scalar: false = 0, true = 1 #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanObject) -> LeanObject { - // Bool is passed as unboxed scalar - just return it as-is +pub extern "C" fn rs_roundtrip_bool(bool_ptr: LeanBool) -> LeanBool { bool_ptr } @@ -114,11 +109,10 @@ fn build_array_nat(nats: &[Nat]) -> LeanArray { /// Round-trip a Point (structure with x, y : Nat). /// Point is a structure, which in Lean is represented as a constructor with tag 0. #[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_point(point_ptr: LeanObject) -> LeanObject { - let ctor = point_ptr.as_ctor(); +pub extern "C" fn rs_roundtrip_point(point_ptr: LeanCtor) -> LeanObject { // Point is a structure (single constructor, tag 0) with 2 Nat fields - let x = Nat::from_obj(ctor.get(0)); - let y = Nat::from_obj(ctor.get(1)); + let x = Nat::from_obj(point_ptr.get(0)); + let y = Nat::from_obj(point_ptr.get(1)); // Re-encode as Point let point = LeanCtor::alloc(0, 2, 0); @@ -129,12 +123,11 @@ pub extern "C" fn rs_roundtrip_point(point_ptr: LeanObject) -> LeanObject { /// Round-trip a NatTree (inductive with leaf : Nat → NatTree | node : NatTree → NatTree → NatTree). 
#[unsafe(no_mangle)] -pub extern "C" fn rs_roundtrip_nat_tree(tree_ptr: LeanObject) -> LeanObject { +pub extern "C" fn rs_roundtrip_nat_tree(tree_ptr: LeanCtor) -> LeanObject { roundtrip_nat_tree_recursive(tree_ptr) } -fn roundtrip_nat_tree_recursive(obj: LeanObject) -> LeanObject { - let ctor = obj.as_ctor(); +fn roundtrip_nat_tree_recursive(ctor: LeanCtor) -> LeanObject { match ctor.tag() { 0 => { // leaf : Nat → NatTree @@ -145,8 +138,8 @@ fn roundtrip_nat_tree_recursive(obj: LeanObject) -> LeanObject { }, 1 => { // node : NatTree → NatTree → NatTree - let left = roundtrip_nat_tree_recursive(ctor.get(0)); - let right = roundtrip_nat_tree_recursive(ctor.get(1)); + let left = roundtrip_nat_tree_recursive(ctor.get(0).as_ctor()); + let right = roundtrip_nat_tree_recursive(ctor.get(1).as_ctor()); let node = LeanCtor::alloc(1, 2, 0); node.set(0, left); node.set(1, right); @@ -256,12 +249,11 @@ pub extern "C" fn rs_roundtrip_dhashmap_raw_nat_nat( /// - cons key value tail: ctor 1, 3 fields #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_hashmap_nat_nat( - map_ptr: LeanObject, + map_ptr: LeanCtor, ) -> LeanObject { - let raw_ctor = map_ptr.as_ctor(); // Due to unboxing, map_ptr points directly to Raw - let size = Nat::from_obj(raw_ctor.get(0)); - let buckets = raw_ctor.get(1).as_array(); + let size = Nat::from_obj(map_ptr.get(0)); + let buckets = map_ptr.get(1).as_array(); // Decode buckets (Array of AssocLists) let mut pairs: Vec<(Nat, Nat)> = Vec::new(); diff --git a/src/iroh.rs b/src/iroh.rs index 489a3f2a..2e32a2c7 100644 --- a/src/iroh.rs +++ b/src/iroh.rs @@ -1,33 +1,6 @@ -//! The client, server, and common modules are enabled by the `net` feature. However, Iroh doesn't work on `aarch64-darwin`, so they are always disabled for that target. -//! -//! Lean and C don't support feature flags, so the `_client` and `_server` modules are exposed as a fallback for when the `net` feature is disabled and/or on the `aarch64-darwin` target. -//! -//! 
These fallback modules contain dummy functions that can still be called via Lean->C->Rust FFI, but will return an error message that Lean then prints before exiting. - -#[cfg(any( - not(feature = "net"), - all(target_os = "macos", target_arch = "aarch64") -))] -pub mod _client; -#[cfg(any( - not(feature = "net"), - all(target_os = "macos", target_arch = "aarch64") -))] -pub mod _server; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] pub mod client; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] pub mod server; -#[cfg(all( - feature = "net", - not(all(target_os = "macos", target_arch = "aarch64")) -))] + pub mod common { use bincode::{Decode, Encode}; use serde::{Deserialize, Serialize}; diff --git a/src/iroh/_server.rs b/src/iroh/_server.rs deleted file mode 100644 index 29af5a56..00000000 --- a/src/iroh/_server.rs +++ /dev/null @@ -1,10 +0,0 @@ -use lean_ffi::object::LeanExcept; - -/// `Iroh.Serve.serve' : Unit → Except String Unit` -#[unsafe(no_mangle)] -extern "C" fn rs_iroh_serve() -> LeanExcept { - LeanExcept::error_string( - "Iroh functions not supported when the Rust `net` feature is disabled \ - or on MacOS aarch64-darwin", - ) -} diff --git a/src/lean.rs b/src/lean.rs index f5839388..29aba13e 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -13,6 +13,16 @@ lean_ffi::lean_domain_type! { LeanIxExpr; /// Lean `Ix.ConstantInfo` object. LeanIxConstantInfo; + /// Lean `Ix.ConstantVal` object. + LeanIxConstantVal; + /// Lean `Ix.ReducibilityHints` object. + LeanIxReducibilityHints; + /// Lean `Ix.Literal` object. + LeanIxLiteral; + /// Lean `Ix.BinderInfo` object. + LeanIxBinderInfo; + /// Lean `Ix.RecursorRule` object. + LeanIxRecursorRule; /// Lean `Ix.RawEnvironment` object. LeanIxRawEnvironment; /// Lean `Ix.Environment` object. @@ -89,6 +99,22 @@ lean_ffi::lean_domain_type! { LeanIxonComm; /// Lean `Ixon.RawEnv` object. LeanIxonRawEnv; + /// Lean `Ixon.RawConst` object. 
+ LeanIxonRawConst; + /// Lean `Ixon.RawNamed` object. + LeanIxonRawNamed; + /// Lean `Ixon.RawBlob` object. + LeanIxonRawBlob; + /// Lean `Ixon.RawComm` object. + LeanIxonRawComm; + /// Lean `Ixon.RawNameEntry` object. + LeanIxonRawNameEntry; + + // Aiur types + /// Lean `Aiur.Bytecode.Toplevel` object. + LeanAiurToplevel; + /// Lean `Aiur.FriParameters` object. + LeanAiurFriParameters; // Error types /// Lean `Ixon.SerializeError` object. diff --git a/src/lib.rs b/src/lib.rs index a4bca1e1..181c9e3f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,6 +14,11 @@ use rustc_hash::FxBuildHasher; pub mod aiur; pub mod ffi; +// Iroh functionality is enabled by the `net` feature. However, Iroh doesn't work on `aarch64-darwin`, so it is always disabled for that target. +#[cfg(all( + feature = "net", + not(all(target_os = "macos", target_arch = "aarch64")) +))] pub mod iroh; pub mod ix; pub mod lean; From 6b5494df94138d63300c300692daaff6c1d86049 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Wed, 4 Mar 2026 16:46:29 -0500 Subject: [PATCH 24/27] Encapsulate `build` and `decode` functions as methods --- docs/ffi.md | 4 +- lean-ffi/src/object.rs | 16 + src/ffi/aiur/protocol.rs | 12 +- src/ffi/compile.rs | 760 ++++++++++++++------------- src/ffi/graph.rs | 114 ++-- src/ffi/ix/address.rs | 42 +- src/ffi/ix/constant.rs | 787 ++++++++++++++-------------- src/ffi/ix/data.rs | 677 ++++++++++++------------ src/ffi/ix/env.rs | 202 ++++---- src/ffi/ix/expr.rs | 583 ++++++++++----------- src/ffi/ix/level.rs | 207 ++++---- src/ffi/ix/name.rs | 150 +++--- src/ffi/ixon/compare.rs | 86 ++-- src/ffi/ixon/constant.rs | 1030 ++++++++++++++++++------------------- src/ffi/ixon/enums.rs | 153 +++--- src/ffi/ixon/env.rs | 368 ++++++------- src/ffi/ixon/expr.rs | 398 +++++++------- src/ffi/ixon/meta.rs | 920 +++++++++++++++++---------------- src/ffi/ixon/serialize.rs | 53 +- src/ffi/ixon/sharing.rs | 14 +- src/ffi/ixon/univ.rs | 10 - src/lean.rs 
| 35 +- 22 files changed, 3317 insertions(+), 3304 deletions(-) diff --git a/docs/ffi.md b/docs/ffi.md index b02ad5e6..a86a88d6 100644 --- a/docs/ffi.md +++ b/docs/ffi.md @@ -45,7 +45,9 @@ define their own constructor methods using `LeanCtor` (e.g. or `*const c_void` directly in an `extern "C" fn`, but this is generally not recommended as internal Rust functions may pass in the wrong object more easily, and any low-level constructors would not be hidden behind the -API boundary. +API boundary. To enforce this, the `From for LeanObject` trait is +implemented to get the underlying `LeanObject`, but creating a wrapper type +from a `LeanObject` requires an explicit constructor for clarity. A key concept in this design is that ownership of the data is transferred to Lean, making it responsible for deallocation. If the data type is intended to be diff --git a/lean-ffi/src/object.rs b/lean-ffi/src/object.rs index 31f6817e..6e73647e 100644 --- a/lean-ffi/src/object.rs +++ b/lean-ffi/src/object.rs @@ -213,6 +213,14 @@ impl From for LeanObject { } } +impl LeanNat { + /// Wrap a raw `LeanObject` as a `LeanNat`. + #[inline] + pub fn new(obj: LeanObject) -> Self { + Self(obj) + } +} + // ============================================================================= // LeanBool — Bool (unboxed scalar: false = 0, true = 1) // ============================================================================= @@ -237,6 +245,14 @@ impl From for LeanObject { } } +impl LeanBool { + /// Wrap a raw `LeanObject` as a `LeanBool`. + #[inline] + pub fn new(obj: LeanObject) -> Self { + Self(obj) + } +} + impl LeanBool { /// Decode to a Rust `bool`. 
#[inline] diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index 88222535..7083f4e4 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -8,7 +8,7 @@ use std::sync::OnceLock; use lean_ffi::object::{ ExternalClass, LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanExternal, - LeanObject, + LeanNat, LeanObject, }; use crate::{ @@ -65,7 +65,7 @@ extern "C" fn rs_aiur_proof_of_bytes( #[unsafe(no_mangle)] extern "C" fn rs_aiur_system_build( toplevel: LeanAiurToplevel, - commitment_parameters: LeanObject, + commitment_parameters: LeanNat, ) -> LeanExternal { let system = AiurSystem::build( decode_toplevel(toplevel), @@ -96,13 +96,13 @@ extern "C" fn rs_aiur_system_verify( extern "C" fn rs_aiur_system_prove( aiur_system_obj: LeanExternal, fri_parameters: LeanAiurFriParameters, - fun_idx: LeanObject, + fun_idx: LeanNat, args: LeanArray, io_data_arr: LeanArray, io_map_arr: LeanArray, ) -> LeanObject { let fri_parameters = decode_fri_parameters(fri_parameters); - let fun_idx = lean_unbox_nat_as_usize(fun_idx); + let fun_idx = lean_unbox_nat_as_usize(*fun_idx); let args = args.map(lean_unbox_g); let io_data = io_data_arr.map(lean_unbox_g); let io_map = decode_io_buffer_map(io_map_arr); @@ -167,8 +167,8 @@ fn build_g_array(values: &[G]) -> LeanArray { arr } -fn decode_commitment_parameters(obj: LeanObject) -> CommitmentParameters { - CommitmentParameters { log_blowup: lean_unbox_nat_as_usize(obj) } +fn decode_commitment_parameters(obj: LeanNat) -> CommitmentParameters { + CommitmentParameters { log_blowup: lean_unbox_nat_as_usize(*obj) } } fn decode_fri_parameters(obj: LeanAiurFriParameters) -> FriParameters { diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 76c818a1..6737c225 100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -23,31 +23,24 @@ use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::{Comm, ConstantMeta}; use crate::lean::{ LeanIxBlockCompareDetail, LeanIxBlockCompareResult, LeanIxCompileError, - 
LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxDecompileError, + LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxConstantInfo, + LeanIxDecompileError, LeanIxName, LeanIxRawEnvironment, LeanIxSerializeError, LeanIxonRawBlob, LeanIxonRawComm, LeanIxonRawConst, - LeanIxonRawEnv, LeanIxonRawNamed, + LeanIxonRawEnv, LeanIxonRawNameEntry, LeanIxonRawNamed, }; use lean_ffi::nat::Nat; use lean_ffi::object::LeanIOResult; use lean_ffi::object::{ - LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanObject, LeanString, + LeanArray, LeanByteArray, LeanCtor, LeanExcept, LeanList, LeanObject, + LeanString, }; use dashmap::DashMap; use dashmap::DashSet; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::graph::build_condensed_blocks; -use crate::ffi::ix::constant::build_constant_info; -use crate::ffi::ix::env::build_raw_environment; -use crate::ffi::ix::name::build_name; -use crate::ffi::ixon::constant::{ - build_address_from_ixon, build_ixon_constant, decode_ixon_address, -}; -use crate::ffi::ixon::env::{ - build_raw_env, build_raw_name_entry, decode_raw_env, decoded_to_ixon_env, -}; -use crate::ffi::ixon::meta::{build_constant_meta, build_ixon_comm}; +use crate::lean::LeanIxAddress; +use crate::ffi::ixon::env::decoded_to_ixon_env; use crate::ffi::lean_env::{GlobalCache, decode_env, decode_name}; // ============================================================================= @@ -68,45 +61,32 @@ fn build_lean_nat_usize(n: usize) -> LeanObject { // Raw* Builder Functions for Compile FFI // ============================================================================= -/// Build RawConst: { addr : Address, const : Ixon.Constant } -pub fn build_raw_const( +/// Build RawConst using type method. 
+fn build_raw_const( addr: &Address, constant: &IxonConstant, ) -> LeanIxonRawConst { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(addr)); - ctor.set(1, build_ixon_constant(constant)); - LeanIxonRawConst::new(*ctor) + LeanIxonRawConst::build_from_parts(addr, constant) } -/// Build RawNamed: { name : Ix.Name, addr : Address, constMeta : Ixon.ConstantMeta } -pub fn build_raw_named( +/// Build RawNamed using type method. +fn build_raw_named( cache: &mut LeanBuildCache, name: &Name, addr: &Address, meta: &ConstantMeta, ) -> LeanIxonRawNamed { - let ctor = LeanCtor::alloc(0, 3, 0); - ctor.set(0, build_name(cache, name)); - ctor.set(1, build_address_from_ixon(addr)); - ctor.set(2, build_constant_meta(meta)); - LeanIxonRawNamed::new(*ctor) + LeanIxonRawNamed::build_from_parts(cache, name, addr, meta) } -/// Build RawBlob: { addr : Address, bytes : ByteArray } -pub fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanIxonRawBlob { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(addr)); - ctor.set(1, LeanByteArray::from_bytes(bytes)); - LeanIxonRawBlob::new(*ctor) +/// Build RawBlob using type method. +fn build_raw_blob(addr: &Address, bytes: &[u8]) -> LeanIxonRawBlob { + LeanIxonRawBlob::build_from_parts(addr, bytes) } -/// Build RawComm: { addr : Address, comm : Ixon.Comm } -pub fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanIxonRawComm { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(addr)); - ctor.set(1, build_ixon_comm(comm)); - LeanIxonRawComm::new(*ctor) +/// Build RawComm using type method. +fn build_raw_comm(addr: &Address, comm: &Comm) -> LeanIxonRawComm { + LeanIxonRawComm::build_from_parts(addr, comm) } // ============================================================================= @@ -214,11 +194,11 @@ pub extern "C" fn rs_roundtrip_block_compare_detail( /// FFI function to run the complete compilation pipeline and return all data. 
#[unsafe(no_mangle)] pub extern "C" fn rs_compile_env_full( - env_consts_ptr: LeanObject, + env_consts_ptr: LeanList, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { // Phase 1: Decode Lean environment - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); @@ -239,8 +219,8 @@ pub extern "C" fn rs_compile_env_full( // Phase 4: Build Lean structures let mut cache = LeanBuildCache::with_capacity(env_len); - let raw_env = build_raw_environment(&mut cache, &rust_env); - let condensed_obj = build_condensed_blocks(&mut cache, &condensed); + let raw_env = LeanIxRawEnvironment::build(&mut cache, &rust_env); + let condensed_obj = LeanIxCondensedBlocks::build(&mut cache, &condensed); // Collect blocks let mut blocks_data: Vec<(Name, Vec, usize)> = Vec::new(); @@ -267,7 +247,7 @@ pub extern "C" fn rs_compile_env_full( // Build blocks array let blocks_arr = LeanArray::alloc(blocks_data.len()); for (i, (name, bytes, sharing_len)) in blocks_data.iter().enumerate() { - let name_obj = build_name(&mut cache, name); + let name_obj = LeanIxName::build(&mut cache, name); let ba = LeanByteArray::from_bytes(bytes); // Block: { name: Ix.Name, bytes: ByteArray, sharingLen: UInt64 } @@ -286,7 +266,7 @@ pub extern "C" fn rs_compile_env_full( let name = entry.key(); let addr = entry.value(); - let name_obj = build_name(&mut cache, name); + let name_obj = LeanIxName::build(&mut cache, name); let addr_ba = LeanByteArray::from_bytes(addr.as_bytes()); let entry_obj = LeanCtor::alloc(0, 2, 0); @@ -313,9 +293,9 @@ pub extern "C" fn rs_compile_env_full( /// FFI function to compile a Lean environment to serialized Ixon.Env bytes. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanIOResult { +pub extern "C" fn rs_compile_env(env_consts_ptr: LeanList) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { @@ -345,28 +325,28 @@ pub extern "C" fn rs_compile_env(env_consts_ptr: LeanObject) -> LeanIOResult { pub extern "C" fn rs_roundtrip_raw_env( raw_env_obj: LeanIxonRawEnv, ) -> LeanIxonRawEnv { - let env = decode_raw_env(raw_env_obj); - build_raw_env(&env) + let env = raw_env_obj.decode(); + LeanIxonRawEnv::build(&env) } /// FFI function to run all compilation phases and return combined results. #[unsafe(no_mangle)] pub extern "C" fn rs_compile_phases( - env_consts_ptr: LeanObject, + env_consts_ptr: LeanList, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let env_len = rust_env.len(); let rust_env = Arc::new(rust_env); let mut cache = LeanBuildCache::with_capacity(env_len); - let raw_env = build_raw_environment(&mut cache, &rust_env); + let raw_env = LeanIxRawEnvironment::build(&mut cache, &rust_env); let ref_graph = build_ref_graph(&rust_env); let condensed = compute_sccs(&ref_graph.out_refs); - let condensed_obj = build_condensed_blocks(&mut cache, &condensed); + let condensed_obj = LeanIxCondensedBlocks::build(&mut cache, &condensed); let compile_stt = match compile_env(&rust_env) { Ok(stt) => stt, @@ -430,7 +410,7 @@ pub extern "C" fn rs_compile_phases( .collect(); let names_arr = LeanArray::alloc(names.len()); for (i, (addr, name)) in names.iter().enumerate() { - names_arr.set(i, build_raw_name_entry(&mut cache, addr, name)); + names_arr.set(i, LeanIxonRawNameEntry::build(&mut cache, addr, name)); } let raw_ixon_env = LeanCtor::alloc(0, 5, 
0); @@ -452,10 +432,10 @@ pub extern "C" fn rs_compile_phases( /// FFI function to compile a Lean environment to a RawEnv. #[unsafe(no_mangle)] pub extern "C" fn rs_compile_env_to_ixon( - env_consts_ptr: LeanObject, + env_consts_ptr: LeanList, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let compile_stt = match compile_env(&rust_env) { @@ -522,7 +502,7 @@ pub extern "C" fn rs_compile_env_to_ixon( .collect(); let names_arr = LeanArray::alloc(names.len()); for (i, (addr, name)) in names.iter().enumerate() { - names_arr.set(i, build_raw_name_entry(&mut cache, addr, name)); + names_arr.set(i, LeanIxonRawNameEntry::build(&mut cache, addr, name)); } let result = LeanCtor::alloc(0, 5, 0); @@ -538,12 +518,12 @@ pub extern "C" fn rs_compile_env_to_ixon( /// FFI function to canonicalize environment to Ix.RawEnvironment. #[unsafe(no_mangle)] pub extern "C" fn rs_canonicalize_env_to_ix( - env_consts_ptr: LeanObject, + env_consts_ptr: LeanList, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - let raw_env = build_raw_environment(&mut cache, &rust_env); + let raw_env = LeanIxRawEnvironment::build(&mut cache, &rust_env); LeanIOResult::ok(raw_env) })) } @@ -585,10 +565,10 @@ extern "C" fn rs_test_ffi_roundtrip(name_ptr: LeanObject) -> u64 { /// FFI: Compile entire environment with Rust, returning a handle to RustCompiledEnv. 
#[unsafe(no_mangle)] extern "C" fn rs_compile_env_rust_first( - env_consts_ptr: LeanObject, + env_consts_ptr: LeanList, ) -> *mut RustCompiledEnv { // Decode Lean environment - let lean_env = decode_env(env_consts_ptr.as_list()); + let lean_env = decode_env(env_consts_ptr); let lean_env = Arc::new(lean_env); // Compile with Rust @@ -1059,330 +1039,336 @@ extern "C" fn rs_get_compiled_const_count( use crate::ix::ixon::error::{CompileError, DecompileError, SerializeError}; -/// Build a Lean Ixon.SerializeError from a Rust SerializeError. -/// -/// Tags 0–6: -/// 0: unexpectedEof (expected : String) → 1 obj -/// 1: invalidTag (tag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8) -/// 2: invalidFlag (flag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8) -/// 3: invalidVariant (variant : UInt64) (context : String) → 1 obj + 8 scalar (UInt64) -/// 4: invalidBool (value : UInt8) → 0 obj + 1 scalar (UInt8) -/// 5: addressError → 0 obj + 0 scalar -/// 6: invalidShareIndex (idx : UInt64) (max : Nat) → 1 obj (Nat) + 8 scalar (UInt64) -pub fn build_serialize_error(se: &SerializeError) -> LeanIxSerializeError { - let obj = match se { - SerializeError::UnexpectedEof { expected } => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, build_lean_string(expected)); - *ctor - }, - SerializeError::InvalidTag { tag, context } => { - let ctor = LeanCtor::alloc(1, 1, 1); - ctor.set(0, build_lean_string(context)); - ctor.set_u8(8, *tag); - *ctor - }, - SerializeError::InvalidFlag { flag, context } => { - let ctor = LeanCtor::alloc(2, 1, 1); - ctor.set(0, build_lean_string(context)); - ctor.set_u8(8, *flag); - *ctor - }, - SerializeError::InvalidVariant { variant, context } => { - let ctor = LeanCtor::alloc(3, 1, 8); - ctor.set(0, build_lean_string(context)); - ctor.set_u64(8, *variant); - *ctor - }, - SerializeError::InvalidBool { value } => { - let ctor = LeanCtor::alloc(4, 0, 1); - ctor.set_u8(0, *value); - *ctor - }, - SerializeError::AddressError => 
LeanObject::box_usize(5), - SerializeError::InvalidShareIndex { idx, max } => { - let ctor = LeanCtor::alloc(6, 1, 8); - ctor.set(0, build_lean_nat_usize(*max)); - ctor.set_u64(8, *idx); - *ctor - }, - }; - LeanIxSerializeError::new(obj) -} - -/// Decode a Lean Ixon.SerializeError to a Rust SerializeError. -pub fn decode_serialize_error(obj: LeanIxSerializeError) -> SerializeError { - // Tag 5 (addressError) has 0 fields → Lean represents as scalar - if obj.is_scalar() { - let tag = obj.unbox_usize(); - assert_eq!(tag, 5, "Invalid scalar SerializeError tag: {}", tag); - return SerializeError::AddressError; +impl LeanIxSerializeError { + /// Build a Lean Ixon.SerializeError from a Rust SerializeError. + /// + /// Tags 0–6: + /// 0: unexpectedEof (expected : String) → 1 obj + /// 1: invalidTag (tag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8) + /// 2: invalidFlag (flag : UInt8) (context : String) → 1 obj + 1 scalar (UInt8) + /// 3: invalidVariant (variant : UInt64) (context : String) → 1 obj + 8 scalar (UInt64) + /// 4: invalidBool (value : UInt8) → 0 obj + 1 scalar (UInt8) + /// 5: addressError → 0 obj + 0 scalar + /// 6: invalidShareIndex (idx : UInt64) (max : Nat) → 1 obj (Nat) + 8 scalar (UInt64) + pub fn build(se: &SerializeError) -> Self { + let obj = match se { + SerializeError::UnexpectedEof { expected } => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_lean_string(expected)); + *ctor + }, + SerializeError::InvalidTag { tag, context } => { + let ctor = LeanCtor::alloc(1, 1, 1); + ctor.set(0, build_lean_string(context)); + ctor.set_u8(8, *tag); + *ctor + }, + SerializeError::InvalidFlag { flag, context } => { + let ctor = LeanCtor::alloc(2, 1, 1); + ctor.set(0, build_lean_string(context)); + ctor.set_u8(8, *flag); + *ctor + }, + SerializeError::InvalidVariant { variant, context } => { + let ctor = LeanCtor::alloc(3, 1, 8); + ctor.set(0, build_lean_string(context)); + ctor.set_u64(8, *variant); + *ctor + }, + SerializeError::InvalidBool 
{ value } => { + let ctor = LeanCtor::alloc(4, 0, 1); + ctor.set_u8(0, *value); + *ctor + }, + SerializeError::AddressError => LeanObject::box_usize(5), + SerializeError::InvalidShareIndex { idx, max } => { + let ctor = LeanCtor::alloc(6, 1, 8); + ctor.set(0, build_lean_nat_usize(*max)); + ctor.set_u64(8, *idx); + *ctor + }, + }; + Self::new(obj) } - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - let expected = ctor.get(0).as_string().to_string(); - SerializeError::UnexpectedEof { expected } - }, - 1 => { - let context = ctor.get(0).as_string().to_string(); - let tag_val = ctor.scalar_u8(1, 0); - SerializeError::InvalidTag { tag: tag_val, context } - }, - 2 => { - let context = ctor.get(0).as_string().to_string(); - let flag = ctor.scalar_u8(1, 0); - SerializeError::InvalidFlag { flag, context } - }, - 3 => { - let context = ctor.get(0).as_string().to_string(); - let variant = ctor.scalar_u64(1, 0); - SerializeError::InvalidVariant { variant, context } - }, - 4 => { - let value = ctor.scalar_u8(0, 0); - SerializeError::InvalidBool { value } - }, - 5 => SerializeError::AddressError, - 6 => { - let max = Nat::from_obj(ctor.get(0)) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let idx = ctor.scalar_u64(1, 0); - SerializeError::InvalidShareIndex { idx, max } - }, - _ => unreachable!("Invalid SerializeError tag: {}", ctor.tag()), + + /// Decode a Lean Ixon.SerializeError to a Rust SerializeError. 
+ pub fn decode(self) -> SerializeError { + // Tag 5 (addressError) has 0 fields → Lean represents as scalar + if self.is_scalar() { + let tag = self.unbox_usize(); + assert_eq!(tag, 5, "Invalid scalar SerializeError tag: {}", tag); + return SerializeError::AddressError; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + let expected = ctor.get(0).as_string().to_string(); + SerializeError::UnexpectedEof { expected } + }, + 1 => { + let context = ctor.get(0).as_string().to_string(); + let tag_val = ctor.scalar_u8(1, 0); + SerializeError::InvalidTag { tag: tag_val, context } + }, + 2 => { + let context = ctor.get(0).as_string().to_string(); + let flag = ctor.scalar_u8(1, 0); + SerializeError::InvalidFlag { flag, context } + }, + 3 => { + let context = ctor.get(0).as_string().to_string(); + let variant = ctor.scalar_u64(1, 0); + SerializeError::InvalidVariant { variant, context } + }, + 4 => { + let value = ctor.scalar_u8(0, 0); + SerializeError::InvalidBool { value } + }, + 5 => SerializeError::AddressError, + 6 => { + let max = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let idx = ctor.scalar_u64(1, 0); + SerializeError::InvalidShareIndex { idx, max } + }, + _ => unreachable!("Invalid SerializeError tag: {}", ctor.tag()), + } } } -/// Build a Lean DecompileError from a Rust DecompileError. 
-/// -/// Layout for index variants (tags 0–4): -/// `(idx : UInt64) (len/max : Nat) (constant : String)` -/// → 2 object fields (Nat, String) + 8 scalar bytes (UInt64) -/// → `lean_alloc_ctor(tag, 2, 8)` -/// → obj[0] = Nat, obj[1] = String, scalar[0] = UInt64 -pub fn build_decompile_error(err: &DecompileError) -> LeanIxDecompileError { - let obj = match err { - DecompileError::InvalidRefIndex { idx, refs_len, constant } => { - let ctor = LeanCtor::alloc(0, 2, 8); - ctor.set(0, build_lean_nat_usize(*refs_len)); - ctor.set(1, build_lean_string(constant)); - ctor.set_u64(2 * 8, *idx); - *ctor - }, - DecompileError::InvalidUnivIndex { idx, univs_len, constant } => { - let ctor = LeanCtor::alloc(1, 2, 8); - ctor.set(0, build_lean_nat_usize(*univs_len)); - ctor.set(1, build_lean_string(constant)); - ctor.set_u64(2 * 8, *idx); - *ctor - }, - DecompileError::InvalidShareIndex { idx, max, constant } => { - let ctor = LeanCtor::alloc(2, 2, 8); - ctor.set(0, build_lean_nat_usize(*max)); - ctor.set(1, build_lean_string(constant)); - ctor.set_u64(2 * 8, *idx); - *ctor - }, - DecompileError::InvalidRecIndex { idx, ctx_size, constant } => { - let ctor = LeanCtor::alloc(3, 2, 8); - ctor.set(0, build_lean_nat_usize(*ctx_size)); - ctor.set(1, build_lean_string(constant)); - ctor.set_u64(2 * 8, *idx); - *ctor - }, - DecompileError::InvalidUnivVarIndex { idx, max, constant } => { - let ctor = LeanCtor::alloc(4, 2, 8); - ctor.set(0, build_lean_nat_usize(*max)); - ctor.set(1, build_lean_string(constant)); - ctor.set_u64(2 * 8, *idx); - *ctor - }, - DecompileError::MissingAddress(addr) => { - let ctor = LeanCtor::alloc(5, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - DecompileError::MissingMetadata(addr) => { - let ctor = LeanCtor::alloc(6, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - DecompileError::BlobNotFound(addr) => { - let ctor = LeanCtor::alloc(7, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - 
DecompileError::BadBlobFormat { addr, expected } => { - let ctor = LeanCtor::alloc(8, 2, 0); - ctor.set(0, build_address_from_ixon(addr)); - ctor.set(1, build_lean_string(expected)); - *ctor - }, - DecompileError::BadConstantFormat { msg } => { - let ctor = LeanCtor::alloc(9, 1, 0); - ctor.set(0, build_lean_string(msg)); - *ctor - }, - DecompileError::Serialize(se) => { - let ctor = LeanCtor::alloc(10, 1, 0); - ctor.set(0, build_serialize_error(se)); - *ctor - }, - }; - LeanIxDecompileError::new(obj) -} +impl LeanIxDecompileError { + /// Build a Lean DecompileError from a Rust DecompileError. + /// + /// Layout for index variants (tags 0–4): + /// `(idx : UInt64) (len/max : Nat) (constant : String)` + /// → 2 object fields (Nat, String) + 8 scalar bytes (UInt64) + /// → `lean_alloc_ctor(tag, 2, 8)` + /// → obj[0] = Nat, obj[1] = String, scalar[0] = UInt64 + pub fn build(err: &DecompileError) -> Self { + let obj = match err { + DecompileError::InvalidRefIndex { idx, refs_len, constant } => { + let ctor = LeanCtor::alloc(0, 2, 8); + ctor.set(0, build_lean_nat_usize(*refs_len)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidUnivIndex { idx, univs_len, constant } => { + let ctor = LeanCtor::alloc(1, 2, 8); + ctor.set(0, build_lean_nat_usize(*univs_len)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidShareIndex { idx, max, constant } => { + let ctor = LeanCtor::alloc(2, 2, 8); + ctor.set(0, build_lean_nat_usize(*max)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidRecIndex { idx, ctx_size, constant } => { + let ctor = LeanCtor::alloc(3, 2, 8); + ctor.set(0, build_lean_nat_usize(*ctx_size)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::InvalidUnivVarIndex { idx, max, constant } => { + let ctor = LeanCtor::alloc(4, 2, 8); 
+ ctor.set(0, build_lean_nat_usize(*max)); + ctor.set(1, build_lean_string(constant)); + ctor.set_u64(2 * 8, *idx); + *ctor + }, + DecompileError::MissingAddress(addr) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + DecompileError::MissingMetadata(addr) => { + let ctor = LeanCtor::alloc(6, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + DecompileError::BlobNotFound(addr) => { + let ctor = LeanCtor::alloc(7, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + DecompileError::BadBlobFormat { addr, expected } => { + let ctor = LeanCtor::alloc(8, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, build_lean_string(expected)); + *ctor + }, + DecompileError::BadConstantFormat { msg } => { + let ctor = LeanCtor::alloc(9, 1, 0); + ctor.set(0, build_lean_string(msg)); + *ctor + }, + DecompileError::Serialize(se) => { + let ctor = LeanCtor::alloc(10, 1, 0); + ctor.set(0, LeanIxSerializeError::build(se)); + *ctor + }, + }; + Self::new(obj) + } -/// Decode a Lean DecompileError to a Rust DecompileError. 
-pub fn decode_decompile_error(obj: LeanIxDecompileError) -> DecompileError { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - let refs_len = Nat::from_obj(ctor.get(0)) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = ctor.get(1).as_string().to_string(); - let idx = ctor.scalar_u64(2, 0); - DecompileError::InvalidRefIndex { idx, refs_len, constant } - }, - 1 => { - let univs_len = Nat::from_obj(ctor.get(0)) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = ctor.get(1).as_string().to_string(); - let idx = ctor.scalar_u64(2, 0); - DecompileError::InvalidUnivIndex { idx, univs_len, constant } - }, - 2 => { - let max = Nat::from_obj(ctor.get(0)) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = ctor.get(1).as_string().to_string(); - let idx = ctor.scalar_u64(2, 0); - DecompileError::InvalidShareIndex { idx, max, constant } - }, - 3 => { - let ctx_size = Nat::from_obj(ctor.get(0)) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = ctor.get(1).as_string().to_string(); - let idx = ctor.scalar_u64(2, 0); - DecompileError::InvalidRecIndex { idx, ctx_size, constant } - }, - 4 => { - let max = Nat::from_obj(ctor.get(0)) - .to_u64() - .and_then(|x| usize::try_from(x).ok()) - .unwrap_or(0); - let constant = ctor.get(1).as_string().to_string(); - let idx = ctor.scalar_u64(2, 0); - DecompileError::InvalidUnivVarIndex { idx, max, constant } - }, - 5 => DecompileError::MissingAddress(decode_ixon_address( - ctor.get(0).as_byte_array(), - )), - 6 => DecompileError::MissingMetadata(decode_ixon_address( - ctor.get(0).as_byte_array(), - )), - 7 => DecompileError::BlobNotFound(decode_ixon_address( - ctor.get(0).as_byte_array(), - )), - 8 => { - let addr = decode_ixon_address(ctor.get(0).as_byte_array()); - let expected = ctor.get(1).as_string().to_string(); - DecompileError::BadBlobFormat { addr, expected } - }, - 9 => { - let msg 
= ctor.get(0).as_string().to_string(); - DecompileError::BadConstantFormat { msg } - }, - 10 => DecompileError::Serialize(decode_serialize_error( - LeanIxSerializeError::new(ctor.get(0)), - )), - _ => unreachable!("Invalid DecompileError tag: {}", ctor.tag()), + /// Decode a Lean DecompileError to a Rust DecompileError. + pub fn decode(self) -> DecompileError { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + let refs_len = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidRefIndex { idx, refs_len, constant } + }, + 1 => { + let univs_len = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidUnivIndex { idx, univs_len, constant } + }, + 2 => { + let max = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidShareIndex { idx, max, constant } + }, + 3 => { + let ctx_size = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidRecIndex { idx, ctx_size, constant } + }, + 4 => { + let max = Nat::from_obj(ctor.get(0)) + .to_u64() + .and_then(|x| usize::try_from(x).ok()) + .unwrap_or(0); + let constant = ctor.get(1).as_string().to_string(); + let idx = ctor.scalar_u64(2, 0); + DecompileError::InvalidUnivVarIndex { idx, max, constant } + }, + 5 => DecompileError::MissingAddress( + LeanIxAddress::new(ctor.get(0)).decode(), + ), + 6 => DecompileError::MissingMetadata( + LeanIxAddress::new(ctor.get(0)).decode(), + ), + 7 => 
DecompileError::BlobNotFound( + LeanIxAddress::new(ctor.get(0)).decode(), + ), + 8 => { + let addr = LeanIxAddress::new(ctor.get(0)).decode(); + let expected = ctor.get(1).as_string().to_string(); + DecompileError::BadBlobFormat { addr, expected } + }, + 9 => { + let msg = ctor.get(0).as_string().to_string(); + DecompileError::BadConstantFormat { msg } + }, + 10 => DecompileError::Serialize( + LeanIxSerializeError::new(ctor.get(0)).decode(), + ), + _ => unreachable!("Invalid DecompileError tag: {}", ctor.tag()), + } } } -/// Build a Lean CompileError from a Rust CompileError. -/// -/// Tags 0–5: -/// 0: missingConstant (name : String) → 1 obj -/// 1: missingAddress (addr : Address) → 1 obj -/// 2: invalidMutualBlock (reason : String) → 1 obj -/// 3: unsupportedExpr (desc : String) → 1 obj -/// 4: unknownUnivParam (curr param : String) → 2 obj -/// 5: serializeError (msg : String) → 1 obj -pub fn build_compile_error(err: &CompileError) -> LeanIxCompileError { - let obj = match err { - CompileError::MissingConstant { name } => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, build_lean_string(name)); - *ctor - }, - CompileError::MissingAddress(addr) => { - let ctor = LeanCtor::alloc(1, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - CompileError::InvalidMutualBlock { reason } => { - let ctor = LeanCtor::alloc(2, 1, 0); - ctor.set(0, build_lean_string(reason)); - *ctor - }, - CompileError::UnsupportedExpr { desc } => { - let ctor = LeanCtor::alloc(3, 1, 0); - ctor.set(0, build_lean_string(desc)); - *ctor - }, - CompileError::UnknownUnivParam { curr, param } => { - let ctor = LeanCtor::alloc(4, 2, 0); - ctor.set(0, build_lean_string(curr)); - ctor.set(1, build_lean_string(param)); - *ctor - }, - CompileError::Serialize(se) => { - let ctor = LeanCtor::alloc(5, 1, 0); - ctor.set(0, build_serialize_error(se)); - *ctor - }, - }; - LeanIxCompileError::new(obj) -} +impl LeanIxCompileError { + /// Build a Lean CompileError from a Rust CompileError. 
+ /// + /// Tags 0–5: + /// 0: missingConstant (name : String) → 1 obj + /// 1: missingAddress (addr : Address) → 1 obj + /// 2: invalidMutualBlock (reason : String) → 1 obj + /// 3: unsupportedExpr (desc : String) → 1 obj + /// 4: unknownUnivParam (curr param : String) → 2 obj + /// 5: serializeError (msg : String) → 1 obj + pub fn build(err: &CompileError) -> Self { + let obj = match err { + CompileError::MissingConstant { name } => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, build_lean_string(name)); + *ctor + }, + CompileError::MissingAddress(addr) => { + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + CompileError::InvalidMutualBlock { reason } => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, build_lean_string(reason)); + *ctor + }, + CompileError::UnsupportedExpr { desc } => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, build_lean_string(desc)); + *ctor + }, + CompileError::UnknownUnivParam { curr, param } => { + let ctor = LeanCtor::alloc(4, 2, 0); + ctor.set(0, build_lean_string(curr)); + ctor.set(1, build_lean_string(param)); + *ctor + }, + CompileError::Serialize(se) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, LeanIxSerializeError::build(se)); + *ctor + }, + }; + Self::new(obj) + } -/// Decode a Lean CompileError to a Rust CompileError. 
-pub fn decode_compile_error(obj: LeanIxCompileError) -> CompileError { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - let name = ctor.get(0).as_string().to_string(); - CompileError::MissingConstant { name } - }, - 1 => CompileError::MissingAddress(decode_ixon_address( - ctor.get(0).as_byte_array(), - )), - 2 => { - let reason = ctor.get(0).as_string().to_string(); - CompileError::InvalidMutualBlock { reason } - }, - 3 => { - let desc = ctor.get(0).as_string().to_string(); - CompileError::UnsupportedExpr { desc } - }, - 4 => { - let curr = ctor.get(0).as_string().to_string(); - let param = ctor.get(1).as_string().to_string(); - CompileError::UnknownUnivParam { curr, param } - }, - 5 => CompileError::Serialize(decode_serialize_error( - LeanIxSerializeError::new(ctor.get(0)), - )), - _ => unreachable!("Invalid CompileError tag: {}", ctor.tag()), + /// Decode a Lean CompileError to a Rust CompileError. + pub fn decode(self) -> CompileError { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + let name = ctor.get(0).as_string().to_string(); + CompileError::MissingConstant { name } + }, + 1 => CompileError::MissingAddress( + LeanIxAddress::new(ctor.get(0)).decode(), + ), + 2 => { + let reason = ctor.get(0).as_string().to_string(); + CompileError::InvalidMutualBlock { reason } + }, + 3 => { + let desc = ctor.get(0).as_string().to_string(); + CompileError::UnsupportedExpr { desc } + }, + 4 => { + let curr = ctor.get(0).as_string().to_string(); + let param = ctor.get(1).as_string().to_string(); + CompileError::UnknownUnivParam { curr, param } + }, + 5 => CompileError::Serialize( + LeanIxSerializeError::new(ctor.get(0)).decode(), + ), + _ => unreachable!("Invalid CompileError tag: {}", ctor.tag()), + } } } @@ -1391,8 +1377,8 @@ pub fn decode_compile_error(obj: LeanIxCompileError) -> CompileError { pub extern "C" fn rs_roundtrip_decompile_error( obj: LeanIxDecompileError, ) -> LeanIxDecompileError { - let err = decode_decompile_error(obj); - 
build_decompile_error(&err) + let err = obj.decode(); + LeanIxDecompileError::build(&err) } /// FFI: Round-trip a CompileError: Lean → Rust → Lean. @@ -1400,8 +1386,8 @@ pub extern "C" fn rs_roundtrip_decompile_error( pub extern "C" fn rs_roundtrip_compile_error( obj: LeanIxCompileError, ) -> LeanIxCompileError { - let err = decode_compile_error(obj); - build_compile_error(&err) + let err = obj.decode(); + LeanIxCompileError::build(&err) } /// FFI: Round-trip a SerializeError: Lean → Rust → Lean. @@ -1409,8 +1395,8 @@ pub extern "C" fn rs_roundtrip_compile_error( pub extern "C" fn rs_roundtrip_serialize_error( obj: LeanIxSerializeError, ) -> LeanIxSerializeError { - let err = decode_serialize_error(obj); - build_serialize_error(&err) + let err = obj.decode(); + LeanIxSerializeError::build(&err) } // ============================================================================= @@ -1420,7 +1406,7 @@ pub extern "C" fn rs_roundtrip_serialize_error( /// FFI: Decompile an Ixon.RawEnv → Except DecompileError (Array (Ix.Name × Ix.ConstantInfo)). Pure. 
#[unsafe(no_mangle)] pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanExcept { - let decoded = decode_raw_env(raw_env_obj); + let decoded = raw_env_obj.decode(); let env = decoded_to_ixon_env(&decoded); // Wrap in CompileState (decompile_env only uses .env) @@ -1438,8 +1424,8 @@ pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanExcept { let arr = LeanArray::alloc(entries.len()); for (i, (name, info)) in entries.iter().enumerate() { - let name_obj = build_name(&mut cache, name); - let info_obj = build_constant_info(&mut cache, info); + let name_obj = LeanIxName::build(&mut cache, name); + let info_obj = LeanIxConstantInfo::build(&mut cache, info); let pair = LeanCtor::alloc(0, 2, 0); pair.set(0, name_obj); pair.set(1, info_obj); @@ -1448,6 +1434,6 @@ pub extern "C" fn rs_decompile_env(raw_env_obj: LeanIxonRawEnv) -> LeanExcept { LeanExcept::ok(arr) }, - Err(e) => LeanExcept::error(build_decompile_error(&e)), + Err(e) => LeanExcept::error(LeanIxDecompileError::build(&e)), } } diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index 537f7f6e..6402d076 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -6,11 +6,11 @@ use crate::ffi::ffi_io_guard; use crate::ix::condense::compute_sccs; use crate::ix::graph::build_ref_graph; use crate::lean::LeanIxCondensedBlocks; -use lean_ffi::object::{LeanArray, LeanCtor, LeanIOResult, LeanObject}; +use lean_ffi::object::{LeanArray, LeanCtor, LeanIOResult, LeanList}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::name::build_name; use crate::ffi::lean_env::decode_env; +use crate::lean::LeanIxName; /// Build an Array (Ix.Name × Array Ix.Name) from a RefMap. 
pub fn build_ref_graph_array( @@ -19,11 +19,11 @@ pub fn build_ref_graph_array( ) -> LeanArray { let arr = LeanArray::alloc(refs.len()); for (i, (name, ref_set)) in refs.iter().enumerate() { - let name_obj = build_name(cache, name); + let name_obj = LeanIxName::build(cache, name); let refs_arr = LeanArray::alloc(ref_set.len()); for (j, ref_name) in ref_set.iter().enumerate() { - let ref_name_obj = build_name(cache, ref_name); + let ref_name_obj = LeanIxName::build(cache, ref_name); refs_arr.set(j, ref_name_obj); } @@ -35,58 +35,60 @@ pub fn build_ref_graph_array( arr } -/// Build a RustCondensedBlocks structure. -pub fn build_condensed_blocks( - cache: &mut LeanBuildCache, - condensed: &crate::ix::condense::CondensedBlocks, -) -> LeanIxCondensedBlocks { - // Build lowLinks: Array (Ix.Name × Ix.Name) - let low_links_arr = LeanArray::alloc(condensed.low_links.len()); - for (i, (name, low_link)) in condensed.low_links.iter().enumerate() { - let name_obj = build_name(cache, name); - let low_link_obj = build_name(cache, low_link); - let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, name_obj); - pair.set(1, low_link_obj); - low_links_arr.set(i, *pair); - } +impl LeanIxCondensedBlocks { + /// Build a RustCondensedBlocks structure. 
+ pub fn build( + cache: &mut LeanBuildCache, + condensed: &crate::ix::condense::CondensedBlocks, + ) -> Self { + // Build lowLinks: Array (Ix.Name × Ix.Name) + let low_links_arr = LeanArray::alloc(condensed.low_links.len()); + for (i, (name, low_link)) in condensed.low_links.iter().enumerate() { + let name_obj = LeanIxName::build(cache, name); + let low_link_obj = LeanIxName::build(cache, low_link); + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, low_link_obj); + low_links_arr.set(i, *pair); + } - // Build blocks: Array (Ix.Name × Array Ix.Name) - let blocks_arr = LeanArray::alloc(condensed.blocks.len()); - for (i, (name, block_set)) in condensed.blocks.iter().enumerate() { - let name_obj = build_name(cache, name); - let block_names_arr = LeanArray::alloc(block_set.len()); - for (j, block_name) in block_set.iter().enumerate() { - let block_name_obj = build_name(cache, block_name); - block_names_arr.set(j, block_name_obj); + // Build blocks: Array (Ix.Name × Array Ix.Name) + let blocks_arr = LeanArray::alloc(condensed.blocks.len()); + for (i, (name, block_set)) in condensed.blocks.iter().enumerate() { + let name_obj = LeanIxName::build(cache, name); + let block_names_arr = LeanArray::alloc(block_set.len()); + for (j, block_name) in block_set.iter().enumerate() { + let block_name_obj = LeanIxName::build(cache, block_name); + block_names_arr.set(j, block_name_obj); + } + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, *block_names_arr); + blocks_arr.set(i, *pair); } - let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, name_obj); - pair.set(1, *block_names_arr); - blocks_arr.set(i, *pair); - } - // Build blockRefs: Array (Ix.Name × Array Ix.Name) - let block_refs_arr = LeanArray::alloc(condensed.block_refs.len()); - for (i, (name, ref_set)) in condensed.block_refs.iter().enumerate() { - let name_obj = build_name(cache, name); - let refs_arr = LeanArray::alloc(ref_set.len()); - for (j, ref_name) in 
ref_set.iter().enumerate() { - let ref_name_obj = build_name(cache, ref_name); - refs_arr.set(j, ref_name_obj); + // Build blockRefs: Array (Ix.Name × Array Ix.Name) + let block_refs_arr = LeanArray::alloc(condensed.block_refs.len()); + for (i, (name, ref_set)) in condensed.block_refs.iter().enumerate() { + let name_obj = LeanIxName::build(cache, name); + let refs_arr = LeanArray::alloc(ref_set.len()); + for (j, ref_name) in ref_set.iter().enumerate() { + let ref_name_obj = LeanIxName::build(cache, ref_name); + refs_arr.set(j, ref_name_obj); + } + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, *refs_arr); + block_refs_arr.set(i, *pair); } - let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, name_obj); - pair.set(1, *refs_arr); - block_refs_arr.set(i, *pair); - } - // Build RustCondensedBlocks structure (3 fields) - let result = LeanCtor::alloc(0, 3, 0); - result.set(0, *low_links_arr); - result.set(1, *blocks_arr); - result.set(2, *block_refs_arr); - LeanIxCondensedBlocks::new(*result) + // Build RustCondensedBlocks structure (3 fields) + let result = LeanCtor::alloc(0, 3, 0); + result.set(0, *low_links_arr); + result.set(1, *blocks_arr); + result.set(2, *block_refs_arr); + Self::new(*result) + } } // ============================================================================= @@ -96,10 +98,10 @@ pub fn build_condensed_blocks( /// FFI function to build a reference graph from a Lean environment. #[unsafe(no_mangle)] pub extern "C" fn rs_build_ref_graph( - env_consts_ptr: LeanObject, + env_consts_ptr: LeanList, ) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); @@ -110,14 +112,14 @@ pub extern "C" fn rs_build_ref_graph( /// FFI function to compute SCCs from a Lean environment. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanObject) -> LeanIOResult { +pub extern "C" fn rs_compute_sccs(env_consts_ptr: LeanList) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let rust_env = Arc::new(rust_env); let ref_graph = build_ref_graph(&rust_env); let condensed = compute_sccs(&ref_graph.out_refs); let mut cache = LeanBuildCache::with_capacity(rust_env.len()); - let result = build_condensed_blocks(&mut cache, &condensed); + let result = LeanIxCondensedBlocks::build(&mut cache, &condensed); LeanIOResult::ok(result) })) } diff --git a/src/ffi/ix/address.rs b/src/ffi/ix/address.rs index 62d0a6ea..9531e9ba 100644 --- a/src/ffi/ix/address.rs +++ b/src/ffi/ix/address.rs @@ -2,13 +2,40 @@ //! //! Address = { hash : ByteArray } - ByteArray wrapper for blake3 Hash +use crate::ix::address::Address; use crate::lean::LeanIxAddress; -use lean_ffi::object::LeanByteArray; +use lean_ffi::object::{LeanArray, LeanByteArray}; -/// Build a Ix.Address from a blake3::Hash. -/// Address = { hash : ByteArray } - single field struct, so UNBOXED to ByteArray -pub fn build_address(hash: &blake3::Hash) -> LeanIxAddress { - LeanByteArray::from_bytes(hash.as_bytes()) +impl LeanIxAddress { + /// Build an Ix.Address from a blake3::Hash. + pub fn build_from_hash(hash: &blake3::Hash) -> Self { + LeanByteArray::from_bytes(hash.as_bytes()).into() + } + + /// Build an Ix.Address from an Ixon Address (which is just a [u8; 32]). + pub fn build(addr: &Address) -> Self { + LeanByteArray::from_bytes(addr.as_bytes()).into() + } + + /// Build an Array of Addresses. + pub fn build_array(addrs: &[Address]) -> LeanArray { + let arr = LeanArray::alloc(addrs.len()); + for (i, addr) in addrs.iter().enumerate() { + arr.set(i, Self::build(addr)); + } + arr + } + + /// Decode a ByteArray (Address) to Address. 
+ pub fn decode(self) -> Address { + Address::from_slice(&self.as_bytes()[..32]) + .expect("Address should be 32 bytes") + } + + /// Decode Array Address. + pub fn decode_array(obj: LeanArray) -> Vec
{ + obj.map(|x| LeanIxAddress::new(x).decode()) + } } /// Round-trip an Ix.Address: decode ByteArray, re-encode. @@ -17,7 +44,6 @@ pub fn build_address(hash: &blake3::Hash) -> LeanIxAddress { pub extern "C" fn rs_roundtrip_ix_address( addr: LeanIxAddress, ) -> LeanIxAddress { - // Address is a single-field struct { hash : ByteArray } - // Due to unboxing, addr IS the ByteArray directly - LeanByteArray::from_bytes(addr.as_bytes()) + let decoded = addr.decode(); + LeanIxAddress::build(&decoded) } diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index 4dc8f2e8..50489cad 100644 --- a/src/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -23,415 +23,424 @@ use lean_ffi::nat::Nat; use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::expr::{build_expr, decode_ix_expr}; -use crate::ffi::ix::name::{ - build_name, build_name_array, decode_ix_name, decode_name_array, -}; use crate::ffi::primitives::build_nat; -/// Build a Ix.ConstantVal structure. -pub fn build_constant_val( - cache: &mut LeanBuildCache, - cv: &ConstantVal, -) -> LeanIxConstantVal { - // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } - let name_obj = build_name(cache, &cv.name); - let level_params_obj = build_name_array(cache, &cv.level_params); - let type_obj = build_expr(cache, &cv.typ); - - let obj = LeanCtor::alloc(0, 3, 0); - obj.set(0, name_obj); - obj.set(1, level_params_obj); - obj.set(2, type_obj); - LeanIxConstantVal::new(*obj) -} +// ============================================================================= +// ConstantVal +// ============================================================================= -/// Build ReducibilityHints. -/// NOTE: In Lean 4, 0-field constructors are boxed scalars when the inductive has -/// other constructors with fields. So opaque and abbrev use box_usize. 
-pub fn build_reducibility_hints( - hints: &ReducibilityHints, -) -> LeanIxReducibilityHints { - let obj = match hints { - // | opaque -- tag 0, boxed as scalar - ReducibilityHints::Opaque => LeanObject::box_usize(0), - // | abbrev -- tag 1, boxed as scalar - ReducibilityHints::Abbrev => LeanObject::box_usize(1), - // | regular (h : UInt32) -- tag 2, object constructor - ReducibilityHints::Regular(h) => { - // UInt32 is a scalar, stored inline - let obj = LeanCtor::alloc(2, 0, 4); - obj.set_u32(0, *h); - *obj - }, - }; - LeanIxReducibilityHints::new(obj) -} +impl LeanIxConstantVal { + /// Build a Ix.ConstantVal structure. + pub fn build(cache: &mut LeanBuildCache, cv: &ConstantVal) -> Self { + // ConstantVal = { name : Name, levelParams : Array Name, type : Expr } + let name_obj = LeanIxName::build(cache, &cv.name); + let level_params_obj = LeanIxName::build_array(cache, &cv.level_params); + let type_obj = LeanIxExpr::build(cache, &cv.typ); + + let obj = LeanCtor::alloc(0, 3, 0); + obj.set(0, name_obj); + obj.set(1, level_params_obj); + obj.set(2, type_obj); + Self::new(*obj) + } -/// Build a Ix.ConstantInfo from a Rust ConstantInfo. 
-pub fn build_constant_info( - cache: &mut LeanBuildCache, - info: &ConstantInfo, -) -> LeanIxConstantInfo { - let result = match info { - // | axiomInfo (v : AxiomVal) -- tag 0 - ConstantInfo::AxiomInfo(v) => { - // AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } - let cnst_obj = build_constant_val(cache, &v.cnst); - let axiom_val = LeanCtor::alloc(0, 1, 1); - axiom_val.set(0, cnst_obj); - axiom_val.set_u8(8, v.is_unsafe as u8); - - let obj = LeanCtor::alloc(0, 1, 0); - obj.set(0, axiom_val); - *obj - }, - // | defnInfo (v : DefinitionVal) -- tag 1 - ConstantInfo::DefnInfo(v) => { - // DefinitionVal = { cnst, value, hints, safety, all } - // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety) - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let hints_obj = build_reducibility_hints(&v.hints); - let all_obj = build_name_array(cache, &v.all); - let safety_byte = match v.safety { - DefinitionSafety::Unsafe => 0u8, - DefinitionSafety::Safe => 1u8, - DefinitionSafety::Partial => 2u8, - }; - - let defn_val = LeanCtor::alloc(0, 4, 1); - defn_val.set(0, cnst_obj); - defn_val.set(1, value_obj); - defn_val.set(2, hints_obj); - defn_val.set(3, all_obj); - defn_val.set_u8(4 * 8, safety_byte); - - let obj = LeanCtor::alloc(1, 1, 0); - obj.set(0, defn_val); - *obj - }, - // | thmInfo (v : TheoremVal) -- tag 2 - ConstantInfo::ThmInfo(v) => { - // TheoremVal = { cnst, value, all } - let cnst_obj = build_constant_val(cache, &v.cnst); - let value_obj = build_expr(cache, &v.value); - let all_obj = build_name_array(cache, &v.all); - - let thm_val = LeanCtor::alloc(0, 3, 0); - thm_val.set(0, cnst_obj); - thm_val.set(1, value_obj); - thm_val.set(2, all_obj); - - let obj = LeanCtor::alloc(2, 1, 0); - obj.set(0, thm_val); - *obj - }, - // | opaqueInfo (v : OpaqueVal) -- tag 3 - ConstantInfo::OpaqueInfo(v) => { - // OpaqueVal = { cnst, value, isUnsafe, all } - let cnst_obj = build_constant_val(cache, 
&v.cnst); - let value_obj = build_expr(cache, &v.value); - let all_obj = build_name_array(cache, &v.all); - - let opaque_val = LeanCtor::alloc(0, 3, 1); - opaque_val.set(0, cnst_obj); - opaque_val.set(1, value_obj); - opaque_val.set(2, all_obj); - opaque_val.set_u8(3 * 8, v.is_unsafe as u8); - - let obj = LeanCtor::alloc(3, 1, 0); - obj.set(0, opaque_val); - *obj - }, - // | quotInfo (v : QuotVal) -- tag 4 - ConstantInfo::QuotInfo(v) => { - // QuotVal = { cnst, kind } - // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) - let cnst_obj = build_constant_val(cache, &v.cnst); - let kind_byte = match v.kind { - QuotKind::Type => 0u8, - QuotKind::Ctor => 1u8, - QuotKind::Lift => 2u8, - QuotKind::Ind => 3u8, - }; - - let quot_val = LeanCtor::alloc(0, 1, 1); - quot_val.set(0, cnst_obj); - quot_val.set_u8(8, kind_byte); - - let obj = LeanCtor::alloc(4, 1, 0); - obj.set(0, quot_val); - *obj - }, - // | inductInfo (v : InductiveVal) -- tag 5 - ConstantInfo::InductInfo(v) => { - // InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } - let cnst_obj = build_constant_val(cache, &v.cnst); - let num_params_obj = build_nat(&v.num_params); - let num_indices_obj = build_nat(&v.num_indices); - let all_obj = build_name_array(cache, &v.all); - let ctors_obj = build_name_array(cache, &v.ctors); - let num_nested_obj = build_nat(&v.num_nested); - - // 6 object fields, 3 scalar bytes for bools - let induct_val = LeanCtor::alloc(0, 6, 3); - induct_val.set(0, cnst_obj); - induct_val.set(1, num_params_obj); - induct_val.set(2, num_indices_obj); - induct_val.set(3, all_obj); - induct_val.set(4, ctors_obj); - induct_val.set(5, num_nested_obj); - induct_val.set_u8(6 * 8, v.is_rec as u8); - induct_val.set_u8(6 * 8 + 1, v.is_unsafe as u8); - induct_val.set_u8(6 * 8 + 2, v.is_reflexive as u8); - - let obj = LeanCtor::alloc(5, 1, 0); - obj.set(0, induct_val); - *obj - }, - // | ctorInfo (v : ConstructorVal) -- tag 6 - ConstantInfo::CtorInfo(v) => 
{ - // ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } - let cnst_obj = build_constant_val(cache, &v.cnst); - let induct_obj = build_name(cache, &v.induct); - let cidx_obj = build_nat(&v.cidx); - let num_params_obj = build_nat(&v.num_params); - let num_fields_obj = build_nat(&v.num_fields); - - // 5 object fields, 1 scalar byte for bool - let ctor_val = LeanCtor::alloc(0, 5, 1); - ctor_val.set(0, cnst_obj); - ctor_val.set(1, induct_obj); - ctor_val.set(2, cidx_obj); - ctor_val.set(3, num_params_obj); - ctor_val.set(4, num_fields_obj); - ctor_val.set_u8(5 * 8, v.is_unsafe as u8); - - let obj = LeanCtor::alloc(6, 1, 0); - obj.set(0, ctor_val); - *obj - }, - // | recInfo (v : RecursorVal) -- tag 7 - ConstantInfo::RecInfo(v) => { - // RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } - let cnst_obj = build_constant_val(cache, &v.cnst); - let all_obj = build_name_array(cache, &v.all); - let num_params_obj = build_nat(&v.num_params); - let num_indices_obj = build_nat(&v.num_indices); - let num_motives_obj = build_nat(&v.num_motives); - let num_minors_obj = build_nat(&v.num_minors); - let rules_obj = build_recursor_rules(cache, &v.rules); - - // 7 object fields, 2 scalar bytes for bools - let rec_val = LeanCtor::alloc(0, 7, 2); - rec_val.set(0, cnst_obj); - rec_val.set(1, all_obj); - rec_val.set(2, num_params_obj); - rec_val.set(3, num_indices_obj); - rec_val.set(4, num_motives_obj); - rec_val.set(5, num_minors_obj); - rec_val.set(6, rules_obj); - rec_val.set_u8(7 * 8, v.k as u8); - rec_val.set_u8(7 * 8 + 1, v.is_unsafe as u8); - - let obj = LeanCtor::alloc(7, 1, 0); - obj.set(0, rec_val); - *obj - }, - }; - - LeanIxConstantInfo::new(result) -} + /// Decode Ix.ConstantVal from Lean object. 
+ /// ConstantVal = { name : Name, levelParams : Array Name, type : Expr } + pub fn decode(self) -> ConstantVal { + let ctor = self.as_ctor(); + let name = LeanIxName::new(ctor.get(0)).decode(); + let level_params: Vec = + ctor.get(1).as_array().map(|x| LeanIxName::new(x).decode()); + let typ = LeanIxExpr::new(ctor.get(2)).decode(); -/// Build an Array of RecursorRule. -fn build_recursor_rules( - cache: &mut LeanBuildCache, - rules: &[RecursorRule], -) -> LeanArray { - let arr = LeanArray::alloc(rules.len()); - for (i, rule) in rules.iter().enumerate() { - // RecursorRule = { ctor : Name, nFields : Nat, rhs : Expr } - let ctor_obj = build_name(cache, &rule.ctor); - let n_fields_obj = build_nat(&rule.n_fields); - let rhs_obj = build_expr(cache, &rule.rhs); - - let rule_obj = LeanCtor::alloc(0, 3, 0); - rule_obj.set(0, ctor_obj); - rule_obj.set(1, n_fields_obj); - rule_obj.set(2, rhs_obj); - - arr.set(i, rule_obj); + ConstantVal { name, level_params, typ } } - arr } // ============================================================================= -// ConstantInfo Decoders +// ReducibilityHints // ============================================================================= -/// Decode Ix.ConstantVal from Lean object. -/// ConstantVal = { name : Name, levelParams : Array Name, type : Expr } -pub fn decode_constant_val(obj: LeanIxConstantVal) -> ConstantVal { - let ctor = obj.as_ctor(); - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let level_params: Vec = - ctor.get(1).as_array().map(|x| decode_ix_name(LeanIxName::new(x))); - let typ = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); +impl LeanIxReducibilityHints { + /// Build ReducibilityHints. + /// NOTE: In Lean 4, 0-field constructors are boxed scalars when the inductive has + /// other constructors with fields. So opaque and abbrev use box_usize. 
+ pub fn build(hints: &ReducibilityHints) -> Self { + let obj = match hints { + // | opaque -- tag 0, boxed as scalar + ReducibilityHints::Opaque => LeanObject::box_usize(0), + // | abbrev -- tag 1, boxed as scalar + ReducibilityHints::Abbrev => LeanObject::box_usize(1), + // | regular (h : UInt32) -- tag 2, object constructor + ReducibilityHints::Regular(h) => { + // UInt32 is a scalar, stored inline + let obj = LeanCtor::alloc(2, 0, 4); + obj.set_u32(0, *h); + *obj + }, + }; + Self::new(obj) + } - ConstantVal { name, level_params, typ } -} + /// Decode Lean.ReducibilityHints from Lean object. + pub fn decode(self) -> ReducibilityHints { + if self.is_scalar() { + let tag = self.as_ptr() as usize >> 1; + match tag { + 0 => return ReducibilityHints::Opaque, + 1 => return ReducibilityHints::Abbrev, + _ => panic!("Invalid ReducibilityHints scalar tag: {}", tag), + } + } -/// Decode Lean.ReducibilityHints from Lean object. -pub fn decode_reducibility_hints( - obj: LeanIxReducibilityHints, -) -> ReducibilityHints { - if obj.is_scalar() { - let tag = obj.as_ptr() as usize >> 1; - match tag { - 0 => return ReducibilityHints::Opaque, - 1 => return ReducibilityHints::Abbrev, - _ => panic!("Invalid ReducibilityHints scalar tag: {}", tag), + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => ReducibilityHints::Opaque, + 1 => ReducibilityHints::Abbrev, + 2 => { + // regular: 0 obj fields, 4 scalar bytes (UInt32) + ReducibilityHints::Regular(ctor.scalar_u32(0, 0)) + }, + _ => panic!("Invalid ReducibilityHints tag: {}", ctor.tag()), } } +} - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => ReducibilityHints::Opaque, - 1 => ReducibilityHints::Abbrev, - 2 => { - // regular: 0 obj fields, 4 scalar bytes (UInt32) - ReducibilityHints::Regular(ctor.scalar_u32(0, 0)) - }, - _ => panic!("Invalid ReducibilityHints tag: {}", ctor.tag()), +// ============================================================================= +// RecursorRule +// 
============================================================================= + +impl LeanIxRecursorRule { + /// Decode Ix.RecursorRule from Lean object. + pub fn decode(self) -> RecursorRule { + let ctor = self.as_ctor(); + RecursorRule { + ctor: LeanIxName::new(ctor.get(0)).decode(), + n_fields: Nat::from_obj(ctor.get(1)), + rhs: LeanIxExpr::new(ctor.get(2)).decode(), + } } } -/// Decode Ix.RecursorRule from Lean object. -fn decode_recursor_rule(obj: LeanIxRecursorRule) -> RecursorRule { - let ctor = obj.as_ctor(); - RecursorRule { - ctor: decode_ix_name(LeanIxName::new(ctor.get(0))), - n_fields: Nat::from_obj(ctor.get(1)), - rhs: decode_ix_expr(LeanIxExpr::new(ctor.get(2))), +// ============================================================================= +// ConstantInfo +// ============================================================================= + +impl LeanIxRecursorRule { + /// Build an Array of RecursorRule. + pub fn build_array( + cache: &mut LeanBuildCache, + rules: &[RecursorRule], + ) -> LeanArray { + let arr = LeanArray::alloc(rules.len()); + for (i, rule) in rules.iter().enumerate() { + // RecursorRule = { ctor : Name, nFields : Nat, rhs : Expr } + let ctor_obj = LeanIxName::build(cache, &rule.ctor); + let n_fields_obj = build_nat(&rule.n_fields); + let rhs_obj = LeanIxExpr::build(cache, &rule.rhs); + + let rule_obj = LeanCtor::alloc(0, 3, 0); + rule_obj.set(0, ctor_obj); + rule_obj.set(1, n_fields_obj); + rule_obj.set(2, rhs_obj); + + arr.set(i, rule_obj); + } + arr } } -/// Decode Ix.ConstantInfo from Lean object. 
-pub fn decode_constant_info(obj: LeanIxConstantInfo) -> ConstantInfo { - let outer = obj.as_ctor(); - let inner_obj = outer.get(0); - let inner = inner_obj.as_ctor(); - - match outer.tag() { - 0 => { - let is_unsafe = inner.scalar_u8(1, 0) != 0; - - ConstantInfo::AxiomInfo(AxiomVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - is_unsafe, - }) - }, - 1 => { - let safety_byte = inner.scalar_u8(4, 0); - let safety = match safety_byte { - 0 => DefinitionSafety::Unsafe, - 1 => DefinitionSafety::Safe, - 2 => DefinitionSafety::Partial, - _ => panic!("Invalid DefinitionSafety: {}", safety_byte), - }; - - ConstantInfo::DefnInfo(DefinitionVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - value: decode_ix_expr(LeanIxExpr::new(inner.get(1))), - hints: decode_reducibility_hints(LeanIxReducibilityHints::new( - inner.get(2), - )), - safety, - all: decode_name_array(inner.get(3).as_array()), - }) - }, - 2 => ConstantInfo::ThmInfo(TheoremVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - value: decode_ix_expr(LeanIxExpr::new(inner.get(1))), - all: decode_name_array(inner.get(2).as_array()), - }), - 3 => { - let is_unsafe = inner.scalar_u8(3, 0) != 0; - - ConstantInfo::OpaqueInfo(OpaqueVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - value: decode_ix_expr(LeanIxExpr::new(inner.get(1))), - is_unsafe, - all: decode_name_array(inner.get(2).as_array()), - }) - }, - 4 => { - let kind_byte = inner.scalar_u8(1, 0); - let kind = match kind_byte { - 0 => QuotKind::Type, - 1 => QuotKind::Ctor, - 2 => QuotKind::Lift, - 3 => QuotKind::Ind, - _ => panic!("Invalid QuotKind: {}", kind_byte), - }; - - ConstantInfo::QuotInfo(QuotVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - kind, - }) - }, - 5 => { - let is_rec = inner.scalar_u8(6, 0) != 0; - let is_unsafe = inner.scalar_u8(6, 1) != 0; - let is_reflexive = inner.scalar_u8(6, 2) != 0; - - 
ConstantInfo::InductInfo(InductiveVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - num_params: Nat::from_obj(inner.get(1)), - num_indices: Nat::from_obj(inner.get(2)), - all: decode_name_array(inner.get(3).as_array()), - ctors: decode_name_array(inner.get(4).as_array()), - num_nested: Nat::from_obj(inner.get(5)), - is_rec, - is_unsafe, - is_reflexive, - }) - }, - 6 => { - let is_unsafe = inner.scalar_u8(5, 0) != 0; - - ConstantInfo::CtorInfo(ConstructorVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - induct: decode_ix_name(LeanIxName::new(inner.get(1))), - cidx: Nat::from_obj(inner.get(2)), - num_params: Nat::from_obj(inner.get(3)), - num_fields: Nat::from_obj(inner.get(4)), - is_unsafe, - }) - }, - 7 => { - let k = inner.scalar_u8(7, 0) != 0; - let is_unsafe = inner.scalar_u8(7, 1) != 0; - - let rules: Vec = inner - .get(6) - .as_array() - .map(|x| decode_recursor_rule(LeanIxRecursorRule::new(x))); - - ConstantInfo::RecInfo(RecursorVal { - cnst: decode_constant_val(LeanIxConstantVal::new(inner.get(0))), - all: decode_name_array(inner.get(1).as_array()), - num_params: Nat::from_obj(inner.get(2)), - num_indices: Nat::from_obj(inner.get(3)), - num_motives: Nat::from_obj(inner.get(4)), - num_minors: Nat::from_obj(inner.get(5)), - rules, - k, - is_unsafe, - }) - }, - _ => panic!("Invalid ConstantInfo tag: {}", outer.tag()), +impl LeanIxConstantInfo { + /// Build a Ix.ConstantInfo from a Rust ConstantInfo. 
+ pub fn build( + cache: &mut LeanBuildCache, + info: &ConstantInfo, + ) -> Self { + let result = match info { + // | axiomInfo (v : AxiomVal) -- tag 0 + ConstantInfo::AxiomInfo(v) => { + // AxiomVal = { cnst : ConstantVal, isUnsafe : Bool } + let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst); + let axiom_val = LeanCtor::alloc(0, 1, 1); + axiom_val.set(0, cnst_obj); + axiom_val.set_u8(8, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, axiom_val); + *obj + }, + // | defnInfo (v : DefinitionVal) -- tag 1 + ConstantInfo::DefnInfo(v) => { + // DefinitionVal = { cnst, value, hints, safety, all } + // Memory layout: 4 obj fields (cnst, value, hints, all), 1 scalar byte (safety) + let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst); + let value_obj = LeanIxExpr::build(cache, &v.value); + let hints_obj = LeanIxReducibilityHints::build(&v.hints); + let all_obj = LeanIxName::build_array(cache, &v.all); + let safety_byte = match v.safety { + DefinitionSafety::Unsafe => 0u8, + DefinitionSafety::Safe => 1u8, + DefinitionSafety::Partial => 2u8, + }; + + let defn_val = LeanCtor::alloc(0, 4, 1); + defn_val.set(0, cnst_obj); + defn_val.set(1, value_obj); + defn_val.set(2, hints_obj); + defn_val.set(3, all_obj); + defn_val.set_u8(4 * 8, safety_byte); + + let obj = LeanCtor::alloc(1, 1, 0); + obj.set(0, defn_val); + *obj + }, + // | thmInfo (v : TheoremVal) -- tag 2 + ConstantInfo::ThmInfo(v) => { + // TheoremVal = { cnst, value, all } + let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst); + let value_obj = LeanIxExpr::build(cache, &v.value); + let all_obj = LeanIxName::build_array(cache, &v.all); + + let thm_val = LeanCtor::alloc(0, 3, 0); + thm_val.set(0, cnst_obj); + thm_val.set(1, value_obj); + thm_val.set(2, all_obj); + + let obj = LeanCtor::alloc(2, 1, 0); + obj.set(0, thm_val); + *obj + }, + // | opaqueInfo (v : OpaqueVal) -- tag 3 + ConstantInfo::OpaqueInfo(v) => { + // OpaqueVal = { cnst, value, isUnsafe, all } + let cnst_obj = 
LeanIxConstantVal::build(cache, &v.cnst); + let value_obj = LeanIxExpr::build(cache, &v.value); + let all_obj = LeanIxName::build_array(cache, &v.all); + + let opaque_val = LeanCtor::alloc(0, 3, 1); + opaque_val.set(0, cnst_obj); + opaque_val.set(1, value_obj); + opaque_val.set(2, all_obj); + opaque_val.set_u8(3 * 8, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(3, 1, 0); + obj.set(0, opaque_val); + *obj + }, + // | quotInfo (v : QuotVal) -- tag 4 + ConstantInfo::QuotInfo(v) => { + // QuotVal = { cnst, kind } + // Memory layout: 1 obj field (cnst), 1 scalar byte (kind) + let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst); + let kind_byte = match v.kind { + QuotKind::Type => 0u8, + QuotKind::Ctor => 1u8, + QuotKind::Lift => 2u8, + QuotKind::Ind => 3u8, + }; + + let quot_val = LeanCtor::alloc(0, 1, 1); + quot_val.set(0, cnst_obj); + quot_val.set_u8(8, kind_byte); + + let obj = LeanCtor::alloc(4, 1, 0); + obj.set(0, quot_val); + *obj + }, + // | inductInfo (v : InductiveVal) -- tag 5 + ConstantInfo::InductInfo(v) => { + // InductiveVal = { cnst, numParams, numIndices, all, ctors, numNested, isRec, isUnsafe, isReflexive } + let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst); + let num_params_obj = build_nat(&v.num_params); + let num_indices_obj = build_nat(&v.num_indices); + let all_obj = LeanIxName::build_array(cache, &v.all); + let ctors_obj = LeanIxName::build_array(cache, &v.ctors); + let num_nested_obj = build_nat(&v.num_nested); + + // 6 object fields, 3 scalar bytes for bools + let induct_val = LeanCtor::alloc(0, 6, 3); + induct_val.set(0, cnst_obj); + induct_val.set(1, num_params_obj); + induct_val.set(2, num_indices_obj); + induct_val.set(3, all_obj); + induct_val.set(4, ctors_obj); + induct_val.set(5, num_nested_obj); + induct_val.set_u8(6 * 8, v.is_rec as u8); + induct_val.set_u8(6 * 8 + 1, v.is_unsafe as u8); + induct_val.set_u8(6 * 8 + 2, v.is_reflexive as u8); + + let obj = LeanCtor::alloc(5, 1, 0); + obj.set(0, induct_val); + *obj + }, + // 
| ctorInfo (v : ConstructorVal) -- tag 6 + ConstantInfo::CtorInfo(v) => { + // ConstructorVal = { cnst, induct, cidx, numParams, numFields, isUnsafe } + let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst); + let induct_obj = LeanIxName::build(cache, &v.induct); + let cidx_obj = build_nat(&v.cidx); + let num_params_obj = build_nat(&v.num_params); + let num_fields_obj = build_nat(&v.num_fields); + + // 5 object fields, 1 scalar byte for bool + let ctor_val = LeanCtor::alloc(0, 5, 1); + ctor_val.set(0, cnst_obj); + ctor_val.set(1, induct_obj); + ctor_val.set(2, cidx_obj); + ctor_val.set(3, num_params_obj); + ctor_val.set(4, num_fields_obj); + ctor_val.set_u8(5 * 8, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(6, 1, 0); + obj.set(0, ctor_val); + *obj + }, + // | recInfo (v : RecursorVal) -- tag 7 + ConstantInfo::RecInfo(v) => { + // RecursorVal = { cnst, all, numParams, numIndices, numMotives, numMinors, rules, k, isUnsafe } + let cnst_obj = LeanIxConstantVal::build(cache, &v.cnst); + let all_obj = LeanIxName::build_array(cache, &v.all); + let num_params_obj = build_nat(&v.num_params); + let num_indices_obj = build_nat(&v.num_indices); + let num_motives_obj = build_nat(&v.num_motives); + let num_minors_obj = build_nat(&v.num_minors); + let rules_obj = LeanIxRecursorRule::build_array(cache, &v.rules); + + // 7 object fields, 2 scalar bytes for bools + let rec_val = LeanCtor::alloc(0, 7, 2); + rec_val.set(0, cnst_obj); + rec_val.set(1, all_obj); + rec_val.set(2, num_params_obj); + rec_val.set(3, num_indices_obj); + rec_val.set(4, num_motives_obj); + rec_val.set(5, num_minors_obj); + rec_val.set(6, rules_obj); + rec_val.set_u8(7 * 8, v.k as u8); + rec_val.set_u8(7 * 8 + 1, v.is_unsafe as u8); + + let obj = LeanCtor::alloc(7, 1, 0); + obj.set(0, rec_val); + *obj + }, + }; + + Self::new(result) + } + + /// Decode Ix.ConstantInfo from Lean object. 
+ pub fn decode(self) -> ConstantInfo { + let outer = self.as_ctor(); + let inner_obj = outer.get(0); + let inner = inner_obj.as_ctor(); + + match outer.tag() { + 0 => { + let is_unsafe = inner.scalar_u8(1, 0) != 0; + + ConstantInfo::AxiomInfo(AxiomVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + is_unsafe, + }) + }, + 1 => { + let safety_byte = inner.scalar_u8(4, 0); + let safety = match safety_byte { + 0 => DefinitionSafety::Unsafe, + 1 => DefinitionSafety::Safe, + 2 => DefinitionSafety::Partial, + _ => panic!("Invalid DefinitionSafety: {}", safety_byte), + }; + + ConstantInfo::DefnInfo(DefinitionVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + value: LeanIxExpr::new(inner.get(1)).decode(), + hints: LeanIxReducibilityHints::new(inner.get(2)).decode(), + safety, + all: LeanIxName::decode_array(inner.get(3).as_array()), + }) + }, + 2 => ConstantInfo::ThmInfo(TheoremVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + value: LeanIxExpr::new(inner.get(1)).decode(), + all: LeanIxName::decode_array(inner.get(2).as_array()), + }), + 3 => { + let is_unsafe = inner.scalar_u8(3, 0) != 0; + + ConstantInfo::OpaqueInfo(OpaqueVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + value: LeanIxExpr::new(inner.get(1)).decode(), + is_unsafe, + all: LeanIxName::decode_array(inner.get(2).as_array()), + }) + }, + 4 => { + let kind_byte = inner.scalar_u8(1, 0); + let kind = match kind_byte { + 0 => QuotKind::Type, + 1 => QuotKind::Ctor, + 2 => QuotKind::Lift, + 3 => QuotKind::Ind, + _ => panic!("Invalid QuotKind: {}", kind_byte), + }; + + ConstantInfo::QuotInfo(QuotVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + kind, + }) + }, + 5 => { + let is_rec = inner.scalar_u8(6, 0) != 0; + let is_unsafe = inner.scalar_u8(6, 1) != 0; + let is_reflexive = inner.scalar_u8(6, 2) != 0; + + ConstantInfo::InductInfo(InductiveVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + num_params: Nat::from_obj(inner.get(1)), + 
num_indices: Nat::from_obj(inner.get(2)), + all: LeanIxName::decode_array(inner.get(3).as_array()), + ctors: LeanIxName::decode_array(inner.get(4).as_array()), + num_nested: Nat::from_obj(inner.get(5)), + is_rec, + is_unsafe, + is_reflexive, + }) + }, + 6 => { + let is_unsafe = inner.scalar_u8(5, 0) != 0; + + ConstantInfo::CtorInfo(ConstructorVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + induct: LeanIxName::new(inner.get(1)).decode(), + cidx: Nat::from_obj(inner.get(2)), + num_params: Nat::from_obj(inner.get(3)), + num_fields: Nat::from_obj(inner.get(4)), + is_unsafe, + }) + }, + 7 => { + let k = inner.scalar_u8(7, 0) != 0; + let is_unsafe = inner.scalar_u8(7, 1) != 0; + + let rules: Vec = inner + .get(6) + .as_array() + .map(|x| LeanIxRecursorRule::new(x).decode()); + + ConstantInfo::RecInfo(RecursorVal { + cnst: LeanIxConstantVal::new(inner.get(0)).decode(), + all: LeanIxName::decode_array(inner.get(1).as_array()), + num_params: Nat::from_obj(inner.get(2)), + num_indices: Nat::from_obj(inner.get(3)), + num_motives: Nat::from_obj(inner.get(4)), + num_minors: Nat::from_obj(inner.get(5)), + rules, + k, + is_unsafe, + }) + }, + _ => panic!("Invalid ConstantInfo tag: {}", outer.tag()), + } } } @@ -440,7 +449,7 @@ pub fn decode_constant_info(obj: LeanIxConstantInfo) -> ConstantInfo { pub extern "C" fn rs_roundtrip_ix_constant_info( info_ptr: LeanIxConstantInfo, ) -> LeanIxConstantInfo { - let info = decode_constant_info(info_ptr); + let info = info_ptr.decode(); let mut cache = LeanBuildCache::new(); - build_constant_info(&mut cache, &info) + LeanIxConstantInfo::build(&mut cache, &info) } diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index 568b139e..24a36177 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -11,370 +11,375 @@ use lean_ffi::nat::Nat; use lean_ffi::object::{LeanArray, LeanCtor, LeanString}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::name::{build_name, decode_ix_name}; use 
crate::ffi::primitives::build_nat; -/// Build a Ix.Int (ofNat or negSucc). -pub fn build_int(int: &Int) -> LeanIxInt { - match int { - Int::OfNat(n) => { - let obj = LeanCtor::alloc(0, 1, 0); - obj.set(0, build_nat(n)); - LeanIxInt::new(*obj) - }, - Int::NegSucc(n) => { - let obj = LeanCtor::alloc(1, 1, 0); - obj.set(0, build_nat(n)); - LeanIxInt::new(*obj) - }, +impl LeanIxInt { + /// Build a Ix.Int (ofNat or negSucc). + pub fn build(int: &Int) -> Self { + match int { + Int::OfNat(n) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, build_nat(n)); + Self::new(*obj) + }, + Int::NegSucc(n) => { + let obj = LeanCtor::alloc(1, 1, 0); + obj.set(0, build_nat(n)); + Self::new(*obj) + }, + } } -} - -/// Build a Ix.Substring. -pub fn build_substring(ss: &Substring) -> LeanIxSubstring { - let obj = LeanCtor::alloc(0, 3, 0); - obj.set(0, LeanString::new(ss.str.as_str())); - obj.set(1, build_nat(&ss.start_pos)); - obj.set(2, build_nat(&ss.stop_pos)); - LeanIxSubstring::new(*obj) -} -/// Build a Ix.SourceInfo. -pub fn build_source_info(si: &SourceInfo) -> LeanIxSourceInfo { - match si { - // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0 - SourceInfo::Original(leading, pos, trailing, end_pos) => { - let obj = LeanCtor::alloc(0, 4, 0); - obj.set(0, build_substring(leading)); - obj.set(1, build_nat(pos)); - obj.set(2, build_substring(trailing)); - obj.set(3, build_nat(end_pos)); - LeanIxSourceInfo::new(*obj) - }, - // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1 - SourceInfo::Synthetic(pos, end_pos, canonical) => { - let obj = LeanCtor::alloc(1, 2, 1); - obj.set(0, build_nat(pos)); - obj.set(1, build_nat(end_pos)); - obj.set_u8(2 * 8, *canonical as u8); - LeanIxSourceInfo::new(*obj) - }, - // | none -- tag 2 - SourceInfo::None => LeanIxSourceInfo::new(*LeanCtor::alloc(2, 0, 0)), + /// Decode Ix.Int from Lean object. 
+ /// Ix.Int: ofNat (tag 0, 1 field) | negSucc (tag 1, 1 field) + pub fn decode(self) -> Int { + let ctor = self.as_ctor(); + let nat = Nat::from_obj(ctor.get(0)); + match ctor.tag() { + 0 => Int::OfNat(nat), + 1 => Int::NegSucc(nat), + _ => panic!("Invalid Ix.Int tag: {}", ctor.tag()), + } } } -/// Build a Ix.SyntaxPreresolved. -pub fn build_syntax_preresolved( - cache: &mut LeanBuildCache, - sp: &SyntaxPreresolved, -) -> LeanIxSyntaxPreresolved { - match sp { - // | namespace (name : Name) -- tag 0 - SyntaxPreresolved::Namespace(name) => { - let obj = LeanCtor::alloc(0, 1, 0); - obj.set(0, build_name(cache, name)); - LeanIxSyntaxPreresolved::new(*obj) - }, - // | decl (name : Name) (aliases : Array String) -- tag 1 - SyntaxPreresolved::Decl(name, aliases) => { - let name_obj = build_name(cache, name); - let aliases_obj = build_string_array(aliases); - let obj = LeanCtor::alloc(1, 2, 0); - obj.set(0, name_obj); - obj.set(1, aliases_obj); - LeanIxSyntaxPreresolved::new(*obj) - }, +impl LeanIxSubstring { + /// Build a Ix.Substring. + pub fn build(ss: &Substring) -> Self { + let obj = LeanCtor::alloc(0, 3, 0); + obj.set(0, LeanString::new(ss.str.as_str())); + obj.set(1, build_nat(&ss.start_pos)); + obj.set(2, build_nat(&ss.stop_pos)); + Self::new(*obj) } -} -/// Build an Array of Strings. -pub fn build_string_array(strings: &[String]) -> LeanArray { - let arr = LeanArray::alloc(strings.len()); - for (i, s) in strings.iter().enumerate() { - arr.set(i, LeanString::new(s.as_str())); + /// Decode Ix.Substring. + pub fn decode(self) -> Substring { + let ctor = self.as_ctor(); + Substring { + str: ctor.get(0).as_string().to_string(), + start_pos: Nat::from_obj(ctor.get(1)), + stop_pos: Nat::from_obj(ctor.get(2)), + } } - arr } -/// Build a Ix.Syntax. 
-pub fn build_syntax(cache: &mut LeanBuildCache, syn: &Syntax) -> LeanIxSyntax { - match syn { - // | missing -- tag 0 - Syntax::Missing => LeanIxSyntax::new(*LeanCtor::alloc(0, 0, 0)), - // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 - Syntax::Node(info, kind, args) => { - let info_obj = build_source_info(info); - let kind_obj = build_name(cache, kind); - let args_obj = build_syntax_array(cache, args); - let obj = LeanCtor::alloc(1, 3, 0); - obj.set(0, info_obj); - obj.set(1, kind_obj); - obj.set(2, args_obj); - LeanIxSyntax::new(*obj) - }, - // | atom (info : SourceInfo) (val : String) -- tag 2 - Syntax::Atom(info, val) => { - let info_obj = build_source_info(info); - let obj = LeanCtor::alloc(2, 2, 0); - obj.set(0, info_obj); - obj.set(1, LeanString::new(val.as_str())); - LeanIxSyntax::new(*obj) - }, - // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 - Syntax::Ident(info, raw_val, val, preresolved) => { - let info_obj = build_source_info(info); - let raw_val_obj = build_substring(raw_val); - let val_obj = build_name(cache, val); - let preresolved_obj = build_syntax_preresolved_array(cache, preresolved); - let obj = LeanCtor::alloc(3, 4, 0); - obj.set(0, info_obj); - obj.set(1, raw_val_obj); - obj.set(2, val_obj); - obj.set(3, preresolved_obj); - LeanIxSyntax::new(*obj) - }, +impl LeanIxSourceInfo { + /// Build a Ix.SourceInfo. 
+ pub fn build(si: &SourceInfo) -> Self { + match si { + // | original (leading : Substring) (pos : Nat) (trailing : Substring) (endPos : Nat) -- tag 0 + SourceInfo::Original(leading, pos, trailing, end_pos) => { + let obj = LeanCtor::alloc(0, 4, 0); + obj.set(0, LeanIxSubstring::build(leading)); + obj.set(1, build_nat(pos)); + obj.set(2, LeanIxSubstring::build(trailing)); + obj.set(3, build_nat(end_pos)); + Self::new(*obj) + }, + // | synthetic (pos : Nat) (endPos : Nat) (canonical : Bool) -- tag 1 + SourceInfo::Synthetic(pos, end_pos, canonical) => { + let obj = LeanCtor::alloc(1, 2, 1); + obj.set(0, build_nat(pos)); + obj.set(1, build_nat(end_pos)); + obj.set_u8(2 * 8, *canonical as u8); + Self::new(*obj) + }, + // | none -- tag 2 + SourceInfo::None => Self::new(*LeanCtor::alloc(2, 0, 0)), + } } -} -/// Build an Array of Syntax. -pub fn build_syntax_array( - cache: &mut LeanBuildCache, - items: &[Syntax], -) -> LeanArray { - let arr = LeanArray::alloc(items.len()); - for (i, item) in items.iter().enumerate() { - arr.set(i, build_syntax(cache, item)); + /// Decode Ix.SourceInfo. + pub fn decode(self) -> SourceInfo { + if self.is_scalar() { + return SourceInfo::None; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // original + SourceInfo::Original( + LeanIxSubstring::new(ctor.get(0)).decode(), + Nat::from_obj(ctor.get(1)), + LeanIxSubstring::new(ctor.get(2)).decode(), + Nat::from_obj(ctor.get(3)), + ) + }, + 1 => { + // synthetic: 2 obj fields (pos, end_pos), 1 scalar byte (canonical) + let canonical = ctor.scalar_u8(2, 0) != 0; + + SourceInfo::Synthetic( + Nat::from_obj(ctor.get(0)), + Nat::from_obj(ctor.get(1)), + canonical, + ) + }, + 2 => SourceInfo::None, + _ => panic!("Invalid SourceInfo tag: {}", ctor.tag()), + } } - arr } -/// Build an Array of SyntaxPreresolved. 
-pub fn build_syntax_preresolved_array( - cache: &mut LeanBuildCache, - items: &[SyntaxPreresolved], -) -> LeanArray { - let arr = LeanArray::alloc(items.len()); - for (i, item) in items.iter().enumerate() { - arr.set(i, build_syntax_preresolved(cache, item)); +impl LeanIxSyntaxPreresolved { + /// Build a Ix.SyntaxPreresolved. + pub fn build( + cache: &mut LeanBuildCache, + sp: &SyntaxPreresolved, + ) -> Self { + match sp { + // | namespace (name : Name) -- tag 0 + SyntaxPreresolved::Namespace(name) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, LeanIxName::build(cache, name)); + Self::new(*obj) + }, + // | decl (name : Name) (aliases : Array String) -- tag 1 + SyntaxPreresolved::Decl(name, aliases) => { + let name_obj = LeanIxName::build(cache, name); + let aliases_obj = build_string_array(aliases); + let obj = LeanCtor::alloc(1, 2, 0); + obj.set(0, name_obj); + obj.set(1, aliases_obj); + Self::new(*obj) + }, + } } - arr -} -/// Build Ix.DataValue. -pub fn build_data_value( - cache: &mut LeanBuildCache, - dv: &DataValue, -) -> LeanIxDataValue { - match dv { - DataValue::OfString(s) => { - let obj = LeanCtor::alloc(0, 1, 0); - obj.set(0, LeanString::new(s.as_str())); - LeanIxDataValue::new(*obj) - }, - DataValue::OfBool(b) => { - // 0 object fields, 1 scalar byte - let obj = LeanCtor::alloc(1, 0, 1); - obj.set_u8(0, *b as u8); - LeanIxDataValue::new(*obj) - }, - DataValue::OfName(n) => { - let obj = LeanCtor::alloc(2, 1, 0); - obj.set(0, build_name(cache, n)); - LeanIxDataValue::new(*obj) - }, - DataValue::OfNat(n) => { - let obj = LeanCtor::alloc(3, 1, 0); - obj.set(0, build_nat(n)); - LeanIxDataValue::new(*obj) - }, - DataValue::OfInt(i) => { - let obj = LeanCtor::alloc(4, 1, 0); - obj.set(0, build_int(i)); - LeanIxDataValue::new(*obj) - }, - DataValue::OfSyntax(syn) => { - let obj = LeanCtor::alloc(5, 1, 0); - obj.set(0, build_syntax(cache, syn)); - LeanIxDataValue::new(*obj) - }, + /// Decode Ix.SyntaxPreresolved. 
+ pub fn decode(self) -> SyntaxPreresolved { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // namespace + SyntaxPreresolved::Namespace(LeanIxName::new(ctor.get(0)).decode()) + }, + 1 => { + // decl + let name = LeanIxName::new(ctor.get(0)).decode(); + let aliases: Vec = + ctor.get(1).as_array().map(|obj| obj.as_string().to_string()); + + SyntaxPreresolved::Decl(name, aliases) + }, + _ => panic!("Invalid SyntaxPreresolved tag: {}", ctor.tag()), + } } } -/// Build an Array of (Name × DataValue) for mdata. -pub fn build_kvmap( - cache: &mut LeanBuildCache, - data: &[(Name, DataValue)], -) -> LeanArray { - let arr = LeanArray::alloc(data.len()); - for (i, (name, dv)) in data.iter().enumerate() { - let name_obj = build_name(cache, name); - let dv_obj = build_data_value(cache, dv); - // Prod (Name × DataValue) - let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, name_obj); - pair.set(1, dv_obj); - arr.set(i, pair); +/// Build an Array of Strings. +pub fn build_string_array(strings: &[String]) -> LeanArray { + let arr = LeanArray::alloc(strings.len()); + for (i, s) in strings.iter().enumerate() { + arr.set(i, LeanString::new(s.as_str())); } arr } -// ============================================================================= -// Decode Functions -// ============================================================================= - -/// Decode Ix.Int from Lean object. -/// Ix.Int: ofNat (tag 0, 1 field) | negSucc (tag 1, 1 field) -pub fn decode_ix_int(obj: LeanIxInt) -> Int { - let ctor = obj.as_ctor(); - let nat = Nat::from_obj(ctor.get(0)); - match ctor.tag() { - 0 => Int::OfNat(nat), - 1 => Int::NegSucc(nat), - _ => panic!("Invalid Ix.Int tag: {}", ctor.tag()), +impl LeanIxSyntax { + /// Build a Ix.Syntax. 
+ pub fn build(cache: &mut LeanBuildCache, syn: &Syntax) -> Self { + match syn { + // | missing -- tag 0 + Syntax::Missing => Self::new(*LeanCtor::alloc(0, 0, 0)), + // | node (info : SourceInfo) (kind : Name) (args : Array Syntax) -- tag 1 + Syntax::Node(info, kind, args) => { + let info_obj = LeanIxSourceInfo::build(info); + let kind_obj = LeanIxName::build(cache, kind); + let args_obj = Self::build_array(cache, args); + let obj = LeanCtor::alloc(1, 3, 0); + obj.set(0, info_obj); + obj.set(1, kind_obj); + obj.set(2, args_obj); + Self::new(*obj) + }, + // | atom (info : SourceInfo) (val : String) -- tag 2 + Syntax::Atom(info, val) => { + let info_obj = LeanIxSourceInfo::build(info); + let obj = LeanCtor::alloc(2, 2, 0); + obj.set(0, info_obj); + obj.set(1, LeanString::new(val.as_str())); + Self::new(*obj) + }, + // | ident (info : SourceInfo) (rawVal : Substring) (val : Name) (preresolved : Array SyntaxPreresolved) -- tag 3 + Syntax::Ident(info, raw_val, val, preresolved) => { + let info_obj = LeanIxSourceInfo::build(info); + let raw_val_obj = LeanIxSubstring::build(raw_val); + let val_obj = LeanIxName::build(cache, val); + let preresolved_obj = Self::build_preresolved_array(cache, preresolved); + let obj = LeanCtor::alloc(3, 4, 0); + obj.set(0, info_obj); + obj.set(1, raw_val_obj); + obj.set(2, val_obj); + obj.set(3, preresolved_obj); + Self::new(*obj) + }, + } } -} -/// Decode Ix.DataValue from a Lean object. 
-pub fn decode_data_value(obj: LeanIxDataValue) -> DataValue { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - // ofString: 1 object field - DataValue::OfString(ctor.get(0).as_string().to_string()) - }, - 1 => { - // ofBool: 0 object fields, 1 scalar byte - let b = ctor.scalar_u8(0, 0) != 0; - DataValue::OfBool(b) - }, - 2 => { - // ofName: 1 object field - DataValue::OfName(decode_ix_name(LeanIxName::new(ctor.get(0)))) - }, - 3 => { - // ofNat: 1 object field - DataValue::OfNat(Nat::from_obj(ctor.get(0))) - }, - 4 => { - // ofInt: 1 object field - let inner = ctor.get(0); - let inner_ctor = inner.as_ctor(); - let nat = Nat::from_obj(inner_ctor.get(0)); - match inner_ctor.tag() { - 0 => DataValue::OfInt(Int::OfNat(nat)), - 1 => DataValue::OfInt(Int::NegSucc(nat)), - _ => panic!("Invalid Int tag: {}", inner_ctor.tag()), - } - }, - 5 => { - // ofSyntax: 1 object field - DataValue::OfSyntax( - decode_ix_syntax(LeanIxSyntax::new(ctor.get(0))).into(), - ) - }, - _ => panic!("Invalid DataValue tag: {}", ctor.tag()), + /// Build an Array of Syntax. + pub fn build_array( + cache: &mut LeanBuildCache, + items: &[Syntax], + ) -> LeanArray { + let arr = LeanArray::alloc(items.len()); + for (i, item) in items.iter().enumerate() { + arr.set(i, Self::build(cache, item)); + } + arr } -} -/// Decode Ix.Syntax from a Lean object. -pub fn decode_ix_syntax(obj: LeanIxSyntax) -> Syntax { - if obj.is_scalar() { - return Syntax::Missing; + /// Build an Array of SyntaxPreresolved. 
+ fn build_preresolved_array( + cache: &mut LeanBuildCache, + items: &[SyntaxPreresolved], + ) -> LeanArray { + let arr = LeanArray::alloc(items.len()); + for (i, item) in items.iter().enumerate() { + arr.set(i, LeanIxSyntaxPreresolved::build(cache, item)); + } + arr } - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => Syntax::Missing, - 1 => { - // node: info, kind, args - let info = decode_ix_source_info(LeanIxSourceInfo::new(ctor.get(0))); - let kind = decode_ix_name(LeanIxName::new(ctor.get(1))); - let args: Vec = - ctor.get(2).as_array().map(|x| decode_ix_syntax(LeanIxSyntax::new(x))); - Syntax::Node(info, kind, args) - }, - 2 => { - // atom: info, val - let info = decode_ix_source_info(LeanIxSourceInfo::new(ctor.get(0))); - Syntax::Atom(info, ctor.get(1).as_string().to_string()) - }, - 3 => { - // ident: info, rawVal, val, preresolved - let info = decode_ix_source_info(LeanIxSourceInfo::new(ctor.get(0))); - let raw_val = decode_substring(LeanIxSubstring::new(ctor.get(1))); - let val = decode_ix_name(LeanIxName::new(ctor.get(2))); - let preresolved: Vec = ctor - .get(3) - .as_array() - .map(|x| decode_syntax_preresolved(LeanIxSyntaxPreresolved::new(x))); - - Syntax::Ident(info, raw_val, val, preresolved) - }, - _ => panic!("Invalid Syntax tag: {}", ctor.tag()), - } -} + /// Decode Ix.Syntax from a Lean object. + pub fn decode(self) -> Syntax { + if self.is_scalar() { + return Syntax::Missing; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => Syntax::Missing, + 1 => { + // node: info, kind, args + let info = LeanIxSourceInfo::new(ctor.get(0)).decode(); + let kind = LeanIxName::new(ctor.get(1)).decode(); + let args: Vec = + ctor.get(2).as_array().map(|x| Self::new(x).decode()); -/// Decode Ix.SourceInfo. 
-pub fn decode_ix_source_info(obj: LeanIxSourceInfo) -> SourceInfo { - if obj.is_scalar() { - return SourceInfo::None; - } - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - // original - SourceInfo::Original( - decode_substring(LeanIxSubstring::new(ctor.get(0))), - Nat::from_obj(ctor.get(1)), - decode_substring(LeanIxSubstring::new(ctor.get(2))), - Nat::from_obj(ctor.get(3)), - ) - }, - 1 => { - // synthetic: 2 obj fields (pos, end_pos), 1 scalar byte (canonical) - let canonical = ctor.scalar_u8(2, 0) != 0; + Syntax::Node(info, kind, args) + }, + 2 => { + // atom: info, val + let info = LeanIxSourceInfo::new(ctor.get(0)).decode(); + Syntax::Atom(info, ctor.get(1).as_string().to_string()) + }, + 3 => { + // ident: info, rawVal, val, preresolved + let info = LeanIxSourceInfo::new(ctor.get(0)).decode(); + let raw_val = LeanIxSubstring::new(ctor.get(1)).decode(); + let val = LeanIxName::new(ctor.get(2)).decode(); + let preresolved: Vec = ctor + .get(3) + .as_array() + .map(|x| LeanIxSyntaxPreresolved::new(x).decode()); - SourceInfo::Synthetic( - Nat::from_obj(ctor.get(0)), - Nat::from_obj(ctor.get(1)), - canonical, - ) - }, - 2 => SourceInfo::None, - _ => panic!("Invalid SourceInfo tag: {}", ctor.tag()), + Syntax::Ident(info, raw_val, val, preresolved) + }, + _ => panic!("Invalid Syntax tag: {}", ctor.tag()), + } } } -/// Decode Ix.Substring. -pub fn decode_substring(obj: LeanIxSubstring) -> Substring { - let ctor = obj.as_ctor(); - Substring { - str: ctor.get(0).as_string().to_string(), - start_pos: Nat::from_obj(ctor.get(1)), - stop_pos: Nat::from_obj(ctor.get(2)), +impl LeanIxDataValue { + /// Build Ix.DataValue. 
+ pub fn build( + cache: &mut LeanBuildCache, + dv: &DataValue, + ) -> Self { + match dv { + DataValue::OfString(s) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, LeanString::new(s.as_str())); + Self::new(*obj) + }, + DataValue::OfBool(b) => { + // 0 object fields, 1 scalar byte + let obj = LeanCtor::alloc(1, 0, 1); + obj.set_u8(0, *b as u8); + Self::new(*obj) + }, + DataValue::OfName(n) => { + let obj = LeanCtor::alloc(2, 1, 0); + obj.set(0, LeanIxName::build(cache, n)); + Self::new(*obj) + }, + DataValue::OfNat(n) => { + let obj = LeanCtor::alloc(3, 1, 0); + obj.set(0, build_nat(n)); + Self::new(*obj) + }, + DataValue::OfInt(i) => { + let obj = LeanCtor::alloc(4, 1, 0); + obj.set(0, LeanIxInt::build(i)); + Self::new(*obj) + }, + DataValue::OfSyntax(syn) => { + let obj = LeanCtor::alloc(5, 1, 0); + obj.set(0, LeanIxSyntax::build(cache, syn)); + Self::new(*obj) + }, + } } -} -/// Decode Ix.SyntaxPreresolved. -pub fn decode_syntax_preresolved( - obj: LeanIxSyntaxPreresolved, -) -> SyntaxPreresolved { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - // namespace - SyntaxPreresolved::Namespace(decode_ix_name(LeanIxName::new(ctor.get(0)))) - }, - 1 => { - // decl - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let aliases: Vec = - ctor.get(1).as_array().map(|obj| obj.as_string().to_string()); + /// Build an Array of (Name × DataValue) for mdata. + pub fn build_kvmap( + cache: &mut LeanBuildCache, + data: &[(Name, DataValue)], + ) -> LeanArray { + let arr = LeanArray::alloc(data.len()); + for (i, (name, dv)) in data.iter().enumerate() { + let name_obj = LeanIxName::build(cache, name); + let dv_obj = Self::build(cache, dv); + // Prod (Name × DataValue) + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, name_obj); + pair.set(1, dv_obj); + arr.set(i, pair); + } + arr + } - SyntaxPreresolved::Decl(name, aliases) - }, - _ => panic!("Invalid SyntaxPreresolved tag: {}", ctor.tag()), + /// Decode Ix.DataValue from a Lean object. 
+ pub fn decode(self) -> DataValue { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // ofString: 1 object field + DataValue::OfString(ctor.get(0).as_string().to_string()) + }, + 1 => { + // ofBool: 0 object fields, 1 scalar byte + let b = ctor.scalar_u8(0, 0) != 0; + DataValue::OfBool(b) + }, + 2 => { + // ofName: 1 object field + DataValue::OfName(LeanIxName::new(ctor.get(0)).decode()) + }, + 3 => { + // ofNat: 1 object field + DataValue::OfNat(Nat::from_obj(ctor.get(0))) + }, + 4 => { + // ofInt: 1 object field + let inner = ctor.get(0); + let inner_ctor = inner.as_ctor(); + let nat = Nat::from_obj(inner_ctor.get(0)); + match inner_ctor.tag() { + 0 => DataValue::OfInt(Int::OfNat(nat)), + 1 => DataValue::OfInt(Int::NegSucc(nat)), + _ => panic!("Invalid Int tag: {}", inner_ctor.tag()), + } + }, + 5 => { + // ofSyntax: 1 object field + DataValue::OfSyntax( + LeanIxSyntax::new(ctor.get(0)).decode().into(), + ) + }, + _ => panic!("Invalid DataValue tag: {}", ctor.tag()), + } } } @@ -385,8 +390,8 @@ pub fn decode_syntax_preresolved( /// Round-trip an Ix.Int: decode from Lean, re-encode. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_int(int_ptr: LeanIxInt) -> LeanIxInt { - let int_val = decode_ix_int(int_ptr); - build_int(&int_val) + let int_val = int_ptr.decode(); + LeanIxInt::build(&int_val) } /// Round-trip an Ix.Substring: decode from Lean, re-encode. @@ -394,8 +399,8 @@ pub extern "C" fn rs_roundtrip_ix_int(int_ptr: LeanIxInt) -> LeanIxInt { pub extern "C" fn rs_roundtrip_ix_substring( sub_ptr: LeanIxSubstring, ) -> LeanIxSubstring { - let sub = decode_substring(sub_ptr); - build_substring(&sub) + let sub = sub_ptr.decode(); + LeanIxSubstring::build(&sub) } /// Round-trip an Ix.SourceInfo: decode from Lean, re-encode. 
@@ -403,8 +408,8 @@ pub extern "C" fn rs_roundtrip_ix_substring( pub extern "C" fn rs_roundtrip_ix_source_info( si_ptr: LeanIxSourceInfo, ) -> LeanIxSourceInfo { - let si = decode_ix_source_info(si_ptr); - build_source_info(&si) + let si = si_ptr.decode(); + LeanIxSourceInfo::build(&si) } /// Round-trip an Ix.SyntaxPreresolved: decode from Lean, re-encode. @@ -412,9 +417,9 @@ pub extern "C" fn rs_roundtrip_ix_source_info( pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( sp_ptr: LeanIxSyntaxPreresolved, ) -> LeanIxSyntaxPreresolved { - let sp = decode_syntax_preresolved(sp_ptr); + let sp = sp_ptr.decode(); let mut cache = LeanBuildCache::new(); - build_syntax_preresolved(&mut cache, &sp) + LeanIxSyntaxPreresolved::build(&mut cache, &sp) } /// Round-trip an Ix.Syntax: decode from Lean, re-encode. @@ -422,9 +427,9 @@ pub extern "C" fn rs_roundtrip_ix_syntax_preresolved( pub extern "C" fn rs_roundtrip_ix_syntax( syn_ptr: LeanIxSyntax, ) -> LeanIxSyntax { - let syn = decode_ix_syntax(syn_ptr); + let syn = syn_ptr.decode(); let mut cache = LeanBuildCache::new(); - build_syntax(&mut cache, &syn) + LeanIxSyntax::build(&mut cache, &syn) } /// Round-trip an Ix.DataValue: decode from Lean, re-encode. 
@@ -432,7 +437,7 @@ pub extern "C" fn rs_roundtrip_ix_syntax( pub extern "C" fn rs_roundtrip_ix_data_value( dv_ptr: LeanIxDataValue, ) -> LeanIxDataValue { - let dv = decode_data_value(dv_ptr); + let dv = dv_ptr.decode(); let mut cache = LeanBuildCache::new(); - build_data_value(&mut cache, &dv) + LeanIxDataValue::build(&mut cache, &dv) } diff --git a/src/ffi/ix/env.rs b/src/ffi/ix/env.rs index b0a2cc1d..9d4512c5 100644 --- a/src/ffi/ix/env.rs +++ b/src/ffi/ix/env.rs @@ -9,8 +9,6 @@ use crate::lean::{ use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::constant::{build_constant_info, decode_constant_info}; -use crate::ffi::ix::name::{build_name, decode_ix_name}; // ============================================================================= // HashMap Building @@ -66,39 +64,7 @@ pub fn build_hashmap_from_pairs( } // ============================================================================= -// Environment Building -// ============================================================================= - -/// Build a Ix.RawEnvironment from collected caches. -/// RawEnvironment has arrays that Lean will convert to HashMaps. -/// -/// Ix.RawEnvironment = { -/// consts : Array (Name × ConstantInfo) -/// } -/// -/// NOTE: RawEnvironment with a single field is UNBOXED by Lean, -/// so we return just the array, not a structure containing it. 
-pub fn build_raw_environment( - cache: &mut LeanBuildCache, - consts: &FxHashMap, -) -> LeanIxRawEnvironment { - // Build consts array: Array (Name × ConstantInfo) - let consts_arr = LeanArray::alloc(consts.len()); - for (i, (name, info)) in consts.iter().enumerate() { - let key_obj = build_name(cache, name); - let val_obj = build_constant_info(cache, info); - // Build pair (Name × ConstantInfo) - let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, key_obj); - pair.set(1, val_obj); - consts_arr.set(i, pair); - } - - LeanIxRawEnvironment::new(*consts_arr) -} - -// ============================================================================= -// Environment Decoder +// Environment Building / Decoding // ============================================================================= /// Decode a HashMap's AssocList and collect key-value pairs using a custom decoder. @@ -163,82 +129,108 @@ where pairs } -/// Decode Ix.Environment from Lean object. -/// -/// Ix.Environment = { -/// consts : HashMap Name ConstantInfo -/// } -/// -/// NOTE: Environment with a single field is UNBOXED by Lean, -/// so the pointer IS the HashMap directly, not a structure containing it. -pub fn decode_ix_environment( - obj: LeanIxEnvironment, -) -> FxHashMap { - // Environment is unboxed - obj IS the HashMap directly - let consts_pairs = decode_hashmap( - *obj, - |x| decode_ix_name(LeanIxName::new(x)), - |x| decode_constant_info(LeanIxConstantInfo::new(x)), - ); - let mut consts: FxHashMap = FxHashMap::default(); - for (name, info) in consts_pairs { - consts.insert(name, info); +impl LeanIxRawEnvironment { + /// Build a Ix.RawEnvironment from collected caches. + /// RawEnvironment has arrays that Lean will convert to HashMaps. + /// + /// Ix.RawEnvironment = { + /// consts : Array (Name × ConstantInfo) + /// } + /// + /// NOTE: RawEnvironment with a single field is UNBOXED by Lean, + /// so we return just the array, not a structure containing it. 
+ pub fn build( + cache: &mut LeanBuildCache, + consts: &FxHashMap, + ) -> Self { + // Build consts array: Array (Name × ConstantInfo) + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (name, info)) in consts.iter().enumerate() { + let key_obj = LeanIxName::build(cache, name); + let val_obj = LeanIxConstantInfo::build(cache, info); + // Build pair (Name × ConstantInfo) + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, key_obj); + pair.set(1, val_obj); + consts_arr.set(i, pair); + } + + Self::new(*consts_arr) } - consts -} -/// Decode Ix.RawEnvironment from Lean object into HashMap. -/// RawEnvironment = { consts : Array (Name × ConstantInfo) } -/// NOTE: Unboxed to just Array. This version deduplicates by name. -pub fn decode_ix_raw_environment( - obj: LeanIxRawEnvironment, -) -> FxHashMap { - let arr = obj.as_array(); - let mut consts: FxHashMap = FxHashMap::default(); - - for pair_obj in arr.iter() { - let pair = pair_obj.as_ctor(); - let name = decode_ix_name(LeanIxName::new(pair.get(0))); - let info = decode_constant_info(LeanIxConstantInfo::new(pair.get(1))); - consts.insert(name, info); + /// Build Ix.RawEnvironment from Vec, preserving order and duplicates. + pub fn build_from_vec( + cache: &mut LeanBuildCache, + consts: &[(Name, ConstantInfo)], + ) -> Self { + let consts_arr = LeanArray::alloc(consts.len()); + for (i, (name, info)) in consts.iter().enumerate() { + let key_obj = LeanIxName::build(cache, name); + let val_obj = LeanIxConstantInfo::build(cache, info); + let pair = LeanCtor::alloc(0, 2, 0); + pair.set(0, key_obj); + pair.set(1, val_obj); + consts_arr.set(i, pair); + } + Self::new(*consts_arr) } - consts -} + /// Decode Ix.RawEnvironment from Lean object into HashMap. + /// RawEnvironment = { consts : Array (Name × ConstantInfo) } + /// NOTE: Unboxed to just Array. This version deduplicates by name. 
+ pub fn decode(self) -> FxHashMap { + let arr = self.as_array(); + let mut consts: FxHashMap = FxHashMap::default(); + + for pair_obj in arr.iter() { + let pair = pair_obj.as_ctor(); + let name = LeanIxName::new(pair.get(0)).decode(); + let info = LeanIxConstantInfo::new(pair.get(1)).decode(); + consts.insert(name, info); + } -/// Decode Ix.RawEnvironment from Lean object preserving array structure. -/// This version preserves all entries including duplicates. -pub fn decode_ix_raw_environment_vec( - obj: LeanIxRawEnvironment, -) -> Vec<(Name, ConstantInfo)> { - let arr = obj.as_array(); - let mut consts = Vec::with_capacity(arr.len()); - - for pair_obj in arr.iter() { - let pair = pair_obj.as_ctor(); - let name = decode_ix_name(LeanIxName::new(pair.get(0))); - let info = decode_constant_info(LeanIxConstantInfo::new(pair.get(1))); - consts.push((name, info)); + consts } - consts + /// Decode Ix.RawEnvironment from Lean object preserving array structure. + /// This version preserves all entries including duplicates. + pub fn decode_to_vec(self) -> Vec<(Name, ConstantInfo)> { + let arr = self.as_array(); + let mut consts = Vec::with_capacity(arr.len()); + + for pair_obj in arr.iter() { + let pair = pair_obj.as_ctor(); + let name = LeanIxName::new(pair.get(0)).decode(); + let info = LeanIxConstantInfo::new(pair.get(1)).decode(); + consts.push((name, info)); + } + + consts + } } -/// Build Ix.RawEnvironment from Vec, preserving order and duplicates. -pub fn build_raw_environment_from_vec( - cache: &mut LeanBuildCache, - consts: &[(Name, ConstantInfo)], -) -> LeanIxRawEnvironment { - let consts_arr = LeanArray::alloc(consts.len()); - for (i, (name, info)) in consts.iter().enumerate() { - let key_obj = build_name(cache, name); - let val_obj = build_constant_info(cache, info); - let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, key_obj); - pair.set(1, val_obj); - consts_arr.set(i, pair); +impl LeanIxEnvironment { + /// Decode Ix.Environment from Lean object. 
+ /// + /// Ix.Environment = { + /// consts : HashMap Name ConstantInfo + /// } + /// + /// NOTE: Environment with a single field is UNBOXED by Lean, + /// so the pointer IS the HashMap directly, not a structure containing it. + pub fn decode(self) -> FxHashMap { + // Environment is unboxed - obj IS the HashMap directly + let consts_pairs = decode_hashmap( + *self, + |x| LeanIxName::new(x).decode(), + |x| LeanIxConstantInfo::new(x).decode(), + ); + let mut consts: FxHashMap = FxHashMap::default(); + for (name, info) in consts_pairs { + consts.insert(name, info); + } + consts } - LeanIxRawEnvironment::new(*consts_arr) } // ============================================================================= @@ -250,9 +242,9 @@ pub fn build_raw_environment_from_vec( pub extern "C" fn rs_roundtrip_ix_environment( env_ptr: LeanIxEnvironment, ) -> LeanIxRawEnvironment { - let env = decode_ix_environment(env_ptr); + let env = env_ptr.decode(); let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment(&mut cache, &env) + LeanIxRawEnvironment::build(&mut cache, &env) } /// Round-trip an Ix.RawEnvironment: decode from Lean, re-encode. 
@@ -261,7 +253,7 @@ pub extern "C" fn rs_roundtrip_ix_environment( pub extern "C" fn rs_roundtrip_ix_raw_environment( env_ptr: LeanIxRawEnvironment, ) -> LeanIxRawEnvironment { - let env = decode_ix_raw_environment_vec(env_ptr); + let env = env_ptr.decode_to_vec(); let mut cache = LeanBuildCache::with_capacity(env.len()); - build_raw_environment_from_vec(&mut cache, &env) + LeanIxRawEnvironment::build_from_vec(&mut cache, &env) } diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index 48ca78ba..fae10519 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -22,340 +22,315 @@ use crate::lean::{ LeanIxName, }; use lean_ffi::nat::Nat; -use lean_ffi::object::{LeanArray, LeanCtor, LeanObject, LeanString}; +use lean_ffi::object::{LeanCtor, LeanObject, LeanString}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::address::build_address; -use crate::ffi::ix::data::{build_data_value, decode_data_value}; -use crate::ffi::ix::level::{build_level, build_level_array, decode_ix_level}; -use crate::ffi::ix::name::{build_name, decode_ix_name}; use crate::ffi::primitives::build_nat; +use crate::lean::LeanIxAddress; -/// Build a Lean Ix.Expr with embedded hash. -/// Uses caching to avoid rebuilding the same expression. 
-pub fn build_expr(cache: &mut LeanBuildCache, expr: &Expr) -> LeanIxExpr { - let hash = *expr.get_hash(); - if let Some(&cached) = cache.exprs.get(&hash) { - cached.inc_ref(); - return cached; - } - - let result = match expr.as_data() { - ExprData::Bvar(idx, h) => { - let obj = LeanCtor::alloc(0, 2, 0); - obj.set(0, build_nat(idx)); - obj.set(1, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::Fvar(name, h) => { - let obj = LeanCtor::alloc(1, 2, 0); - obj.set(0, build_name(cache, name)); - obj.set(1, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::Mvar(name, h) => { - let obj = LeanCtor::alloc(2, 2, 0); - obj.set(0, build_name(cache, name)); - obj.set(1, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::Sort(level, h) => { - let obj = LeanCtor::alloc(3, 2, 0); - obj.set(0, build_level(cache, level)); - obj.set(1, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::Const(name, levels, h) => { - let name_obj = build_name(cache, name); - let levels_obj = build_level_array(cache, levels); - let obj = LeanCtor::alloc(4, 3, 0); - obj.set(0, name_obj); - obj.set(1, levels_obj); - obj.set(2, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::App(fn_expr, arg_expr, h) => { - let fn_obj = build_expr(cache, fn_expr); - let arg_obj = build_expr(cache, arg_expr); - let obj = LeanCtor::alloc(5, 3, 0); - obj.set(0, fn_obj); - obj.set(1, arg_obj); - obj.set(2, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::Lam(name, ty, body, bi, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - // 4 object fields, 1 scalar byte for BinderInfo - let obj = LeanCtor::alloc(6, 4, 1); - obj.set(0, name_obj); - obj.set(1, ty_obj); - obj.set(2, body_obj); - obj.set(3, hash_obj); - obj.set_u8(4 * 8, binder_info_to_u8(bi)); - LeanIxExpr::new(*obj) - }, - ExprData::ForallE(name, ty, body, bi, h) => { - let name_obj = 
build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - let obj = LeanCtor::alloc(7, 4, 1); - obj.set(0, name_obj); - obj.set(1, ty_obj); - obj.set(2, body_obj); - obj.set(3, hash_obj); - obj.set_u8(4 * 8, binder_info_to_u8(bi)); - LeanIxExpr::new(*obj) - }, - ExprData::LetE(name, ty, val, body, non_dep, h) => { - let name_obj = build_name(cache, name); - let ty_obj = build_expr(cache, ty); - let val_obj = build_expr(cache, val); - let body_obj = build_expr(cache, body); - let hash_obj = build_address(h); - // 5 object fields, 1 scalar byte for Bool - let obj = LeanCtor::alloc(8, 5, 1); - obj.set(0, name_obj); - obj.set(1, ty_obj); - obj.set(2, val_obj); - obj.set(3, body_obj); - obj.set(4, hash_obj); - obj.set_u8(5 * 8, *non_dep as u8); - LeanIxExpr::new(*obj) - }, - ExprData::Lit(lit, h) => { - let lit_obj = build_literal(lit); - let obj = LeanCtor::alloc(9, 2, 0); - obj.set(0, lit_obj); - obj.set(1, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::Mdata(md, inner, h) => { - let md_obj = build_mdata_array(cache, md); - let inner_obj = build_expr(cache, inner); - let obj = LeanCtor::alloc(10, 3, 0); - obj.set(0, md_obj); - obj.set(1, inner_obj); - obj.set(2, build_address(h)); - LeanIxExpr::new(*obj) - }, - ExprData::Proj(type_name, idx, struct_expr, h) => { - let name_obj = build_name(cache, type_name); - let idx_obj = build_nat(idx); - let struct_obj = build_expr(cache, struct_expr); - let obj = LeanCtor::alloc(11, 4, 0); - obj.set(0, name_obj); - obj.set(1, idx_obj); - obj.set(2, struct_obj); - obj.set(3, build_address(h)); - LeanIxExpr::new(*obj) - }, - }; +impl LeanIxExpr { + /// Build a Lean Ix.Expr with embedded hash. + /// Uses caching to avoid rebuilding the same expression. 
+ pub fn build(cache: &mut LeanBuildCache, expr: &Expr) -> Self { + let hash = *expr.get_hash(); + if let Some(&cached) = cache.exprs.get(&hash) { + cached.inc_ref(); + return cached; + } - cache.exprs.insert(hash, result); - result -} + let result = match expr.as_data() { + ExprData::Bvar(idx, h) => { + let obj = LeanCtor::alloc(0, 2, 0); + obj.set(0, build_nat(idx)); + obj.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::Fvar(name, h) => { + let obj = LeanCtor::alloc(1, 2, 0); + obj.set(0, LeanIxName::build(cache, name)); + obj.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::Mvar(name, h) => { + let obj = LeanCtor::alloc(2, 2, 0); + obj.set(0, LeanIxName::build(cache, name)); + obj.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::Sort(level, h) => { + let obj = LeanCtor::alloc(3, 2, 0); + obj.set(0, LeanIxLevel::build(cache, level)); + obj.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::Const(name, levels, h) => { + let name_obj = LeanIxName::build(cache, name); + let levels_obj = LeanIxLevel::build_array(cache, levels); + let obj = LeanCtor::alloc(4, 3, 0); + obj.set(0, name_obj); + obj.set(1, levels_obj); + obj.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::App(fn_expr, arg_expr, h) => { + let fn_obj = Self::build(cache, fn_expr); + let arg_obj = Self::build(cache, arg_expr); + let obj = LeanCtor::alloc(5, 3, 0); + obj.set(0, fn_obj); + obj.set(1, arg_obj); + obj.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::Lam(name, ty, body, bi, h) => { + let name_obj = LeanIxName::build(cache, name); + let ty_obj = Self::build(cache, ty); + let body_obj = Self::build(cache, body); + let hash_obj = LeanIxAddress::build_from_hash(h); + // 4 object fields, 1 scalar byte for BinderInfo + let obj = LeanCtor::alloc(6, 4, 1); + obj.set(0, name_obj); + obj.set(1, ty_obj); + obj.set(2, body_obj); + 
obj.set(3, hash_obj); + obj.set_u8(4 * 8, LeanIxBinderInfo::to_u8(bi)); + Self::new(*obj) + }, + ExprData::ForallE(name, ty, body, bi, h) => { + let name_obj = LeanIxName::build(cache, name); + let ty_obj = Self::build(cache, ty); + let body_obj = Self::build(cache, body); + let hash_obj = LeanIxAddress::build_from_hash(h); + let obj = LeanCtor::alloc(7, 4, 1); + obj.set(0, name_obj); + obj.set(1, ty_obj); + obj.set(2, body_obj); + obj.set(3, hash_obj); + obj.set_u8(4 * 8, LeanIxBinderInfo::to_u8(bi)); + Self::new(*obj) + }, + ExprData::LetE(name, ty, val, body, non_dep, h) => { + let name_obj = LeanIxName::build(cache, name); + let ty_obj = Self::build(cache, ty); + let val_obj = Self::build(cache, val); + let body_obj = Self::build(cache, body); + let hash_obj = LeanIxAddress::build_from_hash(h); + // 5 object fields, 1 scalar byte for Bool + let obj = LeanCtor::alloc(8, 5, 1); + obj.set(0, name_obj); + obj.set(1, ty_obj); + obj.set(2, val_obj); + obj.set(3, body_obj); + obj.set(4, hash_obj); + obj.set_u8(5 * 8, *non_dep as u8); + Self::new(*obj) + }, + ExprData::Lit(lit, h) => { + let lit_obj = LeanIxLiteral::build(lit); + let obj = LeanCtor::alloc(9, 2, 0); + obj.set(0, lit_obj); + obj.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::Mdata(md, inner, h) => { + let md_obj = LeanIxDataValue::build_kvmap(cache, md); + let inner_obj = Self::build(cache, inner); + let obj = LeanCtor::alloc(10, 3, 0); + obj.set(0, md_obj); + obj.set(1, inner_obj); + obj.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + ExprData::Proj(type_name, idx, struct_expr, h) => { + let name_obj = LeanIxName::build(cache, type_name); + let idx_obj = build_nat(idx); + let struct_obj = Self::build(cache, struct_expr); + let obj = LeanCtor::alloc(11, 4, 0); + obj.set(0, name_obj); + obj.set(1, idx_obj); + obj.set(2, struct_obj); + obj.set(3, LeanIxAddress::build_from_hash(h)); + Self::new(*obj) + }, + }; -/// Build an Array of (Name × DataValue) for 
mdata. -fn build_mdata_array( - cache: &mut LeanBuildCache, - md: &[(Name, DataValue)], -) -> LeanArray { - let arr = LeanArray::alloc(md.len()); - for (i, (name, dv)) in md.iter().enumerate() { - let pair = build_name_datavalue_pair(cache, name, dv); - arr.set(i, pair); + cache.exprs.insert(hash, result); + result } - arr -} -/// Build a (Name, DataValue) pair (Prod). -fn build_name_datavalue_pair( - cache: &mut LeanBuildCache, - name: &Name, - dv: &DataValue, -) -> LeanObject { - let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, build_name(cache, name)); - pair.set(1, build_data_value(cache, dv)); - *pair -} + /// Decode a Lean Ix.Expr to Rust Expr. + pub fn decode(self) -> Expr { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // bvar + let idx = Nat::from_obj(ctor.get(0)); + Expr::bvar(idx) + }, + 1 => { + // fvar + let name = LeanIxName::new(ctor.get(0)).decode(); + Expr::fvar(name) + }, + 2 => { + // mvar + let name = LeanIxName::new(ctor.get(0)).decode(); + Expr::mvar(name) + }, + 3 => { + // sort + let level = LeanIxLevel::new(ctor.get(0)).decode(); + Expr::sort(level) + }, + 4 => { + // const + let name = LeanIxName::new(ctor.get(0)).decode(); + let levels: Vec = + ctor.get(1).as_array().map(|x| LeanIxLevel::new(x).decode()); -/// Build a Literal (natVal or strVal). 
-pub fn build_literal(lit: &Literal) -> LeanIxLiteral { - let obj = match lit { - Literal::NatVal(n) => { - let obj = LeanCtor::alloc(0, 1, 0); - obj.set(0, build_nat(n)); - *obj - }, - Literal::StrVal(s) => { - let obj = LeanCtor::alloc(1, 1, 0); - obj.set(0, LeanString::new(s.as_str())); - *obj - }, - }; - LeanIxLiteral::new(obj) -} + Expr::cnst(name, levels) + }, + 5 => { + // app + let fn_expr = Self::new(ctor.get(0)).decode(); + let arg_expr = Self::new(ctor.get(1)).decode(); + Expr::app(fn_expr, arg_expr) + }, + 6 => { + // lam: name, ty, body, hash, bi (scalar) + let name = LeanIxName::new(ctor.get(0)).decode(); + let ty = Self::new(ctor.get(1)).decode(); + let body = Self::new(ctor.get(2)).decode(); -/// Build Ix.BinderInfo enum. -/// BinderInfo is a 4-constructor enum with no fields, stored as boxed scalar. -pub fn build_binder_info(bi: &BinderInfo) -> LeanIxBinderInfo { - LeanIxBinderInfo::new(LeanObject::box_usize(binder_info_to_u8(bi) as usize)) -} + // Read BinderInfo scalar (4 obj fields: name, ty, body, hash) + let bi_byte = ctor.scalar_u8(4, 0); + let bi = LeanIxBinderInfo::from_u8(bi_byte); -/// Convert BinderInfo to u8 tag. -pub fn binder_info_to_u8(bi: &BinderInfo) -> u8 { - match bi { - BinderInfo::Default => 0, - BinderInfo::Implicit => 1, - BinderInfo::StrictImplicit => 2, - BinderInfo::InstImplicit => 3, - } -} + Expr::lam(name, ty, body, bi) + }, + 7 => { + // forallE: same layout as lam + let name = LeanIxName::new(ctor.get(0)).decode(); + let ty = Self::new(ctor.get(1)).decode(); + let body = Self::new(ctor.get(2)).decode(); -/// Decode a Lean Ix.Expr to Rust Expr. 
-pub fn decode_ix_expr(obj: LeanIxExpr) -> Expr { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - // bvar - let idx = Nat::from_obj(ctor.get(0)); - Expr::bvar(idx) - }, - 1 => { - // fvar - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - Expr::fvar(name) - }, - 2 => { - // mvar - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - Expr::mvar(name) - }, - 3 => { - // sort - let level = decode_ix_level(LeanIxLevel::new(ctor.get(0))); - Expr::sort(level) - }, - 4 => { - // const - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let levels: Vec = - ctor.get(1).as_array().map(|x| decode_ix_level(LeanIxLevel::new(x))); + // 4 obj fields: name, ty, body, hash + let bi_byte = ctor.scalar_u8(4, 0); + let bi = LeanIxBinderInfo::from_u8(bi_byte); - Expr::cnst(name, levels) - }, - 5 => { - // app - let fn_expr = decode_ix_expr(LeanIxExpr::new(ctor.get(0))); - let arg_expr = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); - Expr::app(fn_expr, arg_expr) - }, - 6 => { - // lam: name, ty, body, hash, bi (scalar) - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let ty = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); - let body = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); + Expr::all(name, ty, body, bi) + }, + 8 => { + // letE: name, ty, val, body, hash, nonDep (scalar) + let name = LeanIxName::new(ctor.get(0)).decode(); + let ty = Self::new(ctor.get(1)).decode(); + let val = Self::new(ctor.get(2)).decode(); + let body = Self::new(ctor.get(3)).decode(); - // Read BinderInfo scalar (4 obj fields: name, ty, body, hash) - let bi_byte = ctor.scalar_u8(4, 0); - let bi = decode_binder_info(bi_byte); + // 5 obj fields: name, ty, val, body, hash + let non_dep = ctor.scalar_u8(5, 0) != 0; - Expr::lam(name, ty, body, bi) - }, - 7 => { - // forallE: same layout as lam - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let ty = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); - let body = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); + 
Expr::letE(name, ty, val, body, non_dep) + }, + 9 => { + // lit + let lit = LeanIxLiteral::new(ctor.get(0)).decode(); + Expr::lit(lit) + }, + 10 => { + // mdata: data, expr, hash + let data: Vec<(Name, DataValue)> = + ctor.get(0).as_array().map(|obj| { + let pair = obj.as_ctor(); + let name = LeanIxName::new(pair.get(0)).decode(); + let dv = LeanIxDataValue::new(pair.get(1)).decode(); + (name, dv) + }); - // 4 obj fields: name, ty, body, hash - let bi_byte = ctor.scalar_u8(4, 0); - let bi = decode_binder_info(bi_byte); + let inner = Self::new(ctor.get(1)).decode(); + Expr::mdata(data, inner) + }, + 11 => { + // proj: typeName, idx, struct, hash + let type_name = LeanIxName::new(ctor.get(0)).decode(); + let idx = Nat::from_obj(ctor.get(1)); + let struct_expr = Self::new(ctor.get(2)).decode(); - Expr::all(name, ty, body, bi) - }, - 8 => { - // letE: name, ty, val, body, hash, nonDep (scalar) - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let ty = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); - let val = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); - let body = decode_ix_expr(LeanIxExpr::new(ctor.get(3))); - - // 5 obj fields: name, ty, val, body, hash - let non_dep = ctor.scalar_u8(5, 0) != 0; - - Expr::letE(name, ty, val, body, non_dep) - }, - 9 => { - // lit - let lit = decode_literal(LeanIxLiteral::new(ctor.get(0))); - Expr::lit(lit) - }, - 10 => { - // mdata: data, expr, hash - let data: Vec<(Name, DataValue)> = - ctor.get(0).as_array().map(decode_name_data_value); + Expr::proj(type_name, idx, struct_expr) + }, + _ => panic!("Invalid Ix.Expr tag: {}", ctor.tag()), + } + } +} - let inner = decode_ix_expr(LeanIxExpr::new(ctor.get(1))); - Expr::mdata(data, inner) - }, - 11 => { - // proj: typeName, idx, struct, hash - let type_name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let idx = Nat::from_obj(ctor.get(1)); - let struct_expr = decode_ix_expr(LeanIxExpr::new(ctor.get(2))); +impl LeanIxLiteral { + /// Build a Literal (natVal or strVal). 
+ pub fn build(lit: &Literal) -> Self { + let obj = match lit { + Literal::NatVal(n) => { + let obj = LeanCtor::alloc(0, 1, 0); + obj.set(0, build_nat(n)); + *obj + }, + Literal::StrVal(s) => { + let obj = LeanCtor::alloc(1, 1, 0); + obj.set(0, LeanString::new(s.as_str())); + *obj + }, + }; + Self::new(obj) + } - Expr::proj(type_name, idx, struct_expr) - }, - _ => panic!("Invalid Ix.Expr tag: {}", ctor.tag()), + /// Decode Lean.Literal from a Lean object. + pub fn decode(self) -> Literal { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // natVal + let nat = Nat::from_obj(ctor.get(0)); + Literal::NatVal(nat) + }, + 1 => { + // strVal + Literal::StrVal(ctor.get(0).as_string().to_string()) + }, + _ => panic!("Invalid Literal tag: {}", ctor.tag()), + } } } -/// Decode Lean.Literal from a Lean object. -pub fn decode_literal(obj: LeanIxLiteral) -> Literal { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - // natVal - let nat = Nat::from_obj(ctor.get(0)); - Literal::NatVal(nat) - }, - 1 => { - // strVal - Literal::StrVal(ctor.get(0).as_string().to_string()) - }, - _ => panic!("Invalid Literal tag: {}", ctor.tag()), +impl LeanIxBinderInfo { + /// Build Ix.BinderInfo enum. + /// BinderInfo is a 4-constructor enum with no fields, stored as boxed scalar. + pub fn build(bi: &BinderInfo) -> Self { + Self::new(LeanObject::box_usize(Self::to_u8(bi) as usize)) } -} -/// Decode a (Name × DataValue) pair for mdata. -fn decode_name_data_value(obj: LeanObject) -> (Name, DataValue) { - let ctor = obj.as_ctor(); - let name = decode_ix_name(LeanIxName::new(ctor.get(0))); - let dv = decode_data_value(LeanIxDataValue::new(ctor.get(1))); - (name, dv) -} + /// Convert BinderInfo to u8 tag. + pub fn to_u8(bi: &BinderInfo) -> u8 { + match bi { + BinderInfo::Default => 0, + BinderInfo::Implicit => 1, + BinderInfo::StrictImplicit => 2, + BinderInfo::InstImplicit => 3, + } + } -/// Decode BinderInfo from byte. 
-pub fn decode_binder_info(bi_byte: u8) -> BinderInfo { - match bi_byte { - 0 => BinderInfo::Default, - 1 => BinderInfo::Implicit, - 2 => BinderInfo::StrictImplicit, - 3 => BinderInfo::InstImplicit, - _ => panic!("Invalid BinderInfo: {}", bi_byte), + /// Decode BinderInfo from byte. + pub fn from_u8(bi_byte: u8) -> BinderInfo { + match bi_byte { + 0 => BinderInfo::Default, + 1 => BinderInfo::Implicit, + 2 => BinderInfo::StrictImplicit, + 3 => BinderInfo::InstImplicit, + _ => panic!("Invalid BinderInfo: {}", bi_byte), + } } } /// Round-trip an Ix.Expr: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_expr(expr_ptr: LeanIxExpr) -> LeanIxExpr { - let expr = decode_ix_expr(expr_ptr); + let expr = expr_ptr.decode(); let mut cache = LeanBuildCache::new(); - build_expr(&mut cache, &expr) + LeanIxExpr::build(&mut cache, &expr) } diff --git a/src/ffi/ix/level.rs b/src/ffi/ix/level.rs index 2bf0c630..bac3eaf6 100644 --- a/src/ffi/ix/level.rs +++ b/src/ffi/ix/level.rs @@ -13,121 +13,122 @@ use crate::lean::{LeanIxLevel, LeanIxName}; use lean_ffi::object::{LeanArray, LeanCtor}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::address::build_address; -use crate::ffi::ix::name::{build_name, decode_ix_name}; +use crate::lean::LeanIxAddress; -/// Build a Lean Ix.Level with embedded hash. -/// Uses caching to avoid rebuilding the same level. -pub fn build_level(cache: &mut LeanBuildCache, level: &Level) -> LeanIxLevel { - let hash = *level.get_hash(); - if let Some(&cached) = cache.levels.get(&hash) { - cached.inc_ref(); - return cached; - } +impl LeanIxLevel { + /// Build a Lean Ix.Level with embedded hash. + /// Uses caching to avoid rebuilding the same level. 
+ pub fn build(cache: &mut LeanBuildCache, level: &Level) -> Self { + let hash = *level.get_hash(); + if let Some(&cached) = cache.levels.get(&hash) { + cached.inc_ref(); + return cached; + } - let result = match level.as_data() { - LevelData::Zero(h) => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, build_address(h)); - LeanIxLevel::new(*ctor) - }, - LevelData::Succ(x, h) => { - let x_obj = build_level(cache, x); - let ctor = LeanCtor::alloc(1, 2, 0); - ctor.set(0, x_obj); - ctor.set(1, build_address(h)); - LeanIxLevel::new(*ctor) - }, - LevelData::Max(x, y, h) => { - let x_obj = build_level(cache, x); - let y_obj = build_level(cache, y); - let ctor = LeanCtor::alloc(2, 3, 0); - ctor.set(0, x_obj); - ctor.set(1, y_obj); - ctor.set(2, build_address(h)); - LeanIxLevel::new(*ctor) - }, - LevelData::Imax(x, y, h) => { - let x_obj = build_level(cache, x); - let y_obj = build_level(cache, y); - let ctor = LeanCtor::alloc(3, 3, 0); - ctor.set(0, x_obj); - ctor.set(1, y_obj); - ctor.set(2, build_address(h)); - LeanIxLevel::new(*ctor) - }, - LevelData::Param(n, h) => { - let n_obj = build_name(cache, n); - let ctor = LeanCtor::alloc(4, 2, 0); - ctor.set(0, n_obj); - ctor.set(1, build_address(h)); - LeanIxLevel::new(*ctor) - }, - LevelData::Mvar(n, h) => { - let n_obj = build_name(cache, n); - let ctor = LeanCtor::alloc(5, 2, 0); - ctor.set(0, n_obj); - ctor.set(1, build_address(h)); - LeanIxLevel::new(*ctor) - }, - }; + let result = match level.as_data() { + LevelData::Zero(h) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Succ(x, h) => { + let x_obj = Self::build(cache, x); + let ctor = LeanCtor::alloc(1, 2, 0); + ctor.set(0, x_obj); + ctor.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Max(x, y, h) => { + let x_obj = Self::build(cache, x); + let y_obj = Self::build(cache, y); + let ctor = LeanCtor::alloc(2, 3, 0); + ctor.set(0, x_obj); + 
ctor.set(1, y_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Imax(x, y, h) => { + let x_obj = Self::build(cache, x); + let y_obj = Self::build(cache, y); + let ctor = LeanCtor::alloc(3, 3, 0); + ctor.set(0, x_obj); + ctor.set(1, y_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Param(n, h) => { + let n_obj = LeanIxName::build(cache, n); + let ctor = LeanCtor::alloc(4, 2, 0); + ctor.set(0, n_obj); + ctor.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + LevelData::Mvar(n, h) => { + let n_obj = LeanIxName::build(cache, n); + let ctor = LeanCtor::alloc(5, 2, 0); + ctor.set(0, n_obj); + ctor.set(1, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + }; - cache.levels.insert(hash, result); - result -} + cache.levels.insert(hash, result); + result + } -/// Build an Array of Levels. -pub fn build_level_array( - cache: &mut LeanBuildCache, - levels: &[Level], -) -> LeanArray { - let arr = LeanArray::alloc(levels.len()); - for (i, level) in levels.iter().enumerate() { - arr.set(i, build_level(cache, level)); + /// Build an Array of Levels. + pub fn build_array( + cache: &mut LeanBuildCache, + levels: &[Level], + ) -> LeanArray { + let arr = LeanArray::alloc(levels.len()); + for (i, level) in levels.iter().enumerate() { + arr.set(i, Self::build(cache, level)); + } + arr } - arr -} -/// Decode a Lean Ix.Level to Rust Level. 
-pub fn decode_ix_level(obj: LeanIxLevel) -> Level { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => Level::zero(), - 1 => { - let x = decode_ix_level(LeanIxLevel::new(ctor.get(0))); - Level::succ(x) - }, - 2 => { - let x = decode_ix_level(LeanIxLevel::new(ctor.get(0))); - let y = decode_ix_level(LeanIxLevel::new(ctor.get(1))); - Level::max(x, y) - }, - 3 => { - let x = decode_ix_level(LeanIxLevel::new(ctor.get(0))); - let y = decode_ix_level(LeanIxLevel::new(ctor.get(1))); - Level::imax(x, y) - }, - 4 => { - let n = decode_ix_name(LeanIxName::new(ctor.get(0))); - Level::param(n) - }, - 5 => { - let n = decode_ix_name(LeanIxName::new(ctor.get(0))); - Level::mvar(n) - }, - _ => panic!("Invalid Ix.Level tag: {}", ctor.tag()), + /// Decode a Lean Ix.Level to Rust Level. + pub fn decode(self) -> Level { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => Level::zero(), + 1 => { + let x = Self::new(ctor.get(0)).decode(); + Level::succ(x) + }, + 2 => { + let x = Self::new(ctor.get(0)).decode(); + let y = Self::new(ctor.get(1)).decode(); + Level::max(x, y) + }, + 3 => { + let x = Self::new(ctor.get(0)).decode(); + let y = Self::new(ctor.get(1)).decode(); + Level::imax(x, y) + }, + 4 => { + let n = LeanIxName::new(ctor.get(0)).decode(); + Level::param(n) + }, + 5 => { + let n = LeanIxName::new(ctor.get(0)).decode(); + Level::mvar(n) + }, + _ => panic!("Invalid Ix.Level tag: {}", ctor.tag()), + } } -} -/// Decode Array of Levels from Lean pointer. -pub fn decode_level_array(obj: LeanArray) -> Vec { - obj.map(|x| decode_ix_level(LeanIxLevel::new(x))) + /// Decode Array of Levels from Lean pointer. + pub fn decode_array(obj: LeanArray) -> Vec { + obj.map(|x| Self::new(x).decode()) + } } /// Round-trip an Ix.Level: decode from Lean, re-encode via LeanBuildCache. 
#[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_level(level_ptr: LeanIxLevel) -> LeanIxLevel { - let level = decode_ix_level(level_ptr); + let level = level_ptr.decode(); let mut cache = LeanBuildCache::new(); - build_level(&mut cache, &level) + LeanIxLevel::build(&mut cache, &level) } diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index aeef2393..e8f0ca07 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -11,93 +11,95 @@ use lean_ffi::nat::Nat; use lean_ffi::object::{LeanArray, LeanCtor, LeanString}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::address::build_address; use crate::ffi::primitives::build_nat; +use crate::lean::LeanIxAddress; -/// Build a Lean Ix.Name with embedded hash. -/// Uses caching to avoid rebuilding the same name. -pub fn build_name(cache: &mut LeanBuildCache, name: &Name) -> LeanIxName { - let hash = name.get_hash(); - if let Some(&cached) = cache.names.get(hash) { - cached.inc_ref(); - return cached; - } +impl LeanIxName { + /// Build a Lean Ix.Name with embedded hash. + /// Uses caching to avoid rebuilding the same name. 
+ pub fn build(cache: &mut LeanBuildCache, name: &Name) -> Self { + let hash = name.get_hash(); + if let Some(&cached) = cache.names.get(hash) { + cached.inc_ref(); + return cached; + } - let result = match name.as_data() { - NameData::Anonymous(h) => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, build_address(h)); - LeanIxName::new(*ctor) - }, - NameData::Str(parent, s, h) => { - let parent_obj = build_name(cache, parent); - let s_obj = LeanString::new(s.as_str()); - let ctor = LeanCtor::alloc(1, 3, 0); - ctor.set(0, parent_obj); - ctor.set(1, s_obj); - ctor.set(2, build_address(h)); - LeanIxName::new(*ctor) - }, - NameData::Num(parent, n, h) => { - let parent_obj = build_name(cache, parent); - let n_obj = build_nat(n); - let ctor = LeanCtor::alloc(2, 3, 0); - ctor.set(0, parent_obj); - ctor.set(1, n_obj); - ctor.set(2, build_address(h)); - LeanIxName::new(*ctor) - }, - }; + let result = match name.as_data() { + NameData::Anonymous(h) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + NameData::Str(parent, s, h) => { + let parent_obj = Self::build(cache, parent); + let s_obj = LeanString::new(s.as_str()); + let ctor = LeanCtor::alloc(1, 3, 0); + ctor.set(0, parent_obj); + ctor.set(1, s_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + NameData::Num(parent, n, h) => { + let parent_obj = Self::build(cache, parent); + let n_obj = build_nat(n); + let ctor = LeanCtor::alloc(2, 3, 0); + ctor.set(0, parent_obj); + ctor.set(1, n_obj); + ctor.set(2, LeanIxAddress::build_from_hash(h)); + Self::new(*ctor) + }, + }; - cache.names.insert(*hash, result); - result -} + cache.names.insert(*hash, result); + result + } -/// Build an Array of Names. 
-pub fn build_name_array( - cache: &mut LeanBuildCache, - names: &[Name], -) -> LeanArray { - let arr = LeanArray::alloc(names.len()); - for (i, name) in names.iter().enumerate() { - arr.set(i, build_name(cache, name)); + /// Build an Array of Names. + pub fn build_array( + cache: &mut LeanBuildCache, + names: &[Name], + ) -> LeanArray { + let arr = LeanArray::alloc(names.len()); + for (i, name) in names.iter().enumerate() { + arr.set(i, Self::build(cache, name)); + } + arr } - arr -} -/// Decode a Lean Ix.Name to Rust Name. -pub fn decode_ix_name(obj: LeanIxName) -> Name { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - // anonymous: just has hash, construct anon Name - Name::anon() - }, - 1 => { - // str: parent, s, hash - let parent = decode_ix_name(LeanIxName::new(ctor.get(0))); - let s = ctor.get(1).as_string().to_string(); - Name::str(parent, s) - }, - 2 => { - // num: parent, i, hash - let parent = decode_ix_name(LeanIxName::new(ctor.get(0))); - let i = Nat::from_obj(ctor.get(1)); - Name::num(parent, i) - }, - _ => panic!("Invalid Ix.Name tag: {}", ctor.tag()), + /// Decode a Lean Ix.Name to Rust Name. + pub fn decode(self) -> Name { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + // anonymous: just has hash, construct anon Name + Name::anon() + }, + 1 => { + // str: parent, s, hash + let parent = Self::new(ctor.get(0)).decode(); + let s = ctor.get(1).as_string().to_string(); + Name::str(parent, s) + }, + 2 => { + // num: parent, i, hash + let parent = Self::new(ctor.get(0)).decode(); + let i = Nat::from_obj(ctor.get(1)); + Name::num(parent, i) + }, + _ => panic!("Invalid Ix.Name tag: {}", ctor.tag()), + } } -} -/// Decode Array of Names from Lean pointer. -pub fn decode_name_array(obj: LeanArray) -> Vec { - obj.map(|x| decode_ix_name(LeanIxName::new(x))) + /// Decode Array of Names from Lean pointer. 
+ pub fn decode_array(obj: LeanArray) -> Vec { + obj.map(|x| Self::new(x).decode()) + } } /// Round-trip an Ix.Name: decode from Lean, re-encode via LeanBuildCache. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ix_name(name_ptr: LeanIxName) -> LeanIxName { - let name = decode_ix_name(name_ptr); + let name = name_ptr.decode(); let mut cache = LeanBuildCache::new(); - build_name(&mut cache, &name) + LeanIxName::build(&mut cache, &name) } diff --git a/src/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs index f3595cf9..bfdff714 100644 --- a/src/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -7,7 +7,7 @@ use crate::ix::env::Name; use crate::ix::ixon::serialize::put_expr; use crate::ix::mutual::MutCtx; use crate::lean::{LeanIxBlockCompareDetail, LeanIxBlockCompareResult}; -use lean_ffi::object::{LeanByteArray, LeanCtor, LeanObject}; +use lean_ffi::object::{LeanByteArray, LeanCtor, LeanList, LeanObject}; use crate::ffi::lean_env::{ Cache as LeanCache, GlobalCache, decode_expr, decode_name, @@ -61,40 +61,44 @@ pub extern "C" fn rs_compare_expr_compilation( rust_bytes == lean_bytes } -/// Build a BlockCompareResult Lean object. -fn build_block_compare_result( - matched: bool, - not_found: bool, - lean_size: u64, - rust_size: u64, - first_diff_offset: u64, -) -> LeanIxBlockCompareResult { - let obj = if matched { - *LeanCtor::alloc(0, 0, 0) // match - } else if not_found { - *LeanCtor::alloc(2, 0, 0) // notFound - } else { - // mismatch - let ctor = LeanCtor::alloc(1, 0, 24); - ctor.set_u64(0, lean_size); - ctor.set_u64(8, rust_size); - ctor.set_u64(16, first_diff_offset); - *ctor - }; - LeanIxBlockCompareResult::new(obj) +impl LeanIxBlockCompareResult { + /// Build a BlockCompareResult Lean object. 
+ fn build( + matched: bool, + not_found: bool, + lean_size: u64, + rust_size: u64, + first_diff_offset: u64, + ) -> Self { + let obj = if matched { + *LeanCtor::alloc(0, 0, 0) // match + } else if not_found { + *LeanCtor::alloc(2, 0, 0) // notFound + } else { + // mismatch + let ctor = LeanCtor::alloc(1, 0, 24); + ctor.set_u64(0, lean_size); + ctor.set_u64(8, rust_size); + ctor.set_u64(16, first_diff_offset); + *ctor + }; + Self::new(obj) + } } -/// Build a BlockCompareDetail Lean object. -fn build_block_compare_detail( - result: LeanIxBlockCompareResult, - lean_sharing_len: u64, - rust_sharing_len: u64, -) -> LeanIxBlockCompareDetail { - let ctor = LeanCtor::alloc(0, 1, 16); - ctor.set(0, result); - ctor.set_u64(8, lean_sharing_len); - ctor.set_u64(8 + 8, rust_sharing_len); - LeanIxBlockCompareDetail::new(*ctor) +impl LeanIxBlockCompareDetail { + /// Build a BlockCompareDetail Lean object. + fn build( + result: LeanIxBlockCompareResult, + lean_sharing_len: u64, + rust_sharing_len: u64, + ) -> Self { + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, result); + ctor.set_u64(8, lean_sharing_len); + ctor.set_u64(8 + 8, rust_sharing_len); + Self::new(*ctor) + } } /// Compare a single block by lowlink name. 
@@ -121,22 +125,22 @@ pub unsafe extern "C" fn rs_compare_block_v2( None => { // Block not found in Rust compilation let result = - build_block_compare_result(false, true, lean_data.len() as u64, 0, 0); - return build_block_compare_detail(result, lean_sharing_len, 0); + LeanIxBlockCompareResult::build(false, true, lean_data.len() as u64, 0, 0); + return LeanIxBlockCompareDetail::build(result, lean_sharing_len, 0); }, }; // Compare bytes if rust_bytes == lean_data { // Match - let result = build_block_compare_result( + let result = LeanIxBlockCompareResult::build( true, false, lean_data.len() as u64, rust_bytes.len() as u64, 0, ); - return build_block_compare_detail( + return LeanIxBlockCompareDetail::build( result, lean_sharing_len, rust_sharing_len, @@ -156,14 +160,14 @@ pub unsafe extern "C" fn rs_compare_block_v2( |i| i as u64, ); - let result = build_block_compare_result( + let result = LeanIxBlockCompareResult::build( false, false, lean_data.len() as u64, rust_bytes.len() as u64, first_diff_offset, ); - build_block_compare_detail(result, lean_sharing_len, rust_sharing_len) + LeanIxBlockCompareDetail::build(result, lean_sharing_len, rust_sharing_len) } /// Free a RustBlockEnv pointer. @@ -183,12 +187,12 @@ pub unsafe extern "C" fn rs_free_compiled_env(ptr: *mut RustBlockEnv) { /// Build a RustBlockEnv from a Lean environment. 
#[unsafe(no_mangle)] pub extern "C" fn rs_build_compiled_env( - env_consts_ptr: LeanObject, + env_consts_ptr: LeanList, ) -> *mut RustBlockEnv { use crate::ffi::lean_env::decode_env; // Decode Lean environment - let rust_env = decode_env(env_consts_ptr.as_list()); + let rust_env = decode_env(env_consts_ptr); let rust_env = std::sync::Arc::new(rust_env); // Compile diff --git a/src/ffi/ixon/constant.rs b/src/ffi/ixon/constant.rs index b674b2e6..84f91279 100644 --- a/src/ffi/ixon/constant.rs +++ b/src/ffi/ixon/constant.rs @@ -6,7 +6,6 @@ use std::sync::Arc; -use crate::ix::address::Address; use crate::ix::ixon::constant::{ Axiom as IxonAxiom, Constant as IxonConstant, ConstantInfo as IxonConstantInfo, Constructor as IxonConstructor, @@ -20,551 +19,548 @@ use crate::lean::{ LeanIxonConstructor, LeanIxonConstructorProj, LeanIxonDefinition, LeanIxonDefinitionProj, LeanIxonExpr, LeanIxonInductive, LeanIxonInductiveProj, LeanIxonMutConst, LeanIxonQuotient, LeanIxonRecursor, - LeanIxonRecursorProj, LeanIxonRecursorRule, + LeanIxonRecursorProj, LeanIxonRecursorRule, LeanIxonUniv, }; -use lean_ffi::object::{LeanArray, LeanByteArray, LeanCtor}; +use lean_ffi::object::{LeanArray, LeanCtor}; -use crate::ffi::ixon::expr::{ - build_ixon_expr, build_ixon_expr_array, decode_ixon_expr, - decode_ixon_expr_array, -}; -use crate::ffi::ixon::univ::{build_ixon_univ_array, decode_ixon_univ_array}; +// ============================================================================= +// Definition +// ============================================================================= -/// Build Address from Ixon Address type (which is just a [u8; 32]). -pub fn build_address_from_ixon(addr: &Address) -> LeanIxAddress { - LeanByteArray::from_bytes(addr.as_bytes()) -} +impl LeanIxonDefinition { + /// Build Ixon.Definition + /// Lean stores scalar fields ordered by size (largest first). 
+ /// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) + pub fn build(def: &IxonDefinition) -> Self { + let typ_obj = LeanIxonExpr::build(&def.typ); + let value_obj = LeanIxonExpr::build(&def.value); + // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) + let ctor = LeanCtor::alloc(0, 2, 16); + ctor.set(0, typ_obj); + ctor.set(1, value_obj); + // Scalar offsets from obj_cptr: 2*8=16 for lvls, 2*8+8=24 for kind, 2*8+9=25 for safety + ctor.set_u64(16, def.lvls); + let kind_val: u8 = match def.kind { + DefKind::Definition => 0, + DefKind::Opaque => 1, + DefKind::Theorem => 2, + }; + ctor.set_u8(24, kind_val); + let safety_val: u8 = match def.safety { + crate::ix::env::DefinitionSafety::Unsafe => 0, + crate::ix::env::DefinitionSafety::Safe => 1, + crate::ix::env::DefinitionSafety::Partial => 2, + }; + ctor.set_u8(25, safety_val); + Self::new(*ctor) + } -/// Build an Array of Addresses. -pub fn build_address_array(addrs: &[Address]) -> LeanArray { - let arr = LeanArray::alloc(addrs.len()); - for (i, addr) in addrs.iter().enumerate() { - arr.set(i, build_address_from_ixon(addr)); + /// Decode Ixon.Definition. + /// Lean stores scalar fields ordered by size (largest first). 
+ /// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) + pub fn decode(self) -> IxonDefinition { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let value = Arc::new(LeanIxonExpr::new(ctor.get(1)).decode()); + let lvls = ctor.scalar_u64(2, 0); + let kind_val = ctor.scalar_u8(2, 8); + let kind = match kind_val { + 0 => DefKind::Definition, + 1 => DefKind::Opaque, + 2 => DefKind::Theorem, + _ => panic!("Invalid DefKind: {}", kind_val), + }; + let safety_val = ctor.scalar_u8(2, 9); + let safety = match safety_val { + 0 => crate::ix::env::DefinitionSafety::Unsafe, + 1 => crate::ix::env::DefinitionSafety::Safe, + 2 => crate::ix::env::DefinitionSafety::Partial, + _ => panic!("Invalid DefinitionSafety: {}", safety_val), + }; + IxonDefinition { kind, safety, lvls, typ, value } } - arr } -/// Build Ixon.Definition -/// Lean stores scalar fields ordered by size (largest first). -/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn build_ixon_definition(def: &IxonDefinition) -> LeanIxonDefinition { - let typ_obj = build_ixon_expr(&def.typ); - let value_obj = build_ixon_expr(&def.value); - // 2 obj fields, 16 scalar bytes (lvls(8) + kind(1) + safety(1) + padding(6)) - let ctor = LeanCtor::alloc(0, 2, 16); - ctor.set(0, typ_obj); - ctor.set(1, value_obj); - // Scalar offsets from obj_cptr: 2*8=16 for lvls, 2*8+8=24 for kind, 2*8+9=25 for safety - ctor.set_u64(16, def.lvls); - let kind_val: u8 = match def.kind { - DefKind::Definition => 0, - DefKind::Opaque => 1, - DefKind::Theorem => 2, - }; - ctor.set_u8(24, kind_val); - let safety_val: u8 = match def.safety { - crate::ix::env::DefinitionSafety::Unsafe => 0, - crate::ix::env::DefinitionSafety::Safe => 1, - crate::ix::env::DefinitionSafety::Partial => 2, - }; - ctor.set_u8(25, safety_val); - LeanIxonDefinition::new(*ctor) -} +// ============================================================================= 
+// RecursorRule +// ============================================================================= -/// Build Ixon.RecursorRule -pub fn build_ixon_recursor_rule( - rule: &IxonRecursorRule, -) -> LeanIxonRecursorRule { - let rhs_obj = build_ixon_expr(&rule.rhs); - // 1 obj field, 8 scalar bytes - let ctor = LeanCtor::alloc(0, 1, 8); - ctor.set(0, rhs_obj); - ctor.set_u64(8, rule.fields); - LeanIxonRecursorRule::new(*ctor) -} +impl LeanIxonRecursorRule { + /// Build Ixon.RecursorRule + pub fn build(rule: &IxonRecursorRule) -> Self { + let rhs_obj = LeanIxonExpr::build(&rule.rhs); + // 1 obj field, 8 scalar bytes + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, rhs_obj); + ctor.set_u64(8, rule.fields); + Self::new(*ctor) + } -/// Build Ixon.Recursor -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn build_ixon_recursor(rec: &IxonRecursor) -> LeanIxonRecursor { - let typ_obj = build_ixon_expr(&rec.typ); - // Build rules array - let rules_arr = LeanArray::alloc(rec.rules.len()); - for (i, rule) in rec.rules.iter().enumerate() { - rules_arr.set(i, build_ixon_recursor_rule(rule)); + /// Decode Ixon.RecursorRule. 
+ pub fn decode(self) -> IxonRecursorRule { + let ctor = self.as_ctor(); + let rhs = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let fields = ctor.scalar_u64(1, 0); + IxonRecursorRule { fields, rhs } } - // 2 obj fields (typ, rules), 48 scalar bytes (5×8 + 1 + 1 + 6 padding) - let ctor = LeanCtor::alloc(0, 2, 48); - ctor.set(0, typ_obj); - ctor.set(1, rules_arr); - // Scalar offsets from obj_cptr: 2*8=16 base - ctor.set_u64(16, rec.lvls); - ctor.set_u64(24, rec.params); - ctor.set_u64(32, rec.indices); - ctor.set_u64(40, rec.motives); - ctor.set_u64(48, rec.minors); - ctor.set_u8(56, if rec.k { 1 } else { 0 }); - ctor.set_u8(57, if rec.is_unsafe { 1 } else { 0 }); - LeanIxonRecursor::new(*ctor) } -/// Build Ixon.Axiom -/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn build_ixon_axiom(ax: &IxonAxiom) -> LeanIxonAxiom { - let typ_obj = build_ixon_expr(&ax.typ); - // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) - let ctor = LeanCtor::alloc(0, 1, 16); - ctor.set(0, typ_obj); - // Scalar offsets from obj_cptr: 1*8=8 base - ctor.set_u64(8, ax.lvls); - ctor.set_u8(16, if ax.is_unsafe { 1 } else { 0 }); - LeanIxonAxiom::new(*ctor) -} +// ============================================================================= +// Recursor +// ============================================================================= -/// Build Ixon.Quotient -/// QuotKind is a simple enum stored as scalar u8, not object field. 
-/// Scalars ordered by size: lvls(8) + kind(1) + padding(7)
-pub fn build_ixon_quotient(quot: &IxonQuotient) -> LeanIxonQuotient {
-  let typ_obj = build_ixon_expr(&quot.typ);
-  // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7))
-  let ctor = LeanCtor::alloc(0, 1, 16);
-  ctor.set(0, typ_obj);
-  // Scalar offsets from obj_cptr: 1*8=8 base
-  ctor.set_u64(8, quot.lvls);
-  let kind_val: u8 = match quot.kind {
-    crate::ix::env::QuotKind::Type => 0,
-    crate::ix::env::QuotKind::Ctor => 1,
-    crate::ix::env::QuotKind::Lift => 2,
-    crate::ix::env::QuotKind::Ind => 3,
-  };
-  ctor.set_u8(16, kind_val);
-  LeanIxonQuotient::new(*ctor)
-}
+impl LeanIxonRecursor {
+  /// Build Ixon.Recursor
+  /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6)
+  pub fn build(rec: &IxonRecursor) -> Self {
+    let typ_obj = LeanIxonExpr::build(&rec.typ);
+    // Build rules array
+    let rules_arr = LeanArray::alloc(rec.rules.len());
+    for (i, rule) in rec.rules.iter().enumerate() {
+      rules_arr.set(i, LeanIxonRecursorRule::build(rule));
+    }
+    // 2 obj fields (typ, rules), 48 scalar bytes (5×8 + 1 + 1 + 6 padding)
+    let ctor = LeanCtor::alloc(0, 2, 48);
+    ctor.set(0, typ_obj);
+    ctor.set(1, rules_arr);
+    // Scalar offsets from obj_cptr: 2*8=16 base
+    ctor.set_u64(16, rec.lvls);
+    ctor.set_u64(24, rec.params);
+    ctor.set_u64(32, rec.indices);
+    ctor.set_u64(40, rec.motives);
+    ctor.set_u64(48, rec.minors);
+    ctor.set_u8(56, if rec.k { 1 } else { 0 });
+    ctor.set_u8(57, if rec.is_unsafe { 1 } else { 0 });
+    Self::new(*ctor)
+  }

-/// Build Ixon.Constructor
-/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7)
-pub fn build_ixon_constructor(c: &IxonConstructor) -> LeanIxonConstructor {
-  let typ_obj = build_ixon_expr(&c.typ);
-  // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding)
-  let ctor = LeanCtor::alloc(0, 1, 40);
-  ctor.set(0, typ_obj);
-  // Scalar offsets from obj_cptr: 1*8=8 
base - ctor.set_u64(8, c.lvls); - ctor.set_u64(16, c.cidx); - ctor.set_u64(24, c.params); - ctor.set_u64(32, c.fields); - ctor.set_u8(40, if c.is_unsafe { 1 } else { 0 }); - LeanIxonConstructor::new(*ctor) + /// Decode Ixon.Recursor. + /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) + pub fn decode(self) -> IxonRecursor { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let rules_arr = ctor.get(1).as_array(); + let rules = rules_arr.map(|x| LeanIxonRecursorRule::new(x).decode()); + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let motives = ctor.scalar_u64(2, 24); + let minors = ctor.scalar_u64(2, 32); + let k = ctor.scalar_u8(2, 40) != 0; + let is_unsafe = ctor.scalar_u8(2, 41) != 0; + IxonRecursor { + k, + is_unsafe, + lvls, + params, + indices, + motives, + minors, + typ, + rules, + } + } } -/// Build Ixon.Inductive -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn build_ixon_inductive(ind: &IxonInductive) -> LeanIxonInductive { - let typ_obj = build_ixon_expr(&ind.typ); - // Build ctors array - let ctors_arr = LeanArray::alloc(ind.ctors.len()); - for (i, c) in ind.ctors.iter().enumerate() { - ctors_arr.set(i, build_ixon_constructor(c)); +// ============================================================================= +// Axiom +// ============================================================================= + +impl LeanIxonAxiom { + /// Build Ixon.Axiom + /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) + pub fn build(ax: &IxonAxiom) -> Self { + let typ_obj = LeanIxonExpr::build(&ax.typ); + // 1 obj field, 16 scalar bytes (lvls(8) + isUnsafe(1) + padding(7)) + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, 
ax.lvls); + ctor.set_u8(16, if ax.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) } - // 2 obj fields, 40 scalar bytes (4×8 + 3 + 5 padding) - let ctor = LeanCtor::alloc(0, 2, 40); - ctor.set(0, typ_obj); - ctor.set(1, ctors_arr); - // Scalar offsets from obj_cptr: 2*8=16 base - ctor.set_u64(16, ind.lvls); - ctor.set_u64(24, ind.params); - ctor.set_u64(32, ind.indices); - ctor.set_u64(40, ind.nested); - ctor.set_u8(48, if ind.recr { 1 } else { 0 }); - ctor.set_u8(49, if ind.refl { 1 } else { 0 }); - ctor.set_u8(50, if ind.is_unsafe { 1 } else { 0 }); - LeanIxonInductive::new(*ctor) -} -/// Build Ixon.InductiveProj -pub fn build_inductive_proj(proj: &InductiveProj) -> LeanIxonInductiveProj { - let block_obj = build_address_from_ixon(&proj.block); - let ctor = LeanCtor::alloc(0, 1, 8); - ctor.set(0, block_obj); - ctor.set_u64(8, proj.idx); - LeanIxonInductiveProj::new(*ctor) + /// Decode Ixon.Axiom. + /// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) + pub fn decode(self) -> IxonAxiom { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let lvls = ctor.scalar_u64(1, 0); + let is_unsafe = ctor.scalar_u8(1, 8) != 0; + IxonAxiom { is_unsafe, lvls, typ } + } } -/// Build Ixon.ConstructorProj -pub fn build_constructor_proj( - proj: &ConstructorProj, -) -> LeanIxonConstructorProj { - let block_obj = build_address_from_ixon(&proj.block); - let ctor = LeanCtor::alloc(0, 1, 16); - ctor.set(0, block_obj); - ctor.set_u64(8, proj.idx); - ctor.set_u64(16, proj.cidx); - LeanIxonConstructorProj::new(*ctor) -} +// ============================================================================= +// Quotient +// ============================================================================= -/// Build Ixon.RecursorProj -pub fn build_recursor_proj(proj: &RecursorProj) -> LeanIxonRecursorProj { - let block_obj = build_address_from_ixon(&proj.block); - let ctor = LeanCtor::alloc(0, 1, 8); - ctor.set(0, block_obj); - ctor.set_u64(8, 
proj.idx);
-  LeanIxonRecursorProj::new(*ctor)
-}
+impl LeanIxonQuotient {
+  /// Build Ixon.Quotient
+  /// QuotKind is a simple enum stored as scalar u8, not object field.
+  /// Scalars ordered by size: lvls(8) + kind(1) + padding(7)
+  pub fn build(quot: &IxonQuotient) -> Self {
+    let typ_obj = LeanIxonExpr::build(&quot.typ);
+    // 1 obj field (typ), 16 scalar bytes (lvls(8) + kind(1) + padding(7))
+    let ctor = LeanCtor::alloc(0, 1, 16);
+    ctor.set(0, typ_obj);
+    // Scalar offsets from obj_cptr: 1*8=8 base
+    ctor.set_u64(8, quot.lvls);
+    let kind_val: u8 = match quot.kind {
+      crate::ix::env::QuotKind::Type => 0,
+      crate::ix::env::QuotKind::Ctor => 1,
+      crate::ix::env::QuotKind::Lift => 2,
+      crate::ix::env::QuotKind::Ind => 3,
+    };
+    ctor.set_u8(16, kind_val);
+    Self::new(*ctor)
+  }

-/// Build Ixon.DefinitionProj
-pub fn build_definition_proj(proj: &DefinitionProj) -> LeanIxonDefinitionProj {
-  let block_obj = build_address_from_ixon(&proj.block);
-  let ctor = LeanCtor::alloc(0, 1, 8);
-  ctor.set(0, block_obj);
-  ctor.set_u64(8, proj.idx);
-  LeanIxonDefinitionProj::new(*ctor)

+  /// Decode Ixon.Quotient.
+  /// QuotKind is a scalar (not object field). 
Scalars: lvls(8) + kind(1) + padding(7) + pub fn decode(self) -> IxonQuotient { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let lvls = ctor.scalar_u64(1, 0); + let kind_val = ctor.scalar_u8(1, 8); + let kind = match kind_val { + 0 => crate::ix::env::QuotKind::Type, + 1 => crate::ix::env::QuotKind::Ctor, + 2 => crate::ix::env::QuotKind::Lift, + 3 => crate::ix::env::QuotKind::Ind, + _ => panic!("Invalid QuotKind: {}", kind_val), + }; + IxonQuotient { kind, lvls, typ } + } } -/// Build Ixon.MutConst -pub fn build_mut_const(mc: &MutConst) -> LeanIxonMutConst { - let obj = match mc { - MutConst::Defn(def) => { - let def_obj = build_ixon_definition(def); - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, def_obj); - *ctor - }, - MutConst::Indc(ind) => { - let ind_obj = build_ixon_inductive(ind); - let ctor = LeanCtor::alloc(1, 1, 0); - ctor.set(0, ind_obj); - *ctor - }, - MutConst::Recr(rec) => { - let rec_obj = build_ixon_recursor(rec); - let ctor = LeanCtor::alloc(2, 1, 0); - ctor.set(0, rec_obj); - *ctor - }, - }; - LeanIxonMutConst::new(obj) -} +// ============================================================================= +// Constructor +// ============================================================================= -/// Build Ixon.ConstantInfo (9 constructors) -pub fn build_ixon_constant_info( - info: &IxonConstantInfo, -) -> LeanIxonConstantInfo { - let obj = match info { - IxonConstantInfo::Defn(def) => { - let def_obj = build_ixon_definition(def); - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, def_obj); - *ctor - }, - IxonConstantInfo::Recr(rec) => { - let rec_obj = build_ixon_recursor(rec); - let ctor = LeanCtor::alloc(1, 1, 0); - ctor.set(0, rec_obj); - *ctor - }, - IxonConstantInfo::Axio(ax) => { - let ax_obj = build_ixon_axiom(ax); - let ctor = LeanCtor::alloc(2, 1, 0); - ctor.set(0, ax_obj); - *ctor - }, - IxonConstantInfo::Quot(quot) => { - let quot_obj = build_ixon_quotient(quot); - let ctor = 
LeanCtor::alloc(3, 1, 0); - ctor.set(0, quot_obj); - *ctor - }, - IxonConstantInfo::CPrj(proj) => { - let proj_obj = build_constructor_proj(proj); - let ctor = LeanCtor::alloc(4, 1, 0); - ctor.set(0, proj_obj); - *ctor - }, - IxonConstantInfo::RPrj(proj) => { - let proj_obj = build_recursor_proj(proj); - let ctor = LeanCtor::alloc(5, 1, 0); - ctor.set(0, proj_obj); - *ctor - }, - IxonConstantInfo::IPrj(proj) => { - let proj_obj = build_inductive_proj(proj); - let ctor = LeanCtor::alloc(6, 1, 0); - ctor.set(0, proj_obj); - *ctor - }, - IxonConstantInfo::DPrj(proj) => { - let proj_obj = build_definition_proj(proj); - let ctor = LeanCtor::alloc(7, 1, 0); - ctor.set(0, proj_obj); - *ctor - }, - IxonConstantInfo::Muts(muts) => { - let arr = LeanArray::alloc(muts.len()); - for (i, mc) in muts.iter().enumerate() { - arr.set(i, build_mut_const(mc)); - } - let ctor = LeanCtor::alloc(8, 1, 0); - ctor.set(0, arr); - *ctor - }, - }; - LeanIxonConstantInfo::new(obj) -} +impl LeanIxonConstructor { + /// Build Ixon.Constructor + /// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) + pub fn build(c: &IxonConstructor) -> Self { + let typ_obj = LeanIxonExpr::build(&c.typ); + // 1 obj field, 40 scalar bytes (4×8 + 1 + 7 padding) + let ctor = LeanCtor::alloc(0, 1, 40); + ctor.set(0, typ_obj); + // Scalar offsets from obj_cptr: 1*8=8 base + ctor.set_u64(8, c.lvls); + ctor.set_u64(16, c.cidx); + ctor.set_u64(24, c.params); + ctor.set_u64(32, c.fields); + ctor.set_u8(40, if c.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } -/// Build Ixon.Constant -pub fn build_ixon_constant(constant: &IxonConstant) -> LeanIxonConstant { - let info_obj = build_ixon_constant_info(&constant.info); - let sharing_obj = build_ixon_expr_array(&constant.sharing); - let refs_obj = build_address_array(&constant.refs); - let univs_obj = build_ixon_univ_array(&constant.univs); - let ctor = LeanCtor::alloc(0, 4, 0); - ctor.set(0, info_obj); - ctor.set(1, sharing_obj); 
- ctor.set(2, refs_obj); - ctor.set(3, univs_obj); - LeanIxonConstant::new(*ctor) + /// Decode Ixon.Constructor. + /// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) + pub fn decode(self) -> IxonConstructor { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let lvls = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + let params = ctor.scalar_u64(1, 16); + let fields = ctor.scalar_u64(1, 24); + let is_unsafe = ctor.scalar_u8(1, 32) != 0; + IxonConstructor { is_unsafe, lvls, cidx, params, fields, typ } + } } // ============================================================================= -// Decode Functions +// Inductive // ============================================================================= -/// Decode a ByteArray (Address) to Address. -pub fn decode_ixon_address(obj: LeanIxAddress) -> Address { - Address::from_slice(&obj.as_bytes()[..32]) - .expect("Address should be 32 bytes") -} +impl LeanIxonInductive { + /// Build Ixon.Inductive + /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) + pub fn build(ind: &IxonInductive) -> Self { + let typ_obj = LeanIxonExpr::build(&ind.typ); + // Build ctors array + let ctors_arr = LeanArray::alloc(ind.ctors.len()); + for (i, c) in ind.ctors.iter().enumerate() { + ctors_arr.set(i, LeanIxonConstructor::build(c)); + } + // 2 obj fields, 40 scalar bytes (4×8 + 3 + 5 padding) + let ctor = LeanCtor::alloc(0, 2, 40); + ctor.set(0, typ_obj); + ctor.set(1, ctors_arr); + // Scalar offsets from obj_cptr: 2*8=16 base + ctor.set_u64(16, ind.lvls); + ctor.set_u64(24, ind.params); + ctor.set_u64(32, ind.indices); + ctor.set_u64(40, ind.nested); + ctor.set_u8(48, if ind.recr { 1 } else { 0 }); + ctor.set_u8(49, if ind.refl { 1 } else { 0 }); + ctor.set_u8(50, if ind.is_unsafe { 1 } else { 0 }); + Self::new(*ctor) + } -/// Decode Array Address. 
-pub fn decode_ixon_address_array(obj: LeanArray) -> Vec<Address>
{ - obj.map(|x| decode_ixon_address(x.as_byte_array())) + /// Decode Ixon.Inductive. + /// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) + pub fn decode(self) -> IxonInductive { + let ctor = self.as_ctor(); + let typ = Arc::new(LeanIxonExpr::new(ctor.get(0)).decode()); + let ctors_arr = ctor.get(1).as_array(); + let ctors = ctors_arr.map(|x| LeanIxonConstructor::new(x).decode()); + let lvls = ctor.scalar_u64(2, 0); + let params = ctor.scalar_u64(2, 8); + let indices = ctor.scalar_u64(2, 16); + let nested = ctor.scalar_u64(2, 24); + let recr = ctor.scalar_u8(2, 32) != 0; + let refl = ctor.scalar_u8(2, 33) != 0; + let is_unsafe = ctor.scalar_u8(2, 34) != 0; + IxonInductive { + recr, + refl, + is_unsafe, + lvls, + params, + indices, + nested, + typ, + ctors, + } + } } -/// Decode Ixon.Definition. -/// Lean stores scalar fields ordered by size (largest first). -/// Layout: header(8) + typ(8) + value(8) + lvls(8) + kind(1) + safety(1) + padding(6) -pub fn decode_ixon_definition(obj: LeanIxonDefinition) -> IxonDefinition { - let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); - let value = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(1)))); - let lvls = ctor.scalar_u64(2, 0); - let kind_val = ctor.scalar_u8(2, 8); - let kind = match kind_val { - 0 => DefKind::Definition, - 1 => DefKind::Opaque, - 2 => DefKind::Theorem, - _ => panic!("Invalid DefKind: {}", kind_val), - }; - let safety_val = ctor.scalar_u8(2, 9); - let safety = match safety_val { - 0 => crate::ix::env::DefinitionSafety::Unsafe, - 1 => crate::ix::env::DefinitionSafety::Safe, - 2 => crate::ix::env::DefinitionSafety::Partial, - _ => panic!("Invalid DefinitionSafety: {}", safety_val), - }; - IxonDefinition { kind, safety, lvls, typ, value } -} +// ============================================================================= +// Projection Types +// 
============================================================================= -/// Decode Ixon.RecursorRule. -pub fn decode_ixon_recursor_rule( - obj: LeanIxonRecursorRule, -) -> IxonRecursorRule { - let ctor = obj.as_ctor(); - let rhs = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); - let fields = ctor.scalar_u64(1, 0); - IxonRecursorRule { fields, rhs } -} +impl LeanIxonInductiveProj { + pub fn build(proj: &InductiveProj) -> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } -/// Decode Ixon.Recursor. -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + motives(8) + minors(8) + k(1) + isUnsafe(1) + padding(6) -pub fn decode_ixon_recursor(obj: LeanIxonRecursor) -> IxonRecursor { - let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); - let rules_arr = ctor.get(1).as_array(); - let rules = - rules_arr.map(|x| decode_ixon_recursor_rule(LeanIxonRecursorRule::new(x))); - let lvls = ctor.scalar_u64(2, 0); - let params = ctor.scalar_u64(2, 8); - let indices = ctor.scalar_u64(2, 16); - let motives = ctor.scalar_u64(2, 24); - let minors = ctor.scalar_u64(2, 32); - let k = ctor.scalar_u8(2, 40) != 0; - let is_unsafe = ctor.scalar_u8(2, 41) != 0; - IxonRecursor { - k, - is_unsafe, - lvls, - params, - indices, - motives, - minors, - typ, - rules, + pub fn decode(self) -> InductiveProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + InductiveProj { idx, block } } } -/// Decode Ixon.Axiom. 
-/// Scalars ordered by size: lvls(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_axiom(obj: LeanIxonAxiom) -> IxonAxiom { - let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); - let lvls = ctor.scalar_u64(1, 0); - let is_unsafe = ctor.scalar_u8(1, 8) != 0; - IxonAxiom { is_unsafe, lvls, typ } -} +impl LeanIxonConstructorProj { + pub fn build(proj: &ConstructorProj) -> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 16); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + ctor.set_u64(16, proj.cidx); + Self::new(*ctor) + } -/// Decode Ixon.Quotient. -/// QuotKind is a scalar (not object field). Scalars: lvls(8) + kind(1) + padding(7) -pub fn decode_ixon_quotient(obj: LeanIxonQuotient) -> IxonQuotient { - let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); - let lvls = ctor.scalar_u64(1, 0); - let kind_val = ctor.scalar_u8(1, 8); - let kind = match kind_val { - 0 => crate::ix::env::QuotKind::Type, - 1 => crate::ix::env::QuotKind::Ctor, - 2 => crate::ix::env::QuotKind::Lift, - 3 => crate::ix::env::QuotKind::Ind, - _ => panic!("Invalid QuotKind: {}", kind_val), - }; - IxonQuotient { kind, lvls, typ } + pub fn decode(self) -> ConstructorProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + let cidx = ctor.scalar_u64(1, 8); + ConstructorProj { idx, cidx, block } + } } -/// Decode Ixon.Constructor. 
-/// Scalars ordered by size: lvls(8) + cidx(8) + params(8) + fields(8) + isUnsafe(1) + padding(7) -pub fn decode_ixon_constructor(obj: LeanIxonConstructor) -> IxonConstructor { - let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); - let lvls = ctor.scalar_u64(1, 0); - let cidx = ctor.scalar_u64(1, 8); - let params = ctor.scalar_u64(1, 16); - let fields = ctor.scalar_u64(1, 24); - let is_unsafe = ctor.scalar_u8(1, 32) != 0; - IxonConstructor { is_unsafe, lvls, cidx, params, fields, typ } -} +impl LeanIxonRecursorProj { + pub fn build(proj: &RecursorProj) -> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } -/// Decode Ixon.Inductive. -/// Scalars ordered by size: lvls(8) + params(8) + indices(8) + nested(8) + recr(1) + refl(1) + isUnsafe(1) + padding(5) -pub fn decode_ixon_inductive(obj: LeanIxonInductive) -> IxonInductive { - let ctor = obj.as_ctor(); - let typ = Arc::new(decode_ixon_expr(LeanIxonExpr::new(ctor.get(0)))); - let ctors_arr = ctor.get(1).as_array(); - let ctors = - ctors_arr.map(|x| decode_ixon_constructor(LeanIxonConstructor::new(x))); - let lvls = ctor.scalar_u64(2, 0); - let params = ctor.scalar_u64(2, 8); - let indices = ctor.scalar_u64(2, 16); - let nested = ctor.scalar_u64(2, 24); - let recr = ctor.scalar_u8(2, 32) != 0; - let refl = ctor.scalar_u8(2, 33) != 0; - let is_unsafe = ctor.scalar_u8(2, 34) != 0; - IxonInductive { - recr, - refl, - is_unsafe, - lvls, - params, - indices, - nested, - typ, - ctors, + pub fn decode(self) -> RecursorProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + RecursorProj { idx, block } } } -/// Decode Ixon.InductiveProj. 
-pub fn decode_ixon_inductive_proj(obj: LeanIxonInductiveProj) -> InductiveProj { - let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0).as_byte_array()); - let idx = ctor.scalar_u64(1, 0); - InductiveProj { idx, block } -} +impl LeanIxonDefinitionProj { + pub fn build(proj: &DefinitionProj) -> Self { + let block_obj = LeanIxAddress::build(&proj.block); + let ctor = LeanCtor::alloc(0, 1, 8); + ctor.set(0, block_obj); + ctor.set_u64(8, proj.idx); + Self::new(*ctor) + } -/// Decode Ixon.ConstructorProj. -pub fn decode_ixon_constructor_proj( - obj: LeanIxonConstructorProj, -) -> ConstructorProj { - let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0).as_byte_array()); - let idx = ctor.scalar_u64(1, 0); - let cidx = ctor.scalar_u64(1, 8); - ConstructorProj { idx, cidx, block } + pub fn decode(self) -> DefinitionProj { + let ctor = self.as_ctor(); + let block = LeanIxAddress::new(ctor.get(0)).decode(); + let idx = ctor.scalar_u64(1, 0); + DefinitionProj { idx, block } + } } -/// Decode Ixon.RecursorProj. -pub fn decode_ixon_recursor_proj(obj: LeanIxonRecursorProj) -> RecursorProj { - let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0).as_byte_array()); - let idx = ctor.scalar_u64(1, 0); - RecursorProj { idx, block } -} +// ============================================================================= +// MutConst +// ============================================================================= -/// Decode Ixon.DefinitionProj. 
-pub fn decode_ixon_definition_proj( - obj: LeanIxonDefinitionProj, -) -> DefinitionProj { - let ctor = obj.as_ctor(); - let block = decode_ixon_address(ctor.get(0).as_byte_array()); - let idx = ctor.scalar_u64(1, 0); - DefinitionProj { idx, block } -} +impl LeanIxonMutConst { + pub fn build(mc: &MutConst) -> Self { + let obj = match mc { + MutConst::Defn(def) => { + let def_obj = LeanIxonDefinition::build(def); + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, def_obj); + *ctor + }, + MutConst::Indc(ind) => { + let ind_obj = LeanIxonInductive::build(ind); + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, ind_obj); + *ctor + }, + MutConst::Recr(rec) => { + let rec_obj = LeanIxonRecursor::build(rec); + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, rec_obj); + *ctor + }, + }; + Self::new(obj) + } -/// Decode Ixon.MutConst. -pub fn decode_ixon_mut_const(obj: LeanIxonMutConst) -> MutConst { - let ctor = obj.as_ctor(); - let inner = ctor.get(0); - match ctor.tag() { - 0 => MutConst::Defn(decode_ixon_definition(LeanIxonDefinition::new(inner))), - 1 => MutConst::Indc(decode_ixon_inductive(LeanIxonInductive::new(inner))), - 2 => MutConst::Recr(decode_ixon_recursor(LeanIxonRecursor::new(inner))), - tag => panic!("Invalid Ixon.MutConst tag: {}", tag), + pub fn decode(self) -> MutConst { + let ctor = self.as_ctor(); + let inner = ctor.get(0); + match ctor.tag() { + 0 => MutConst::Defn(LeanIxonDefinition::new(inner).decode()), + 1 => MutConst::Indc(LeanIxonInductive::new(inner).decode()), + 2 => MutConst::Recr(LeanIxonRecursor::new(inner).decode()), + tag => panic!("Invalid Ixon.MutConst tag: {}", tag), + } } } -/// Decode Ixon.ConstantInfo. 
-pub fn decode_ixon_constant_info( - obj: LeanIxonConstantInfo, -) -> IxonConstantInfo { - let ctor = obj.as_ctor(); - let inner = ctor.get(0); - match ctor.tag() { - 0 => IxonConstantInfo::Defn(decode_ixon_definition( - LeanIxonDefinition::new(inner), - )), - 1 => { - IxonConstantInfo::Recr(decode_ixon_recursor(LeanIxonRecursor::new(inner))) - }, - 2 => IxonConstantInfo::Axio(decode_ixon_axiom(LeanIxonAxiom::new(inner))), - 3 => { - IxonConstantInfo::Quot(decode_ixon_quotient(LeanIxonQuotient::new(inner))) - }, - 4 => IxonConstantInfo::CPrj(decode_ixon_constructor_proj( - LeanIxonConstructorProj::new(inner), - )), - 5 => IxonConstantInfo::RPrj(decode_ixon_recursor_proj( - LeanIxonRecursorProj::new(inner), - )), - 6 => IxonConstantInfo::IPrj(decode_ixon_inductive_proj( - LeanIxonInductiveProj::new(inner), - )), - 7 => IxonConstantInfo::DPrj(decode_ixon_definition_proj( - LeanIxonDefinitionProj::new(inner), - )), - 8 => { - let arr = inner.as_array(); - let muts = arr.map(|x| decode_ixon_mut_const(LeanIxonMutConst::new(x))); - IxonConstantInfo::Muts(muts) - }, - tag => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), +// ============================================================================= +// ConstantInfo +// ============================================================================= + +impl LeanIxonConstantInfo { + /// Build Ixon.ConstantInfo (9 constructors) + pub fn build(info: &IxonConstantInfo) -> Self { + let obj = match info { + IxonConstantInfo::Defn(def) => { + let def_obj = LeanIxonDefinition::build(def); + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, def_obj); + *ctor + }, + IxonConstantInfo::Recr(rec) => { + let rec_obj = LeanIxonRecursor::build(rec); + let ctor = LeanCtor::alloc(1, 1, 0); + ctor.set(0, rec_obj); + *ctor + }, + IxonConstantInfo::Axio(ax) => { + let ax_obj = LeanIxonAxiom::build(ax); + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, ax_obj); + *ctor + }, + IxonConstantInfo::Quot(quot) => { + let quot_obj = 
LeanIxonQuotient::build(quot); + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, quot_obj); + *ctor + }, + IxonConstantInfo::CPrj(proj) => { + let proj_obj = LeanIxonConstructorProj::build(proj); + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::RPrj(proj) => { + let proj_obj = LeanIxonRecursorProj::build(proj); + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::IPrj(proj) => { + let proj_obj = LeanIxonInductiveProj::build(proj); + let ctor = LeanCtor::alloc(6, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::DPrj(proj) => { + let proj_obj = LeanIxonDefinitionProj::build(proj); + let ctor = LeanCtor::alloc(7, 1, 0); + ctor.set(0, proj_obj); + *ctor + }, + IxonConstantInfo::Muts(muts) => { + let arr = LeanArray::alloc(muts.len()); + for (i, mc) in muts.iter().enumerate() { + arr.set(i, LeanIxonMutConst::build(mc)); + } + let ctor = LeanCtor::alloc(8, 1, 0); + ctor.set(0, arr); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.ConstantInfo. 
+ pub fn decode(self) -> IxonConstantInfo { + let ctor = self.as_ctor(); + let inner = ctor.get(0); + match ctor.tag() { + 0 => IxonConstantInfo::Defn(LeanIxonDefinition::new(inner).decode()), + 1 => IxonConstantInfo::Recr(LeanIxonRecursor::new(inner).decode()), + 2 => IxonConstantInfo::Axio(LeanIxonAxiom::new(inner).decode()), + 3 => IxonConstantInfo::Quot(LeanIxonQuotient::new(inner).decode()), + 4 => IxonConstantInfo::CPrj(LeanIxonConstructorProj::new(inner).decode()), + 5 => IxonConstantInfo::RPrj(LeanIxonRecursorProj::new(inner).decode()), + 6 => IxonConstantInfo::IPrj(LeanIxonInductiveProj::new(inner).decode()), + 7 => IxonConstantInfo::DPrj(LeanIxonDefinitionProj::new(inner).decode()), + 8 => { + let arr = inner.as_array(); + let muts = arr.map(|x| LeanIxonMutConst::new(x).decode()); + IxonConstantInfo::Muts(muts) + }, + tag => panic!("Invalid Ixon.ConstantInfo tag: {}", tag), + } } } -/// Decode Ixon.Constant. -pub fn decode_ixon_constant(obj: LeanIxonConstant) -> IxonConstant { - let ctor = obj.as_ctor(); - IxonConstant { - info: decode_ixon_constant_info(LeanIxonConstantInfo::new(ctor.get(0))), - sharing: decode_ixon_expr_array(ctor.get(1).as_array()), - refs: decode_ixon_address_array(ctor.get(2).as_array()), - univs: decode_ixon_univ_array(ctor.get(3).as_array()), +// ============================================================================= +// Constant +// ============================================================================= + +impl LeanIxonConstant { + /// Build Ixon.Constant + pub fn build(constant: &IxonConstant) -> Self { + let info_obj = LeanIxonConstantInfo::build(&constant.info); + let sharing_obj = LeanIxonExpr::build_array(&constant.sharing); + let refs_obj = LeanIxAddress::build_array(&constant.refs); + let univs_obj = LeanIxonUniv::build_array(&constant.univs); + let ctor = LeanCtor::alloc(0, 4, 0); + ctor.set(0, info_obj); + ctor.set(1, sharing_obj); + ctor.set(2, refs_obj); + ctor.set(3, univs_obj); + Self::new(*ctor) + } + + 
/// Decode Ixon.Constant. + pub fn decode(self) -> IxonConstant { + let ctor = self.as_ctor(); + IxonConstant { + info: LeanIxonConstantInfo::new(ctor.get(0)).decode(), + sharing: LeanIxonExpr::decode_array(ctor.get(1).as_array()), + refs: LeanIxAddress::decode_array(ctor.get(2).as_array()), + univs: LeanIxonUniv::decode_array(ctor.get(3).as_array()), + } } } @@ -577,8 +573,8 @@ pub fn decode_ixon_constant(obj: LeanIxonConstant) -> IxonConstant { pub extern "C" fn rs_roundtrip_ixon_definition( obj: LeanIxonDefinition, ) -> LeanIxonDefinition { - let def = decode_ixon_definition(obj); - build_ixon_definition(&def) + let def = obj.decode(); + LeanIxonDefinition::build(&def) } /// Round-trip Ixon.Recursor. @@ -586,15 +582,15 @@ pub extern "C" fn rs_roundtrip_ixon_definition( pub extern "C" fn rs_roundtrip_ixon_recursor( obj: LeanIxonRecursor, ) -> LeanIxonRecursor { - let rec = decode_ixon_recursor(obj); - build_ixon_recursor(&rec) + let rec = obj.decode(); + LeanIxonRecursor::build(&rec) } /// Round-trip Ixon.Axiom. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanIxonAxiom) -> LeanIxonAxiom { - let ax = decode_ixon_axiom(obj); - build_ixon_axiom(&ax) + let ax = obj.decode(); + LeanIxonAxiom::build(&ax) } /// Round-trip Ixon.Quotient. @@ -602,8 +598,8 @@ pub extern "C" fn rs_roundtrip_ixon_axiom(obj: LeanIxonAxiom) -> LeanIxonAxiom { pub extern "C" fn rs_roundtrip_ixon_quotient( obj: LeanIxonQuotient, ) -> LeanIxonQuotient { - let quot = decode_ixon_quotient(obj); - build_ixon_quotient(") + let quot = obj.decode(); + LeanIxonQuotient::build(") } /// Round-trip Ixon.ConstantInfo. @@ -611,8 +607,8 @@ pub extern "C" fn rs_roundtrip_ixon_quotient( pub extern "C" fn rs_roundtrip_ixon_constant_info( obj: LeanIxonConstantInfo, ) -> LeanIxonConstantInfo { - let info = decode_ixon_constant_info(obj); - build_ixon_constant_info(&info) + let info = obj.decode(); + LeanIxonConstantInfo::build(&info) } /// Round-trip Ixon.Constant. 
@@ -620,8 +616,8 @@ pub extern "C" fn rs_roundtrip_ixon_constant_info( pub extern "C" fn rs_roundtrip_ixon_constant( obj: LeanIxonConstant, ) -> LeanIxonConstant { - let constant = decode_ixon_constant(obj); - build_ixon_constant(&constant) + let constant = obj.decode(); + LeanIxonConstant::build(&constant) } /// Round-trip Ixon.RecursorRule. @@ -629,8 +625,8 @@ pub extern "C" fn rs_roundtrip_ixon_constant( pub extern "C" fn rs_roundtrip_ixon_recursor_rule( obj: LeanIxonRecursorRule, ) -> LeanIxonRecursorRule { - let rule = decode_ixon_recursor_rule(obj); - build_ixon_recursor_rule(&rule) + let rule = obj.decode(); + LeanIxonRecursorRule::build(&rule) } /// Round-trip Ixon.Constructor. @@ -638,8 +634,8 @@ pub extern "C" fn rs_roundtrip_ixon_recursor_rule( pub extern "C" fn rs_roundtrip_ixon_constructor( obj: LeanIxonConstructor, ) -> LeanIxonConstructor { - let c = decode_ixon_constructor(obj); - build_ixon_constructor(&c) + let c = obj.decode(); + LeanIxonConstructor::build(&c) } /// Round-trip Ixon.Inductive. @@ -647,8 +643,8 @@ pub extern "C" fn rs_roundtrip_ixon_constructor( pub extern "C" fn rs_roundtrip_ixon_inductive( obj: LeanIxonInductive, ) -> LeanIxonInductive { - let ind = decode_ixon_inductive(obj); - build_ixon_inductive(&ind) + let ind = obj.decode(); + LeanIxonInductive::build(&ind) } /// Round-trip Ixon.InductiveProj. @@ -656,8 +652,8 @@ pub extern "C" fn rs_roundtrip_ixon_inductive( pub extern "C" fn rs_roundtrip_ixon_inductive_proj( obj: LeanIxonInductiveProj, ) -> LeanIxonInductiveProj { - let proj = decode_ixon_inductive_proj(obj); - build_inductive_proj(&proj) + let proj = obj.decode(); + LeanIxonInductiveProj::build(&proj) } /// Round-trip Ixon.ConstructorProj. 
@@ -665,8 +661,8 @@ pub extern "C" fn rs_roundtrip_ixon_inductive_proj( pub extern "C" fn rs_roundtrip_ixon_constructor_proj( obj: LeanIxonConstructorProj, ) -> LeanIxonConstructorProj { - let proj = decode_ixon_constructor_proj(obj); - build_constructor_proj(&proj) + let proj = obj.decode(); + LeanIxonConstructorProj::build(&proj) } /// Round-trip Ixon.RecursorProj. @@ -674,8 +670,8 @@ pub extern "C" fn rs_roundtrip_ixon_constructor_proj( pub extern "C" fn rs_roundtrip_ixon_recursor_proj( obj: LeanIxonRecursorProj, ) -> LeanIxonRecursorProj { - let proj = decode_ixon_recursor_proj(obj); - build_recursor_proj(&proj) + let proj = obj.decode(); + LeanIxonRecursorProj::build(&proj) } /// Round-trip Ixon.DefinitionProj. @@ -683,8 +679,8 @@ pub extern "C" fn rs_roundtrip_ixon_recursor_proj( pub extern "C" fn rs_roundtrip_ixon_definition_proj( obj: LeanIxonDefinitionProj, ) -> LeanIxonDefinitionProj { - let proj = decode_ixon_definition_proj(obj); - build_definition_proj(&proj) + let proj = obj.decode(); + LeanIxonDefinitionProj::build(&proj) } /// Round-trip Ixon.MutConst. @@ -692,6 +688,6 @@ pub extern "C" fn rs_roundtrip_ixon_definition_proj( pub extern "C" fn rs_roundtrip_ixon_mut_const( obj: LeanIxonMutConst, ) -> LeanIxonMutConst { - let mc = decode_ixon_mut_const(obj); - build_mut_const(&mc) + let mc = obj.decode(); + LeanIxonMutConst::build(&mc) } diff --git a/src/ffi/ixon/enums.rs b/src/ffi/ixon/enums.rs index a0b5f592..a1fb3e55 100644 --- a/src/ffi/ixon/enums.rs +++ b/src/ffi/ixon/enums.rs @@ -6,87 +6,86 @@ use crate::lean::{ LeanIxonDefKind, LeanIxonDefinitionSafety, LeanIxonQuotKind, }; use lean_ffi::object::LeanObject; -/// Build Ixon.DefKind -/// | defn -- tag 0 -/// | opaq -- tag 1 -/// | thm -- tag 2 -/// Simple enums are passed as raw (unboxed) tag values across Lean FFI. 
-pub fn build_def_kind(kind: &DefKind) -> LeanIxonDefKind { - let tag = match kind { - DefKind::Definition => 0, - DefKind::Opaque => 1, - DefKind::Theorem => 2, - }; - LeanIxonDefKind::new(LeanObject::from_enum_tag(tag)) -} -/// Build Ixon.DefinitionSafety -/// | unsaf -- tag 0 -/// | safe -- tag 1 -/// | part -- tag 2 -pub fn build_ixon_definition_safety( - safety: &DefinitionSafety, -) -> LeanIxonDefinitionSafety { - let tag = match safety { - DefinitionSafety::Unsafe => 0, - DefinitionSafety::Safe => 1, - DefinitionSafety::Partial => 2, - }; - LeanIxonDefinitionSafety::new(LeanObject::from_enum_tag(tag)) -} +impl LeanIxonDefKind { + /// Build Ixon.DefKind + /// | defn -- tag 0 + /// | opaq -- tag 1 + /// | thm -- tag 2 + /// Simple enums are passed as raw (unboxed) tag values across Lean FFI. + pub fn build(kind: &DefKind) -> Self { + let tag = match kind { + DefKind::Definition => 0, + DefKind::Opaque => 1, + DefKind::Theorem => 2, + }; + Self::new(LeanObject::from_enum_tag(tag)) + } -/// Build Ixon.QuotKind -/// | type -- tag 0 -/// | ctor -- tag 1 -/// | lift -- tag 2 -/// | ind -- tag 3 -pub fn build_ixon_quot_kind(kind: &QuotKind) -> LeanIxonQuotKind { - let tag = match kind { - QuotKind::Type => 0, - QuotKind::Ctor => 1, - QuotKind::Lift => 2, - QuotKind::Ind => 3, - }; - LeanIxonQuotKind::new(LeanObject::from_enum_tag(tag)) + /// Decode Ixon.DefKind (simple enum, raw unboxed tag value). 
+ pub fn decode(self) -> DefKind { + let tag = self.as_enum_tag(); + match tag { + 0 => DefKind::Definition, + 1 => DefKind::Opaque, + 2 => DefKind::Theorem, + _ => panic!("Invalid Ixon.DefKind tag: {}", tag), + } + } } -// ============================================================================= -// Decode Functions -// ============================================================================= +impl LeanIxonDefinitionSafety { + /// Build Ixon.DefinitionSafety + /// | unsaf -- tag 0 + /// | safe -- tag 1 + /// | part -- tag 2 + pub fn build(safety: &DefinitionSafety) -> Self { + let tag = match safety { + DefinitionSafety::Unsafe => 0, + DefinitionSafety::Safe => 1, + DefinitionSafety::Partial => 2, + }; + Self::new(LeanObject::from_enum_tag(tag)) + } -/// Decode Ixon.DefKind (simple enum, raw unboxed tag value). -pub fn decode_ixon_def_kind(obj: LeanIxonDefKind) -> DefKind { - let tag = obj.as_enum_tag(); - match tag { - 0 => DefKind::Definition, - 1 => DefKind::Opaque, - 2 => DefKind::Theorem, - _ => panic!("Invalid Ixon.DefKind tag: {}", tag), + /// Decode Ixon.DefinitionSafety (simple enum, raw unboxed tag value). + pub fn decode(self) -> DefinitionSafety { + let tag = self.as_enum_tag(); + match tag { + 0 => DefinitionSafety::Unsafe, + 1 => DefinitionSafety::Safe, + 2 => DefinitionSafety::Partial, + _ => panic!("Invalid Ixon.DefinitionSafety tag: {}", tag), + } } } -/// Decode Ixon.DefinitionSafety (simple enum, raw unboxed tag value). 
-pub fn decode_ixon_definition_safety( - obj: LeanIxonDefinitionSafety, -) -> DefinitionSafety { - let tag = obj.as_enum_tag(); - match tag { - 0 => DefinitionSafety::Unsafe, - 1 => DefinitionSafety::Safe, - 2 => DefinitionSafety::Partial, - _ => panic!("Invalid Ixon.DefinitionSafety tag: {}", tag), +impl LeanIxonQuotKind { + /// Build Ixon.QuotKind + /// | type -- tag 0 + /// | ctor -- tag 1 + /// | lift -- tag 2 + /// | ind -- tag 3 + pub fn build(kind: &QuotKind) -> Self { + let tag = match kind { + QuotKind::Type => 0, + QuotKind::Ctor => 1, + QuotKind::Lift => 2, + QuotKind::Ind => 3, + }; + Self::new(LeanObject::from_enum_tag(tag)) } -} -/// Decode Ixon.QuotKind (simple enum, raw unboxed tag value). -pub fn decode_ixon_quot_kind(obj: LeanIxonQuotKind) -> QuotKind { - let tag = obj.as_enum_tag(); - match tag { - 0 => QuotKind::Type, - 1 => QuotKind::Ctor, - 2 => QuotKind::Lift, - 3 => QuotKind::Ind, - _ => panic!("Invalid Ixon.QuotKind tag: {}", tag), + /// Decode Ixon.QuotKind (simple enum, raw unboxed tag value). + pub fn decode(self) -> QuotKind { + let tag = self.as_enum_tag(); + match tag { + 0 => QuotKind::Type, + 1 => QuotKind::Ctor, + 2 => QuotKind::Lift, + 3 => QuotKind::Ind, + _ => panic!("Invalid Ixon.QuotKind tag: {}", tag), + } } } @@ -99,8 +98,8 @@ pub fn decode_ixon_quot_kind(obj: LeanIxonQuotKind) -> QuotKind { pub extern "C" fn rs_roundtrip_ixon_def_kind( obj: LeanIxonDefKind, ) -> LeanIxonDefKind { - let kind = decode_ixon_def_kind(obj); - build_def_kind(&kind) + let kind = obj.decode(); + LeanIxonDefKind::build(&kind) } /// Round-trip Ixon.DefinitionSafety. @@ -108,8 +107,8 @@ pub extern "C" fn rs_roundtrip_ixon_def_kind( pub extern "C" fn rs_roundtrip_ixon_definition_safety( obj: LeanIxonDefinitionSafety, ) -> LeanIxonDefinitionSafety { - let safety = decode_ixon_definition_safety(obj); - build_ixon_definition_safety(&safety) + let safety = obj.decode(); + LeanIxonDefinitionSafety::build(&safety) } /// Round-trip Ixon.QuotKind. 
@@ -117,6 +116,6 @@ pub extern "C" fn rs_roundtrip_ixon_definition_safety( pub extern "C" fn rs_roundtrip_ixon_quot_kind( obj: LeanIxonQuotKind, ) -> LeanIxonQuotKind { - let kind = decode_ixon_quot_kind(obj); - build_ixon_quot_kind(&kind) + let kind = obj.decode(); + LeanIxonQuotKind::build(&kind) } diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs index e45c08ba..0fba98fb 100644 --- a/src/ffi/ixon/env.rs +++ b/src/ffi/ixon/env.rs @@ -17,40 +17,7 @@ use crate::lean::{ use lean_ffi::object::{LeanArray, LeanByteArray, LeanCtor, LeanExcept}; use crate::ffi::builder::LeanBuildCache; -use crate::ffi::ix::name::{build_name, decode_ix_name}; -use crate::ffi::ixon::constant::{ - build_address_from_ixon, build_ixon_constant, decode_ixon_address, - decode_ixon_constant, -}; -use crate::ffi::ixon::meta::{build_constant_meta, decode_constant_meta}; - -// ============================================================================= -// Comm Type (secret: Address, payload: Address) -// ============================================================================= - -/// Decoded Ixon.Comm -pub struct DecodedComm { - pub secret: Address, - pub payload: Address, -} - -/// Decode Ixon.Comm from Lean pointer. -/// Comm = { secret : Address, payload : Address } -pub fn decode_comm(obj: LeanIxonComm) -> DecodedComm { - let ctor = obj.as_ctor(); - DecodedComm { - secret: decode_ixon_address(ctor.get(0).as_byte_array()), - payload: decode_ixon_address(ctor.get(1).as_byte_array()), - } -} - -/// Build Ixon.Comm Lean object. 
-pub fn build_comm(comm: &DecodedComm) -> LeanIxonComm { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(&comm.secret)); - ctor.set(1, build_address_from_ixon(&comm.payload)); - LeanIxonComm::new(*ctor) -} +use crate::lean::LeanIxAddress; // ============================================================================= // RawConst (addr: Address, const: Constant) @@ -62,21 +29,34 @@ pub struct DecodedRawConst { pub constant: IxonConstant, } -/// Decode Ixon.RawConst from Lean pointer. -pub fn decode_raw_const(obj: LeanIxonRawConst) -> DecodedRawConst { - let ctor = obj.as_ctor(); - DecodedRawConst { - addr: decode_ixon_address(ctor.get(0).as_byte_array()), - constant: decode_ixon_constant(LeanIxonConstant::new(ctor.get(1))), +impl LeanIxonRawConst { + /// Decode Ixon.RawConst from Lean pointer. + pub fn decode(self) -> DecodedRawConst { + let ctor = self.as_ctor(); + DecodedRawConst { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + constant: LeanIxonConstant::new(ctor.get(1)).decode(), + } } -} -/// Build Ixon.RawConst Lean object. -pub fn build_raw_const(rc: &DecodedRawConst) -> LeanIxonRawConst { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(&rc.addr)); - ctor.set(1, build_ixon_constant(&rc.constant)); - LeanIxonRawConst::new(*ctor) + /// Build Ixon.RawConst Lean object. + pub fn build(rc: &DecodedRawConst) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(&rc.addr)); + ctor.set(1, LeanIxonConstant::build(&rc.constant)); + Self::new(*ctor) + } + + /// Build from individual parts (used by compile.rs). 
+ pub fn build_from_parts( + addr: &Address, + constant: &IxonConstant, + ) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanIxonConstant::build(constant)); + Self::new(*ctor) + } } // ============================================================================= @@ -90,26 +70,42 @@ pub struct DecodedRawNamed { pub const_meta: ConstantMeta, } -/// Decode Ixon.RawNamed from Lean pointer. -pub fn decode_raw_named(obj: LeanIxonRawNamed) -> DecodedRawNamed { - let ctor = obj.as_ctor(); - DecodedRawNamed { - name: decode_ix_name(LeanIxName::new(ctor.get(0))), - addr: decode_ixon_address(ctor.get(1).as_byte_array()), - const_meta: decode_constant_meta(LeanIxonConstantMeta::new(ctor.get(2))), +impl LeanIxonRawNamed { + /// Decode Ixon.RawNamed from Lean pointer. + pub fn decode(self) -> DecodedRawNamed { + let ctor = self.as_ctor(); + DecodedRawNamed { + name: LeanIxName::new(ctor.get(0)).decode(), + addr: LeanIxAddress::new(ctor.get(1)).decode(), + const_meta: LeanIxonConstantMeta::new(ctor.get(2)).decode(), + } + } + + /// Build Ixon.RawNamed Lean object. + pub fn build( + cache: &mut LeanBuildCache, + rn: &DecodedRawNamed, + ) -> Self { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, LeanIxName::build(cache, &rn.name)); + ctor.set(1, LeanIxAddress::build(&rn.addr)); + ctor.set(2, LeanIxonConstantMeta::build(&rn.const_meta)); + Self::new(*ctor) } -} -/// Build Ixon.RawNamed Lean object. -pub fn build_raw_named( - cache: &mut LeanBuildCache, - rn: &DecodedRawNamed, -) -> LeanIxonRawNamed { - let ctor = LeanCtor::alloc(0, 3, 0); - ctor.set(0, build_name(cache, &rn.name)); - ctor.set(1, build_address_from_ixon(&rn.addr)); - ctor.set(2, build_constant_meta(&rn.const_meta)); - LeanIxonRawNamed::new(*ctor) + /// Build from individual parts (used by compile.rs). 
+ pub fn build_from_parts( + cache: &mut LeanBuildCache, + name: &Name, + addr: &Address, + meta: &ConstantMeta, + ) -> Self { + let ctor = LeanCtor::alloc(0, 3, 0); + ctor.set(0, LeanIxName::build(cache, name)); + ctor.set(1, LeanIxAddress::build(addr)); + ctor.set(2, LeanIxonConstantMeta::build(meta)); + Self::new(*ctor) + } } // ============================================================================= @@ -122,22 +118,32 @@ pub struct DecodedRawBlob { pub bytes: Vec, } -/// Decode Ixon.RawBlob from Lean pointer. -pub fn decode_raw_blob(obj: LeanIxonRawBlob) -> DecodedRawBlob { - let ctor = obj.as_ctor(); - let ba = ctor.get(1).as_byte_array(); - DecodedRawBlob { - addr: decode_ixon_address(ctor.get(0).as_byte_array()), - bytes: ba.as_bytes().to_vec(), +impl LeanIxonRawBlob { + /// Decode Ixon.RawBlob from Lean pointer. + pub fn decode(self) -> DecodedRawBlob { + let ctor = self.as_ctor(); + let ba = ctor.get(1).as_byte_array(); + DecodedRawBlob { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + bytes: ba.as_bytes().to_vec(), + } } -} -/// Build Ixon.RawBlob Lean object. -pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanIxonRawBlob { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(&rb.addr)); - ctor.set(1, LeanByteArray::from_bytes(&rb.bytes)); - LeanIxonRawBlob::new(*ctor) + /// Build Ixon.RawBlob Lean object. + pub fn build(rb: &DecodedRawBlob) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(&rb.addr)); + ctor.set(1, LeanByteArray::from_bytes(&rb.bytes)); + Self::new(*ctor) + } + + /// Build from individual parts (used by compile.rs). 
+ pub fn build_from_parts(addr: &Address, bytes: &[u8]) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanByteArray::from_bytes(bytes)); + Self::new(*ctor) + } } // ============================================================================= @@ -147,24 +153,34 @@ pub fn build_raw_blob(rb: &DecodedRawBlob) -> LeanIxonRawBlob { /// Decoded Ixon.RawComm pub struct DecodedRawComm { pub addr: Address, - pub comm: DecodedComm, + pub comm: Comm, } -/// Decode Ixon.RawComm from Lean pointer. -pub fn decode_raw_comm(obj: LeanIxonRawComm) -> DecodedRawComm { - let ctor = obj.as_ctor(); - DecodedRawComm { - addr: decode_ixon_address(ctor.get(0).as_byte_array()), - comm: decode_comm(LeanIxonComm::new(ctor.get(1))), +impl LeanIxonRawComm { + /// Decode Ixon.RawComm from Lean pointer. + pub fn decode(self) -> DecodedRawComm { + let ctor = self.as_ctor(); + DecodedRawComm { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + comm: LeanIxonComm::new(ctor.get(1)).decode(), + } + } + + /// Build Ixon.RawComm Lean object. + pub fn build(rc: &DecodedRawComm) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(&rc.addr)); + ctor.set(1, LeanIxonComm::build(&rc.comm)); + Self::new(*ctor) } -} -/// Build Ixon.RawComm Lean object. -pub fn build_raw_comm(rc: &DecodedRawComm) -> LeanIxonRawComm { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(&rc.addr)); - ctor.set(1, build_comm(&rc.comm)); - LeanIxonRawComm::new(*ctor) + /// Build from individual parts (used by compile.rs). 
+ pub fn build_from_parts(addr: &Address, comm: &Comm) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanIxonComm::build(comm)); + Self::new(*ctor) + } } // ============================================================================= @@ -177,25 +193,27 @@ pub struct DecodedRawNameEntry { pub name: Name, } -/// Decode Ixon.RawNameEntry from Lean pointer. -pub fn decode_raw_name_entry(obj: LeanIxonRawNameEntry) -> DecodedRawNameEntry { - let ctor = obj.as_ctor(); - DecodedRawNameEntry { - addr: decode_ixon_address(ctor.get(0).as_byte_array()), - name: decode_ix_name(LeanIxName::new(ctor.get(1))), +impl LeanIxonRawNameEntry { + /// Decode Ixon.RawNameEntry from Lean pointer. + pub fn decode(self) -> DecodedRawNameEntry { + let ctor = self.as_ctor(); + DecodedRawNameEntry { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + name: LeanIxName::new(ctor.get(1)).decode(), + } } -} -/// Build Ixon.RawNameEntry Lean object. -pub fn build_raw_name_entry( - cache: &mut LeanBuildCache, - addr: &Address, - name: &Name, -) -> LeanIxonRawNameEntry { - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, build_address_from_ixon(addr)); - ctor.set(1, build_name(cache, name)); - LeanIxonRawNameEntry::new(*ctor) + /// Build Ixon.RawNameEntry Lean object. + pub fn build( + cache: &mut LeanBuildCache, + addr: &Address, + name: &Name, + ) -> Self { + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, LeanIxAddress::build(addr)); + ctor.set(1, LeanIxName::build(cache, name)); + Self::new(*ctor) + } } // ============================================================================= @@ -211,67 +229,68 @@ pub struct DecodedRawEnv { pub names: Vec, } -/// Decode Ixon.RawEnv from Lean pointer. 
-pub fn decode_raw_env(obj: LeanIxonRawEnv) -> DecodedRawEnv { - let ctor = obj.as_ctor(); - let consts_arr = ctor.get(0).as_array(); - let named_arr = ctor.get(1).as_array(); - let blobs_arr = ctor.get(2).as_array(); - let comms_arr = ctor.get(3).as_array(); - let names_arr = ctor.get(4).as_array(); - - DecodedRawEnv { - consts: consts_arr.map(|x| decode_raw_const(LeanIxonRawConst::new(x))), - named: named_arr.map(|x| decode_raw_named(LeanIxonRawNamed::new(x))), - blobs: blobs_arr.map(|x| decode_raw_blob(LeanIxonRawBlob::new(x))), - comms: comms_arr.map(|x| decode_raw_comm(LeanIxonRawComm::new(x))), - names: names_arr - .map(|x| decode_raw_name_entry(LeanIxonRawNameEntry::new(x))), - } -} - -/// Build Ixon.RawEnv Lean object. -pub fn build_raw_env(env: &DecodedRawEnv) -> LeanIxonRawEnv { - let mut cache = LeanBuildCache::new(); - - // Build consts array - let consts_arr = LeanArray::alloc(env.consts.len()); - for (i, rc) in env.consts.iter().enumerate() { - consts_arr.set(i, build_raw_const(rc)); - } - - // Build named array - let named_arr = LeanArray::alloc(env.named.len()); - for (i, rn) in env.named.iter().enumerate() { - named_arr.set(i, build_raw_named(&mut cache, rn)); +impl LeanIxonRawEnv { + /// Decode Ixon.RawEnv from Lean pointer. 
+ pub fn decode(self) -> DecodedRawEnv { + let ctor = self.as_ctor(); + let consts_arr = ctor.get(0).as_array(); + let named_arr = ctor.get(1).as_array(); + let blobs_arr = ctor.get(2).as_array(); + let comms_arr = ctor.get(3).as_array(); + let names_arr = ctor.get(4).as_array(); + + DecodedRawEnv { + consts: consts_arr.map(|x| LeanIxonRawConst::new(x).decode()), + named: named_arr.map(|x| LeanIxonRawNamed::new(x).decode()), + blobs: blobs_arr.map(|x| LeanIxonRawBlob::new(x).decode()), + comms: comms_arr.map(|x| LeanIxonRawComm::new(x).decode()), + names: names_arr.map(|x| LeanIxonRawNameEntry::new(x).decode()), + } } - // Build blobs array - let blobs_arr = LeanArray::alloc(env.blobs.len()); - for (i, rb) in env.blobs.iter().enumerate() { - blobs_arr.set(i, build_raw_blob(rb)); + /// Build Ixon.RawEnv Lean object. + pub fn build(env: &DecodedRawEnv) -> Self { + let mut cache = LeanBuildCache::new(); + + // Build consts array + let consts_arr = LeanArray::alloc(env.consts.len()); + for (i, rc) in env.consts.iter().enumerate() { + consts_arr.set(i, LeanIxonRawConst::build(rc)); + } + + // Build named array + let named_arr = LeanArray::alloc(env.named.len()); + for (i, rn) in env.named.iter().enumerate() { + named_arr.set(i, LeanIxonRawNamed::build(&mut cache, rn)); + } + + // Build blobs array + let blobs_arr = LeanArray::alloc(env.blobs.len()); + for (i, rb) in env.blobs.iter().enumerate() { + blobs_arr.set(i, LeanIxonRawBlob::build(rb)); + } + + // Build comms array + let comms_arr = LeanArray::alloc(env.comms.len()); + for (i, rc) in env.comms.iter().enumerate() { + comms_arr.set(i, LeanIxonRawComm::build(rc)); + } + + // Build names array + let names_arr = LeanArray::alloc(env.names.len()); + for (i, rn) in env.names.iter().enumerate() { + names_arr.set(i, LeanIxonRawNameEntry::build(&mut cache, &rn.addr, &rn.name)); + } + + // Build RawEnv structure + let ctor = LeanCtor::alloc(0, 5, 0); + ctor.set(0, consts_arr); + ctor.set(1, named_arr); + ctor.set(2, 
blobs_arr); + ctor.set(3, comms_arr); + ctor.set(4, names_arr); + Self::new(*ctor) } - - // Build comms array - let comms_arr = LeanArray::alloc(env.comms.len()); - for (i, rc) in env.comms.iter().enumerate() { - comms_arr.set(i, build_raw_comm(rc)); - } - - // Build names array - let names_arr = LeanArray::alloc(env.names.len()); - for (i, rn) in env.names.iter().enumerate() { - names_arr.set(i, build_raw_name_entry(&mut cache, &rn.addr, &rn.name)); - } - - // Build RawEnv structure - let ctor = LeanCtor::alloc(0, 5, 0); - ctor.set(0, consts_arr); - ctor.set(1, named_arr); - ctor.set(2, blobs_arr); - ctor.set(3, comms_arr); - ctor.set(4, names_arr); - LeanIxonRawEnv::new(*ctor) } // ============================================================================= @@ -295,9 +314,7 @@ pub fn decoded_to_ixon_env(decoded: &DecodedRawEnv) -> IxonEnv { env.blobs.insert(rb.addr.clone(), rb.bytes.clone()); } for rc in &decoded.comms { - let comm = - Comm { secret: rc.comm.secret.clone(), payload: rc.comm.payload.clone() }; - env.store_comm(rc.addr.clone(), comm); + env.store_comm(rc.addr.clone(), rc.comm.clone()); } env } @@ -331,10 +348,7 @@ pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { .iter() .map(|e| DecodedRawComm { addr: e.key().clone(), - comm: DecodedComm { - secret: e.value().secret.clone(), - payload: e.value().payload.clone(), - }, + comm: e.value().clone(), }) .collect(); let names = env @@ -355,7 +369,7 @@ pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { /// FFI: Serialize an Ixon.RawEnv -> ByteArray via Rust's Env.put. Pure. 
#[unsafe(no_mangle)] pub extern "C" fn rs_ser_env(obj: LeanIxonRawEnv) -> LeanByteArray { - let decoded = decode_raw_env(obj); + let decoded = obj.decode(); let env = decoded_to_ixon_env(&decoded); let mut buf = Vec::new(); env.put(&mut buf).expect("Env serialization failed"); @@ -375,7 +389,7 @@ pub extern "C" fn rs_des_env(obj: LeanByteArray) -> LeanExcept { match IxonEnv::get(&mut slice) { Ok(env) => { let decoded = ixon_env_to_decoded(&env); - let raw_env = build_raw_env(&decoded); + let raw_env = LeanIxonRawEnv::build(&decoded); LeanExcept::ok(raw_env) }, Err(e) => { diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs index b5ee586b..c231c96a 100644 --- a/src/ffi/ixon/expr.rs +++ b/src/ffi/ixon/expr.rs @@ -6,118 +6,6 @@ use crate::ix::ixon::expr::Expr as IxonExpr; use crate::lean::LeanIxonExpr; use lean_ffi::object::{LeanArray, LeanCtor}; -/// Build Ixon.Expr (12 constructors). -pub fn build_ixon_expr(expr: &IxonExpr) -> LeanIxonExpr { - let obj = match expr { - IxonExpr::Sort(idx) => { - let ctor = LeanCtor::alloc(0, 0, 8); - ctor.set_u64(0, *idx); - *ctor - }, - IxonExpr::Var(idx) => { - let ctor = LeanCtor::alloc(1, 0, 8); - ctor.set_u64(0, *idx); - *ctor - }, - IxonExpr::Ref(ref_idx, univ_idxs) => { - let arr = LeanArray::alloc(univ_idxs.len()); - for (i, idx) in univ_idxs.iter().enumerate() { - let uint64_obj = LeanCtor::alloc(0, 0, 8); - uint64_obj.set_u64(0, *idx); - arr.set(i, uint64_obj); - } - let ctor = LeanCtor::alloc(2, 1, 8); - ctor.set(0, arr); - ctor.set_u64(8, *ref_idx); - *ctor - }, - IxonExpr::Rec(rec_idx, univ_idxs) => { - let arr = LeanArray::alloc(univ_idxs.len()); - for (i, idx) in univ_idxs.iter().enumerate() { - let uint64_obj = LeanCtor::alloc(0, 0, 8); - uint64_obj.set_u64(0, *idx); - arr.set(i, uint64_obj); - } - let ctor = LeanCtor::alloc(3, 1, 8); - ctor.set(0, arr); - ctor.set_u64(8, *rec_idx); - *ctor - }, - IxonExpr::Prj(type_ref_idx, field_idx, val) => { - let val_obj = build_ixon_expr(val); - let ctor = 
LeanCtor::alloc(4, 1, 16); - ctor.set(0, val_obj); - ctor.set_u64(8, *type_ref_idx); - ctor.set_u64(16, *field_idx); - *ctor - }, - IxonExpr::Str(ref_idx) => { - let ctor = LeanCtor::alloc(5, 0, 8); - ctor.set_u64(0, *ref_idx); - *ctor - }, - IxonExpr::Nat(ref_idx) => { - let ctor = LeanCtor::alloc(6, 0, 8); - ctor.set_u64(0, *ref_idx); - *ctor - }, - IxonExpr::App(fun, arg) => { - let fun_obj = build_ixon_expr(fun); - let arg_obj = build_ixon_expr(arg); - let ctor = LeanCtor::alloc(7, 2, 0); - ctor.set(0, fun_obj); - ctor.set(1, arg_obj); - *ctor - }, - IxonExpr::Lam(ty, body) => { - let ty_obj = build_ixon_expr(ty); - let body_obj = build_ixon_expr(body); - let ctor = LeanCtor::alloc(8, 2, 0); - ctor.set(0, ty_obj); - ctor.set(1, body_obj); - *ctor - }, - IxonExpr::All(ty, body) => { - let ty_obj = build_ixon_expr(ty); - let body_obj = build_ixon_expr(body); - let ctor = LeanCtor::alloc(9, 2, 0); - ctor.set(0, ty_obj); - ctor.set(1, body_obj); - *ctor - }, - IxonExpr::Let(non_dep, ty, val, body) => { - let ty_obj = build_ixon_expr(ty); - let val_obj = build_ixon_expr(val); - let body_obj = build_ixon_expr(body); - let ctor = LeanCtor::alloc(10, 3, 1); - ctor.set(0, ty_obj); - ctor.set(1, val_obj); - ctor.set(2, body_obj); - ctor.set_u8(24, if *non_dep { 1 } else { 0 }); - *ctor - }, - IxonExpr::Share(idx) => { - let ctor = LeanCtor::alloc(11, 0, 8); - ctor.set_u64(0, *idx); - *ctor - }, - }; - LeanIxonExpr::new(obj) -} - -/// Build an Array of Ixon.Expr. -pub fn build_ixon_expr_array(exprs: &[Arc]) -> LeanArray { - let arr = LeanArray::alloc(exprs.len()); - for (i, expr) in exprs.iter().enumerate() { - arr.set(i, build_ixon_expr(expr)); - } - arr -} - -// ============================================================================= -// Decode Functions -// ============================================================================= - /// Decode Array UInt64 from Lean. 
fn decode_u64_array(obj: LeanArray) -> Vec { obj @@ -133,94 +21,204 @@ fn decode_u64_array(obj: LeanArray) -> Vec { .collect() } -/// Decode Ixon.Expr (12 constructors). -pub fn decode_ixon_expr(obj: LeanIxonExpr) -> IxonExpr { - let ctor = obj.as_ctor(); - let tag = ctor.tag(); - match tag { - 0 => { - let idx = ctor.scalar_u64(0, 0); - IxonExpr::Sort(idx) - }, - 1 => { - let idx = ctor.scalar_u64(0, 0); - IxonExpr::Var(idx) - }, - 2 => { - let ref_idx = ctor.scalar_u64(1, 0); - let univ_idxs = decode_u64_array(ctor.get(0).as_array()); - IxonExpr::Ref(ref_idx, univ_idxs) - }, - 3 => { - let rec_idx = ctor.scalar_u64(1, 0); - let univ_idxs = decode_u64_array(ctor.get(0).as_array()); - IxonExpr::Rec(rec_idx, univ_idxs) - }, - 4 => { - let val_obj = LeanIxonExpr::new(ctor.get(0)); - let type_ref_idx = ctor.scalar_u64(1, 0); - let field_idx = ctor.scalar_u64(1, 8); - IxonExpr::Prj( - type_ref_idx, - field_idx, - Arc::new(decode_ixon_expr(val_obj)), - ) - }, - 5 => { - let ref_idx = ctor.scalar_u64(0, 0); - IxonExpr::Str(ref_idx) - }, - 6 => { - let ref_idx = ctor.scalar_u64(0, 0); - IxonExpr::Nat(ref_idx) - }, - 7 => { - let f_obj = LeanIxonExpr::new(ctor.get(0)); - let a_obj = LeanIxonExpr::new(ctor.get(1)); - IxonExpr::App( - Arc::new(decode_ixon_expr(f_obj)), - Arc::new(decode_ixon_expr(a_obj)), - ) - }, - 8 => { - let ty_obj = LeanIxonExpr::new(ctor.get(0)); - let body_obj = LeanIxonExpr::new(ctor.get(1)); - IxonExpr::Lam( - Arc::new(decode_ixon_expr(ty_obj)), - Arc::new(decode_ixon_expr(body_obj)), - ) - }, - 9 => { - let ty_obj = LeanIxonExpr::new(ctor.get(0)); - let body_obj = LeanIxonExpr::new(ctor.get(1)); - IxonExpr::All( - Arc::new(decode_ixon_expr(ty_obj)), - Arc::new(decode_ixon_expr(body_obj)), - ) - }, - 10 => { - let ty_obj = LeanIxonExpr::new(ctor.get(0)); - let val_obj = LeanIxonExpr::new(ctor.get(1)); - let body_obj = LeanIxonExpr::new(ctor.get(2)); - let non_dep = ctor.scalar_u8(3, 0) != 0; - IxonExpr::Let( - non_dep, - 
Arc::new(decode_ixon_expr(ty_obj)), - Arc::new(decode_ixon_expr(val_obj)), - Arc::new(decode_ixon_expr(body_obj)), - ) - }, - 11 => { - let idx = ctor.scalar_u64(0, 0); - IxonExpr::Share(idx) - }, - _ => panic!("Invalid Ixon.Expr tag: {}", tag), +impl LeanIxonExpr { + /// Build Ixon.Expr (12 constructors). + pub fn build(expr: &IxonExpr) -> Self { + let obj = match expr { + IxonExpr::Sort(idx) => { + let ctor = LeanCtor::alloc(0, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + IxonExpr::Var(idx) => { + let ctor = LeanCtor::alloc(1, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + IxonExpr::Ref(ref_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); + for (i, idx) in univ_idxs.iter().enumerate() { + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); + } + let ctor = LeanCtor::alloc(2, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *ref_idx); + *ctor + }, + IxonExpr::Rec(rec_idx, univ_idxs) => { + let arr = LeanArray::alloc(univ_idxs.len()); + for (i, idx) in univ_idxs.iter().enumerate() { + let uint64_obj = LeanCtor::alloc(0, 0, 8); + uint64_obj.set_u64(0, *idx); + arr.set(i, uint64_obj); + } + let ctor = LeanCtor::alloc(3, 1, 8); + ctor.set(0, arr); + ctor.set_u64(8, *rec_idx); + *ctor + }, + IxonExpr::Prj(type_ref_idx, field_idx, val) => { + let val_obj = Self::build(val); + let ctor = LeanCtor::alloc(4, 1, 16); + ctor.set(0, val_obj); + ctor.set_u64(8, *type_ref_idx); + ctor.set_u64(16, *field_idx); + *ctor + }, + IxonExpr::Str(ref_idx) => { + let ctor = LeanCtor::alloc(5, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + IxonExpr::Nat(ref_idx) => { + let ctor = LeanCtor::alloc(6, 0, 8); + ctor.set_u64(0, *ref_idx); + *ctor + }, + IxonExpr::App(fun, arg) => { + let fun_obj = Self::build(fun); + let arg_obj = Self::build(arg); + let ctor = LeanCtor::alloc(7, 2, 0); + ctor.set(0, fun_obj); + ctor.set(1, arg_obj); + *ctor + }, + IxonExpr::Lam(ty, body) => { + let ty_obj = Self::build(ty); + let body_obj = 
Self::build(body); + let ctor = LeanCtor::alloc(8, 2, 0); + ctor.set(0, ty_obj); + ctor.set(1, body_obj); + *ctor + }, + IxonExpr::All(ty, body) => { + let ty_obj = Self::build(ty); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(9, 2, 0); + ctor.set(0, ty_obj); + ctor.set(1, body_obj); + *ctor + }, + IxonExpr::Let(non_dep, ty, val, body) => { + let ty_obj = Self::build(ty); + let val_obj = Self::build(val); + let body_obj = Self::build(body); + let ctor = LeanCtor::alloc(10, 3, 1); + ctor.set(0, ty_obj); + ctor.set(1, val_obj); + ctor.set(2, body_obj); + ctor.set_u8(24, if *non_dep { 1 } else { 0 }); + *ctor + }, + IxonExpr::Share(idx) => { + let ctor = LeanCtor::alloc(11, 0, 8); + ctor.set_u64(0, *idx); + *ctor + }, + }; + Self::new(obj) + } + + /// Build an Array of Ixon.Expr. + pub fn build_array(exprs: &[Arc]) -> LeanArray { + let arr = LeanArray::alloc(exprs.len()); + for (i, expr) in exprs.iter().enumerate() { + arr.set(i, Self::build(expr)); + } + arr + } + + /// Decode Ixon.Expr (12 constructors). 
+ pub fn decode(self) -> IxonExpr { + let ctor = self.as_ctor(); + let tag = ctor.tag(); + match tag { + 0 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Sort(idx) + }, + 1 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Var(idx) + }, + 2 => { + let ref_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(ctor.get(0).as_array()); + IxonExpr::Ref(ref_idx, univ_idxs) + }, + 3 => { + let rec_idx = ctor.scalar_u64(1, 0); + let univ_idxs = decode_u64_array(ctor.get(0).as_array()); + IxonExpr::Rec(rec_idx, univ_idxs) + }, + 4 => { + let val_obj = Self::new(ctor.get(0)); + let type_ref_idx = ctor.scalar_u64(1, 0); + let field_idx = ctor.scalar_u64(1, 8); + IxonExpr::Prj( + type_ref_idx, + field_idx, + Arc::new(val_obj.decode()), + ) + }, + 5 => { + let ref_idx = ctor.scalar_u64(0, 0); + IxonExpr::Str(ref_idx) + }, + 6 => { + let ref_idx = ctor.scalar_u64(0, 0); + IxonExpr::Nat(ref_idx) + }, + 7 => { + let f_obj = Self::new(ctor.get(0)); + let a_obj = Self::new(ctor.get(1)); + IxonExpr::App( + Arc::new(f_obj.decode()), + Arc::new(a_obj.decode()), + ) + }, + 8 => { + let ty_obj = Self::new(ctor.get(0)); + let body_obj = Self::new(ctor.get(1)); + IxonExpr::Lam( + Arc::new(ty_obj.decode()), + Arc::new(body_obj.decode()), + ) + }, + 9 => { + let ty_obj = Self::new(ctor.get(0)); + let body_obj = Self::new(ctor.get(1)); + IxonExpr::All( + Arc::new(ty_obj.decode()), + Arc::new(body_obj.decode()), + ) + }, + 10 => { + let ty_obj = Self::new(ctor.get(0)); + let val_obj = Self::new(ctor.get(1)); + let body_obj = Self::new(ctor.get(2)); + let non_dep = ctor.scalar_u8(3, 0) != 0; + IxonExpr::Let( + non_dep, + Arc::new(ty_obj.decode()), + Arc::new(val_obj.decode()), + Arc::new(body_obj.decode()), + ) + }, + 11 => { + let idx = ctor.scalar_u64(0, 0); + IxonExpr::Share(idx) + }, + _ => panic!("Invalid Ixon.Expr tag: {}", tag), + } } -} -/// Decode Array Ixon.Expr. 
-pub fn decode_ixon_expr_array(obj: LeanArray) -> Vec> { - obj.map(|e| Arc::new(decode_ixon_expr(LeanIxonExpr::new(e)))) + /// Decode Array Ixon.Expr. + pub fn decode_array(obj: LeanArray) -> Vec> { + obj.map(|e| Arc::new(Self::new(e).decode())) + } } // ============================================================================= @@ -230,6 +228,6 @@ pub fn decode_ixon_expr_array(obj: LeanArray) -> Vec> { /// Round-trip Ixon.Expr. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_expr(obj: LeanIxonExpr) -> LeanIxonExpr { - let expr = decode_ixon_expr(obj); - build_ixon_expr(&expr) + let expr = obj.decode(); + LeanIxonExpr::build(&expr) } diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs index 7aa3cfaa..019bebe4 100644 --- a/src/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -16,81 +16,11 @@ use crate::lean::{ }; use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; -use crate::ffi::ix::constant::{ - build_reducibility_hints, decode_reducibility_hints, -}; -use crate::ffi::ix::expr::binder_info_to_u8; -use crate::ffi::ixon::constant::{ - build_address_array, build_address_from_ixon, decode_ixon_address, - decode_ixon_address_array, -}; - -// ============================================================================= -// DataValue Build/Decode -// ============================================================================= - -/// Build Ixon.DataValue (for metadata) -pub fn build_ixon_data_value(dv: &IxonDataValue) -> LeanIxonDataValue { - let obj = match dv { - IxonDataValue::OfString(addr) => { - let ctor = LeanCtor::alloc(0, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - IxonDataValue::OfBool(b) => { - let ctor = LeanCtor::alloc(1, 0, 1); - ctor.set_u8(0, if *b { 1 } else { 0 }); - *ctor - }, - IxonDataValue::OfName(addr) => { - let ctor = LeanCtor::alloc(2, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - IxonDataValue::OfNat(addr) => { - let ctor = LeanCtor::alloc(3, 1, 0); - ctor.set(0, 
build_address_from_ixon(addr)); - *ctor - }, - IxonDataValue::OfInt(addr) => { - let ctor = LeanCtor::alloc(4, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - IxonDataValue::OfSyntax(addr) => { - let ctor = LeanCtor::alloc(5, 1, 0); - ctor.set(0, build_address_from_ixon(addr)); - *ctor - }, - }; - LeanIxonDataValue::new(obj) -} - -/// Decode Ixon.DataValue. -pub fn decode_ixon_data_value(obj: LeanIxonDataValue) -> IxonDataValue { - let ctor = obj.as_ctor(); - match ctor.tag() { - 0 => { - IxonDataValue::OfString(decode_ixon_address(ctor.get(0).as_byte_array())) - }, - 1 => { - let b = ctor.scalar_u8(0, 0) != 0; - IxonDataValue::OfBool(b) - }, - 2 => { - IxonDataValue::OfName(decode_ixon_address(ctor.get(0).as_byte_array())) - }, - 3 => IxonDataValue::OfNat(decode_ixon_address(ctor.get(0).as_byte_array())), - 4 => IxonDataValue::OfInt(decode_ixon_address(ctor.get(0).as_byte_array())), - 5 => { - IxonDataValue::OfSyntax(decode_ixon_address(ctor.get(0).as_byte_array())) - }, - tag => panic!("Invalid Ixon.DataValue tag: {}", tag), - } -} +use crate::lean::LeanIxBinderInfo; +use crate::lean::LeanIxAddress; // ============================================================================= -// KVMap Build/Decode +// KVMap Build/Decode (not domain types, kept as free functions) // ============================================================================= /// Build an Ixon.KVMap (Array (Address × DataValue)). 
@@ -98,8 +28,8 @@ pub fn build_ixon_kvmap(kvmap: &KVMap) -> LeanArray { let arr = LeanArray::alloc(kvmap.len()); for (i, (addr, dv)) in kvmap.iter().enumerate() { let pair = LeanCtor::alloc(0, 2, 0); - pair.set(0, build_address_from_ixon(addr)); - pair.set(1, build_ixon_data_value(dv)); + pair.set(0, LeanIxAddress::build(addr)); + pair.set(1, LeanIxonDataValue::build(dv)); arr.set(i, pair); } arr @@ -121,8 +51,8 @@ pub fn decode_ixon_kvmap(obj: LeanArray) -> KVMap { .map(|pair| { let pair_ctor = pair.as_ctor(); ( - decode_ixon_address(pair_ctor.get(0).as_byte_array()), - decode_ixon_data_value(LeanIxonDataValue::new(pair_ctor.get(1))), + LeanIxAddress::new(pair_ctor.get(0)).decode(), + LeanIxonDataValue::new(pair_ctor.get(1)).decode(), ) }) .collect() @@ -139,7 +69,7 @@ fn decode_kvmap_array(obj: LeanArray) -> Vec { /// Decode Array Address. fn decode_address_array(obj: LeanArray) -> Vec
{ - decode_ixon_address_array(obj) + LeanIxAddress::decode_array(obj) } /// Build Array UInt64. @@ -157,149 +87,217 @@ fn decode_u64_array(obj: LeanArray) -> Vec { } // ============================================================================= -// ExprMetaData Build/Decode +// DataValue Build/Decode // ============================================================================= -/// Build Ixon.ExprMetaData Lean object. -/// -/// | Variant | Tag | Obj fields | Scalar bytes | -/// |------------|-----|------------------------|--------------------------| -/// | leaf | 0 | 0 | 0 | -/// | app | 1 | 0 | 16 (2× u64) | -/// | binder | 2 | 1 (name: Address) | 17 (info: u8, 2× u64) | -/// | letBinder | 3 | 1 (name: Address) | 24 (3× u64) | -/// | ref | 4 | 1 (name: Address) | 0 | -/// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | -/// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | -pub fn build_expr_meta_data(node: &ExprMetaData) -> LeanIxonExprMetaData { - let obj = match node { - ExprMetaData::Leaf => LeanObject::box_usize(0), - - ExprMetaData::App { children } => { - // Tag 1, 0 obj fields, 16 scalar bytes (2× u64) - let ctor = LeanCtor::alloc(1, 0, 16); - ctor.set_u64(0, children[0]); - ctor.set_u64(8, children[1]); - *ctor - }, - - ExprMetaData::Binder { name, info, children } => { - // Tag 2, 1 obj field (name), scalar: 2× u64 + u8 (info) - // Lean ABI sorts scalars by size descending: [tyChild: u64 @ 8] [bodyChild: u64 @ 16] [info: u8 @ 24] - // Offsets from obj_cptr: 1*8=8 base for scalar area - let ctor = LeanCtor::alloc(2, 1, 17); - ctor.set(0, build_address_from_ixon(name)); - ctor.set_u64(8, children[0]); - ctor.set_u64(16, children[1]); - ctor.set_u8(24, binder_info_to_u8(info)); - *ctor - }, - - ExprMetaData::LetBinder { name, children } => { - // Tag 3, 1 obj field (name), 24 scalar bytes (3× u64) - let ctor = LeanCtor::alloc(3, 1, 24); - ctor.set(0, build_address_from_ixon(name)); - ctor.set_u64(8, children[0]); - ctor.set_u64(16, children[1]); - 
ctor.set_u64(24, children[2]); - *ctor - }, - - ExprMetaData::Ref { name } => { - // Tag 4, 1 obj field (name), 0 scalar bytes - let ctor = LeanCtor::alloc(4, 1, 0); - ctor.set(0, build_address_from_ixon(name)); - *ctor - }, - - ExprMetaData::Prj { struct_name, child } => { - // Tag 5, 1 obj field (structName), 8 scalar bytes (1× u64) - let ctor = LeanCtor::alloc(5, 1, 8); - ctor.set(0, build_address_from_ixon(struct_name)); - ctor.set_u64(8, *child); - *ctor - }, - - ExprMetaData::Mdata { mdata, child } => { - // Tag 6, 1 obj field (mdata: Array KVMap), 8 scalar bytes (1× u64) - let mdata_arr = build_kvmap_array(mdata); - let ctor = LeanCtor::alloc(6, 1, 8); - ctor.set(0, mdata_arr); - ctor.set_u64(8, *child); - *ctor - }, - }; - LeanIxonExprMetaData::new(obj) +impl LeanIxonDataValue { + /// Build Ixon.DataValue (for metadata) + pub fn build(dv: &IxonDataValue) -> Self { + let obj = match dv { + IxonDataValue::OfString(addr) => { + let ctor = LeanCtor::alloc(0, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfBool(b) => { + let ctor = LeanCtor::alloc(1, 0, 1); + ctor.set_u8(0, if *b { 1 } else { 0 }); + *ctor + }, + IxonDataValue::OfName(addr) => { + let ctor = LeanCtor::alloc(2, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfNat(addr) => { + let ctor = LeanCtor::alloc(3, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfInt(addr) => { + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + IxonDataValue::OfSyntax(addr) => { + let ctor = LeanCtor::alloc(5, 1, 0); + ctor.set(0, LeanIxAddress::build(addr)); + *ctor + }, + }; + Self::new(obj) + } + + /// Decode Ixon.DataValue. 
+ pub fn decode(self) -> IxonDataValue { + let ctor = self.as_ctor(); + match ctor.tag() { + 0 => { + IxonDataValue::OfString(LeanIxAddress::new(ctor.get(0)).decode()) + }, + 1 => { + let b = ctor.scalar_u8(0, 0) != 0; + IxonDataValue::OfBool(b) + }, + 2 => { + IxonDataValue::OfName(LeanIxAddress::new(ctor.get(0)).decode()) + }, + 3 => IxonDataValue::OfNat(LeanIxAddress::new(ctor.get(0)).decode()), + 4 => IxonDataValue::OfInt(LeanIxAddress::new(ctor.get(0)).decode()), + 5 => { + IxonDataValue::OfSyntax(LeanIxAddress::new(ctor.get(0)).decode()) + }, + tag => panic!("Invalid Ixon.DataValue tag: {}", tag), + } + } } -/// Decode Ixon.ExprMetaData from Lean pointer. -pub fn decode_expr_meta_data(obj: LeanIxonExprMetaData) -> ExprMetaData { - // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) - if obj.is_scalar() { - let tag = obj.as_ptr() as usize >> 1; - assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {}", tag); - return ExprMetaData::Leaf; +// ============================================================================= +// ExprMetaData Build/Decode +// ============================================================================= + +impl LeanIxonExprMetaData { + /// Build Ixon.ExprMetaData Lean object. 
+ /// + /// | Variant | Tag | Obj fields | Scalar bytes | + /// |------------|-----|------------------------|--------------------------| + /// | leaf | 0 | 0 | 0 | + /// | app | 1 | 0 | 16 (2× u64) | + /// | binder | 2 | 1 (name: Address) | 17 (info: u8, 2× u64) | + /// | letBinder | 3 | 1 (name: Address) | 24 (3× u64) | + /// | ref | 4 | 1 (name: Address) | 0 | + /// | prj | 5 | 1 (structName: Address) | 8 (1× u64) | + /// | mdata | 6 | 1 (mdata: Array) | 8 (1× u64) | + pub fn build(node: &ExprMetaData) -> Self { + let obj = match node { + ExprMetaData::Leaf => LeanObject::box_usize(0), + + ExprMetaData::App { children } => { + // Tag 1, 0 obj fields, 16 scalar bytes (2× u64) + let ctor = LeanCtor::alloc(1, 0, 16); + ctor.set_u64(0, children[0]); + ctor.set_u64(8, children[1]); + *ctor + }, + + ExprMetaData::Binder { name, info, children } => { + // Tag 2, 1 obj field (name), scalar: 2× u64 + u8 (info) + // Lean ABI sorts scalars by size descending: [tyChild: u64 @ 8] [bodyChild: u64 @ 16] [info: u8 @ 24] + // Offsets from obj_cptr: 1*8=8 base for scalar area + let ctor = LeanCtor::alloc(2, 1, 17); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(16, children[1]); + ctor.set_u8(24, LeanIxBinderInfo::to_u8(info)); + *ctor + }, + + ExprMetaData::LetBinder { name, children } => { + // Tag 3, 1 obj field (name), 24 scalar bytes (3× u64) + let ctor = LeanCtor::alloc(3, 1, 24); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set_u64(8, children[0]); + ctor.set_u64(16, children[1]); + ctor.set_u64(24, children[2]); + *ctor + }, + + ExprMetaData::Ref { name } => { + // Tag 4, 1 obj field (name), 0 scalar bytes + let ctor = LeanCtor::alloc(4, 1, 0); + ctor.set(0, LeanIxAddress::build(name)); + *ctor + }, + + ExprMetaData::Prj { struct_name, child } => { + // Tag 5, 1 obj field (structName), 8 scalar bytes (1× u64) + let ctor = LeanCtor::alloc(5, 1, 8); + ctor.set(0, LeanIxAddress::build(struct_name)); + ctor.set_u64(8, *child); + 
*ctor + }, + + ExprMetaData::Mdata { mdata, child } => { + // Tag 6, 1 obj field (mdata: Array KVMap), 8 scalar bytes (1× u64) + let mdata_arr = build_kvmap_array(mdata); + let ctor = LeanCtor::alloc(6, 1, 8); + ctor.set(0, mdata_arr); + ctor.set_u64(8, *child); + *ctor + }, + }; + Self::new(obj) } - let ctor = obj.as_ctor(); - match ctor.tag() { - 1 => { - // app: 0 obj fields, 2× u64 scalar - let fun_ = ctor.scalar_u64(0, 0); - let arg = ctor.scalar_u64(0, 8); - ExprMetaData::App { children: [fun_, arg] } - }, - - 2 => { - // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): - // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let ty_child = ctor.scalar_u64(1, 0); - let body_child = ctor.scalar_u64(1, 8); - let info_byte = ctor.scalar_u8(1, 16); - let info = match info_byte { - 0 => BinderInfo::Default, - 1 => BinderInfo::Implicit, - 2 => BinderInfo::StrictImplicit, - 3 => BinderInfo::InstImplicit, - _ => panic!("Invalid BinderInfo tag: {}", info_byte), - }; - ExprMetaData::Binder { name, info, children: [ty_child, body_child] } - }, - - 3 => { - // letBinder: 1 obj field (name), 3× u64 scalar - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let ty_child = ctor.scalar_u64(1, 0); - let val_child = ctor.scalar_u64(1, 8); - let body_child = ctor.scalar_u64(1, 16); - ExprMetaData::LetBinder { - name, - children: [ty_child, val_child, body_child], - } - }, - - 4 => { - // ref: 1 obj field (name), 0 scalar - ExprMetaData::Ref { - name: decode_ixon_address(ctor.get(0).as_byte_array()), - } - }, - - 5 => { - // prj: 1 obj field (structName), 1× u64 scalar - let struct_name = decode_ixon_address(ctor.get(0).as_byte_array()); - let child = ctor.scalar_u64(1, 0); - ExprMetaData::Prj { struct_name, child } - }, - - 6 => { - // mdata: 1 obj field (mdata: Array KVMap), 1× u64 scalar - let mdata = decode_kvmap_array(ctor.get(0).as_array()); - let child = ctor.scalar_u64(1, 
0); - ExprMetaData::Mdata { mdata, child } - }, - - tag => panic!("Invalid Ixon.ExprMetaData tag: {}", tag), + + /// Decode Ixon.ExprMetaData from Lean pointer. + pub fn decode(self) -> ExprMetaData { + // Leaf (tag 0, no fields) is represented as a scalar lean_box(0) + if self.is_scalar() { + let tag = self.as_ptr() as usize >> 1; + assert_eq!(tag, 0, "Invalid scalar ExprMetaData tag: {}", tag); + return ExprMetaData::Leaf; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 1 => { + // app: 0 obj fields, 2× u64 scalar + let fun_ = ctor.scalar_u64(0, 0); + let arg = ctor.scalar_u64(0, 8); + ExprMetaData::App { children: [fun_, arg] } + }, + + 2 => { + // binder: 1 obj field (name), scalar (Lean ABI: u64s first, then u8): + // [tyChild: u64 @ 0] [bodyChild: u64 @ 8] [info: u8 @ 16] + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let ty_child = ctor.scalar_u64(1, 0); + let body_child = ctor.scalar_u64(1, 8); + let info_byte = ctor.scalar_u8(1, 16); + let info = match info_byte { + 0 => BinderInfo::Default, + 1 => BinderInfo::Implicit, + 2 => BinderInfo::StrictImplicit, + 3 => BinderInfo::InstImplicit, + _ => panic!("Invalid BinderInfo tag: {}", info_byte), + }; + ExprMetaData::Binder { name, info, children: [ty_child, body_child] } + }, + + 3 => { + // letBinder: 1 obj field (name), 3× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let ty_child = ctor.scalar_u64(1, 0); + let val_child = ctor.scalar_u64(1, 8); + let body_child = ctor.scalar_u64(1, 16); + ExprMetaData::LetBinder { + name, + children: [ty_child, val_child, body_child], + } + }, + + 4 => { + // ref: 1 obj field (name), 0 scalar + ExprMetaData::Ref { + name: LeanIxAddress::new(ctor.get(0)).decode(), + } + }, + + 5 => { + // prj: 1 obj field (structName), 1× u64 scalar + let struct_name = LeanIxAddress::new(ctor.get(0)).decode(); + let child = ctor.scalar_u64(1, 0); + ExprMetaData::Prj { struct_name, child } + }, + + 6 => { + // mdata: 1 obj field (mdata: Array KVMap), 1× 
u64 scalar + let mdata = decode_kvmap_array(ctor.get(0).as_array()); + let child = ctor.scalar_u64(1, 0); + ExprMetaData::Mdata { mdata, child } + }, + + tag => panic!("Invalid Ixon.ExprMetaData tag: {}", tag), + } } } @@ -307,23 +305,25 @@ pub fn decode_expr_meta_data(obj: LeanIxonExprMetaData) -> ExprMetaData { // ExprMetaArena Build/Decode // ============================================================================= -/// Build Ixon.ExprMetaArena Lean object. -/// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), -/// which Lean unboxes — the value IS the Array directly. -pub fn build_expr_meta_arena(arena: &ExprMeta) -> LeanArray { - let arr = LeanArray::alloc(arena.nodes.len()); - for (i, node) in arena.nodes.iter().enumerate() { - arr.set(i, build_expr_meta_data(node)); +impl LeanIxonExprMetaArena { + /// Build Ixon.ExprMetaArena Lean object. + /// ExprMetaArena is a single-field structure (nodes : Array ExprMetaData), + /// which Lean unboxes — the value IS the Array directly. + pub fn build(arena: &ExprMeta) -> Self { + let arr = LeanArray::alloc(arena.nodes.len()); + for (i, node) in arena.nodes.iter().enumerate() { + arr.set(i, LeanIxonExprMetaData::build(node)); + } + Self::new(*arr) } - arr -} -/// Decode Ixon.ExprMetaArena from Lean pointer. -/// Single-field struct is unboxed — obj IS the Array directly. -pub fn decode_expr_meta_arena(obj: LeanIxonExprMetaArena) -> ExprMeta { - let arr = obj.as_array(); - ExprMeta { - nodes: arr.map(|x| decode_expr_meta_data(LeanIxonExprMetaData::new(x))), + /// Decode Ixon.ExprMetaArena from Lean pointer. + /// Single-field struct is unboxed — obj IS the Array directly. 
+ pub fn decode(self) -> ExprMeta { + let arr = self.as_array(); + ExprMeta { + nodes: arr.map(|x| LeanIxonExprMetaData::new(x).decode()), + } } } @@ -331,130 +331,22 @@ pub fn decode_expr_meta_arena(obj: LeanIxonExprMetaArena) -> ExprMeta { // ConstantMeta Build/Decode // ============================================================================= -/// Build Ixon.ConstantMeta Lean object. -/// -/// | Variant | Tag | Obj fields | Scalar bytes | -/// |---------|-----|-----------|-------------| -/// | empty | 0 | 0 | 0 | -/// | defn | 1 | 6 (name, lvls, hints, all, ctx, arena) | 16 (2× u64) | -/// | axio | 2 | 3 (name, lvls, arena) | 8 (1× u64) | -/// | quot | 3 | 3 (name, lvls, arena) | 8 (1× u64) | -/// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | -/// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | -/// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | -pub fn build_constant_meta(meta: &ConstantMeta) -> LeanIxonConstantMeta { - let obj = match meta { - ConstantMeta::Empty => LeanObject::box_usize(0), - - ConstantMeta::Def { - name, - lvls, - hints, - all, - ctx, - arena, - type_root, - value_root, - } => { - let ctor = LeanCtor::alloc(1, 6, 16); - ctor.set(0, build_address_from_ixon(name)); - ctor.set(1, build_address_array(lvls)); - ctor.set(2, build_reducibility_hints(hints)); - ctor.set(3, build_address_array(all)); - ctor.set(4, build_address_array(ctx)); - ctor.set(5, build_expr_meta_arena(arena)); - ctor.set_u64(6 * 8, *type_root); - ctor.set_u64(6 * 8 + 8, *value_root); - *ctor - }, - - ConstantMeta::Axio { name, lvls, arena, type_root } => { - let ctor = LeanCtor::alloc(2, 3, 8); - ctor.set(0, build_address_from_ixon(name)); - ctor.set(1, build_address_array(lvls)); - ctor.set(2, build_expr_meta_arena(arena)); - ctor.set_u64(3 * 8, *type_root); - *ctor - }, - - ConstantMeta::Quot { name, lvls, arena, type_root } => { - let ctor = LeanCtor::alloc(3, 3, 8); - ctor.set(0, 
build_address_from_ixon(name)); - ctor.set(1, build_address_array(lvls)); - ctor.set(2, build_expr_meta_arena(arena)); - ctor.set_u64(3 * 8, *type_root); - *ctor - }, - - ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { - let ctor = LeanCtor::alloc(4, 6, 8); - ctor.set(0, build_address_from_ixon(name)); - ctor.set(1, build_address_array(lvls)); - ctor.set(2, build_address_array(ctors)); - ctor.set(3, build_address_array(all)); - ctor.set(4, build_address_array(ctx)); - ctor.set(5, build_expr_meta_arena(arena)); - ctor.set_u64(6 * 8, *type_root); - *ctor - }, - - ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { - let ctor = LeanCtor::alloc(5, 4, 8); - ctor.set(0, build_address_from_ixon(name)); - ctor.set(1, build_address_array(lvls)); - ctor.set(2, build_address_from_ixon(induct)); - ctor.set(3, build_expr_meta_arena(arena)); - ctor.set_u64(4 * 8, *type_root); - *ctor - }, - - ConstantMeta::Rec { - name, - lvls, - rules, - all, - ctx, - arena, - type_root, - rule_roots, - } => { - let ctor = LeanCtor::alloc(6, 7, 8); - ctor.set(0, build_address_from_ixon(name)); - ctor.set(1, build_address_array(lvls)); - ctor.set(2, build_address_array(rules)); - ctor.set(3, build_address_array(all)); - ctor.set(4, build_address_array(ctx)); - ctor.set(5, build_expr_meta_arena(arena)); - ctor.set(6, build_u64_array(rule_roots)); - ctor.set_u64(7 * 8, *type_root); - *ctor - }, - }; - LeanIxonConstantMeta::new(obj) -} +impl LeanIxonConstantMeta { + /// Build Ixon.ConstantMeta Lean object. 
+ /// + /// | Variant | Tag | Obj fields | Scalar bytes | + /// |---------|-----|-----------|-------------| + /// | empty | 0 | 0 | 0 | + /// | defn | 1 | 6 (name, lvls, hints, all, ctx, arena) | 16 (2× u64) | + /// | axio | 2 | 3 (name, lvls, arena) | 8 (1× u64) | + /// | quot | 3 | 3 (name, lvls, arena) | 8 (1× u64) | + /// | indc | 4 | 6 (name, lvls, ctors, all, ctx, arena) | 8 (1× u64) | + /// | ctor | 5 | 4 (name, lvls, induct, arena) | 8 (1× u64) | + /// | recr | 6 | 7 (name, lvls, rules, all, ctx, arena, ruleRoots) | 8 (1× u64) | + pub fn build(meta: &ConstantMeta) -> Self { + let obj = match meta { + ConstantMeta::Empty => LeanObject::box_usize(0), -/// Decode Ixon.ConstantMeta from Lean pointer. -pub fn decode_constant_meta(obj: LeanIxonConstantMeta) -> ConstantMeta { - // Empty (tag 0, no fields) is represented as a scalar lean_box(0) - if obj.is_scalar() { - let tag = obj.as_ptr() as usize >> 1; - assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {}", tag); - return ConstantMeta::Empty; - } - let ctor = obj.as_ctor(); - match ctor.tag() { - 1 => { - // defn: 6 obj fields, 2× u64 scalar - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let lvls = decode_address_array(ctor.get(1).as_array()); - let hints = - decode_reducibility_hints(LeanIxReducibilityHints::new(ctor.get(2))); - let all = decode_address_array(ctor.get(3).as_array()); - let ctx = decode_address_array(ctor.get(4).as_array()); - let arena = - decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(5))); - let type_root = ctor.scalar_u64(6, 0); - let value_root = ctor.scalar_u64(6, 8); ConstantMeta::Def { name, lvls, @@ -464,64 +356,59 @@ pub fn decode_constant_meta(obj: LeanIxonConstantMeta) -> ConstantMeta { arena, type_root, value_root, - } - }, - - 2 => { - // axio: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let lvls = decode_address_array(ctor.get(1).as_array()); - let arena = - 
decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(2))); - let type_root = ctor.scalar_u64(3, 0); - ConstantMeta::Axio { name, lvls, arena, type_root } - }, - - 3 => { - // quot: 3 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let lvls = decode_address_array(ctor.get(1).as_array()); - let arena = - decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(2))); - let type_root = ctor.scalar_u64(3, 0); - ConstantMeta::Quot { name, lvls, arena, type_root } - }, - - 4 => { - // indc: 6 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let lvls = decode_address_array(ctor.get(1).as_array()); - let ctors = decode_address_array(ctor.get(2).as_array()); - let all = decode_address_array(ctor.get(3).as_array()); - let ctx = decode_address_array(ctor.get(4).as_array()); - let arena = - decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(5))); - let type_root = ctor.scalar_u64(6, 0); - ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } - }, - - 5 => { - // ctor: 4 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let lvls = decode_address_array(ctor.get(1).as_array()); - let induct = decode_ixon_address(ctor.get(2).as_byte_array()); - let arena = - decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(3))); - let type_root = ctor.scalar_u64(4, 0); - ConstantMeta::Ctor { name, lvls, induct, arena, type_root } - }, - - 6 => { - // recr: 7 obj fields, 1× u64 scalar - let name = decode_ixon_address(ctor.get(0).as_byte_array()); - let lvls = decode_address_array(ctor.get(1).as_array()); - let rules = decode_address_array(ctor.get(2).as_array()); - let all = decode_address_array(ctor.get(3).as_array()); - let ctx = decode_address_array(ctor.get(4).as_array()); - let arena = - decode_expr_meta_arena(LeanIxonExprMetaArena::new(ctor.get(5))); - let rule_roots = decode_u64_array(ctor.get(6).as_array()); - let type_root = 
ctor.scalar_u64(7, 0); + } => { + let ctor = LeanCtor::alloc(1, 6, 16); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxReducibilityHints::build(hints)); + ctor.set(3, LeanIxAddress::build_array(all)); + ctor.set(4, LeanIxAddress::build_array(ctx)); + ctor.set(5, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(6 * 8, *type_root); + ctor.set_u64(6 * 8 + 8, *value_root); + *ctor + }, + + ConstantMeta::Axio { name, lvls, arena, type_root } => { + let ctor = LeanCtor::alloc(2, 3, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor + }, + + ConstantMeta::Quot { name, lvls, arena, type_root } => { + let ctor = LeanCtor::alloc(3, 3, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(3 * 8, *type_root); + *ctor + }, + + ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } => { + let ctor = LeanCtor::alloc(4, 6, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxAddress::build_array(ctors)); + ctor.set(3, LeanIxAddress::build_array(all)); + ctor.set(4, LeanIxAddress::build_array(ctx)); + ctor.set(5, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(6 * 8, *type_root); + *ctor + }, + + ConstantMeta::Ctor { name, lvls, induct, arena, type_root } => { + let ctor = LeanCtor::alloc(5, 4, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxAddress::build(induct)); + ctor.set(3, LeanIxonExprMetaArena::build(arena)); + ctor.set_u64(4 * 8, *type_root); + *ctor + }, + ConstantMeta::Rec { name, lvls, @@ -531,10 +418,125 @@ pub fn decode_constant_meta(obj: LeanIxonConstantMeta) -> ConstantMeta { arena, type_root, rule_roots, - 
} - }, + } => { + let ctor = LeanCtor::alloc(6, 7, 8); + ctor.set(0, LeanIxAddress::build(name)); + ctor.set(1, LeanIxAddress::build_array(lvls)); + ctor.set(2, LeanIxAddress::build_array(rules)); + ctor.set(3, LeanIxAddress::build_array(all)); + ctor.set(4, LeanIxAddress::build_array(ctx)); + ctor.set(5, LeanIxonExprMetaArena::build(arena)); + ctor.set(6, build_u64_array(rule_roots)); + ctor.set_u64(7 * 8, *type_root); + *ctor + }, + }; + Self::new(obj) + } - tag => panic!("Invalid Ixon.ConstantMeta tag: {}", tag), + /// Decode Ixon.ConstantMeta from Lean pointer. + pub fn decode(self) -> ConstantMeta { + // Empty (tag 0, no fields) is represented as a scalar lean_box(0) + if self.is_scalar() { + let tag = self.as_ptr() as usize >> 1; + assert_eq!(tag, 0, "Invalid scalar ConstantMeta tag: {}", tag); + return ConstantMeta::Empty; + } + let ctor = self.as_ctor(); + match ctor.tag() { + 1 => { + // defn: 6 obj fields, 2× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let hints = + LeanIxReducibilityHints::new(ctor.get(2)).decode(); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = + LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let type_root = ctor.scalar_u64(6, 0); + let value_root = ctor.scalar_u64(6, 8); + ConstantMeta::Def { + name, + lvls, + hints, + all, + ctx, + arena, + type_root, + value_root, + } + }, + + 2 => { + // axio: 3 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let arena = + LeanIxonExprMetaArena::new(ctor.get(2)).decode(); + let type_root = ctor.scalar_u64(3, 0); + ConstantMeta::Axio { name, lvls, arena, type_root } + }, + + 3 => { + // quot: 3 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let 
arena = + LeanIxonExprMetaArena::new(ctor.get(2)).decode(); + let type_root = ctor.scalar_u64(3, 0); + ConstantMeta::Quot { name, lvls, arena, type_root } + }, + + 4 => { + // indc: 6 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let ctors = decode_address_array(ctor.get(2).as_array()); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = + LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let type_root = ctor.scalar_u64(6, 0); + ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } + }, + + 5 => { + // ctor: 4 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let induct = LeanIxAddress::new(ctor.get(2)).decode(); + let arena = + LeanIxonExprMetaArena::new(ctor.get(3)).decode(); + let type_root = ctor.scalar_u64(4, 0); + ConstantMeta::Ctor { name, lvls, induct, arena, type_root } + }, + + 6 => { + // recr: 7 obj fields, 1× u64 scalar + let name = LeanIxAddress::new(ctor.get(0)).decode(); + let lvls = decode_address_array(ctor.get(1).as_array()); + let rules = decode_address_array(ctor.get(2).as_array()); + let all = decode_address_array(ctor.get(3).as_array()); + let ctx = decode_address_array(ctor.get(4).as_array()); + let arena = + LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let rule_roots = decode_u64_array(ctor.get(6).as_array()); + let type_root = ctor.scalar_u64(7, 0); + ConstantMeta::Rec { + name, + lvls, + rules, + all, + ctx, + arena, + type_root, + rule_roots, + } + }, + + tag => panic!("Invalid Ixon.ConstantMeta tag: {}", tag), + } } } @@ -542,41 +544,45 @@ pub fn decode_constant_meta(obj: LeanIxonConstantMeta) -> ConstantMeta { // Named and Comm Build/Decode // ============================================================================= -/// Build Ixon.Named { addr : 
Address, constMeta : ConstantMeta } -pub fn build_named(addr: &Address, meta: &ConstantMeta) -> LeanIxonNamed { - let addr_obj = build_address_from_ixon(addr); - let meta_obj = build_constant_meta(meta); - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, addr_obj); - ctor.set(1, meta_obj); - LeanIxonNamed::new(*ctor) -} +impl LeanIxonNamed { + /// Build Ixon.Named { addr : Address, constMeta : ConstantMeta } + pub fn build(addr: &Address, meta: &ConstantMeta) -> Self { + let addr_obj = LeanIxAddress::build(addr); + let meta_obj = LeanIxonConstantMeta::build(meta); + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, addr_obj); + ctor.set(1, meta_obj); + Self::new(*ctor) + } -/// Decode Ixon.Named. -pub fn decode_named(obj: LeanIxonNamed) -> Named { - let ctor = obj.as_ctor(); - Named { - addr: decode_ixon_address(ctor.get(0).as_byte_array()), - meta: decode_constant_meta(LeanIxonConstantMeta::new(ctor.get(1))), + /// Decode Ixon.Named. + pub fn decode(self) -> Named { + let ctor = self.as_ctor(); + Named { + addr: LeanIxAddress::new(ctor.get(0)).decode(), + meta: LeanIxonConstantMeta::new(ctor.get(1)).decode(), + } } } -/// Build Ixon.Comm { secret : Address, payload : Address } -pub fn build_ixon_comm(comm: &Comm) -> LeanIxonComm { - let secret_obj = build_address_from_ixon(&comm.secret); - let payload_obj = build_address_from_ixon(&comm.payload); - let ctor = LeanCtor::alloc(0, 2, 0); - ctor.set(0, secret_obj); - ctor.set(1, payload_obj); - LeanIxonComm::new(*ctor) -} +impl LeanIxonComm { + /// Build Ixon.Comm { secret : Address, payload : Address } + pub fn build(comm: &Comm) -> Self { + let secret_obj = LeanIxAddress::build(&comm.secret); + let payload_obj = LeanIxAddress::build(&comm.payload); + let ctor = LeanCtor::alloc(0, 2, 0); + ctor.set(0, secret_obj); + ctor.set(1, payload_obj); + Self::new(*ctor) + } -/// Decode Ixon.Comm. 
-pub fn decode_ixon_comm(obj: LeanIxonComm) -> Comm { - let ctor = obj.as_ctor(); - Comm { - secret: decode_ixon_address(ctor.get(0).as_byte_array()), - payload: decode_ixon_address(ctor.get(1).as_byte_array()), + /// Decode Ixon.Comm. + pub fn decode(self) -> Comm { + let ctor = self.as_ctor(); + Comm { + secret: LeanIxAddress::new(ctor.get(0)).decode(), + payload: LeanIxAddress::new(ctor.get(1)).decode(), + } } } @@ -589,15 +595,15 @@ pub fn decode_ixon_comm(obj: LeanIxonComm) -> Comm { pub extern "C" fn rs_roundtrip_ixon_data_value( obj: LeanIxonDataValue, ) -> LeanIxonDataValue { - let dv = decode_ixon_data_value(obj); - build_ixon_data_value(&dv) + let dv = obj.decode(); + LeanIxonDataValue::build(&dv) } /// Round-trip Ixon.Comm. #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanIxonComm) -> LeanIxonComm { - let comm = decode_ixon_comm(obj); - build_ixon_comm(&comm) + let comm = obj.decode(); + LeanIxonComm::build(&comm) } /// Round-trip Ixon.ExprMetaData. @@ -605,8 +611,8 @@ pub extern "C" fn rs_roundtrip_ixon_comm(obj: LeanIxonComm) -> LeanIxonComm { pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( obj: LeanIxonExprMetaData, ) -> LeanIxonExprMetaData { - let node = decode_expr_meta_data(obj); - build_expr_meta_data(&node) + let node = obj.decode(); + LeanIxonExprMetaData::build(&node) } /// Round-trip Ixon.ExprMetaArena. @@ -614,8 +620,8 @@ pub extern "C" fn rs_roundtrip_ixon_expr_meta_data( pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( obj: LeanIxonExprMetaArena, ) -> LeanIxonExprMetaArena { - let arena = decode_expr_meta_arena(obj); - LeanIxonExprMetaArena::new(*build_expr_meta_arena(&arena)) + let arena = obj.decode(); + LeanIxonExprMetaArena::build(&arena) } /// Round-trip Ixon.ConstantMeta (full arena-based). 
@@ -623,13 +629,13 @@ pub extern "C" fn rs_roundtrip_ixon_expr_meta_arena( pub extern "C" fn rs_roundtrip_ixon_constant_meta( obj: LeanIxonConstantMeta, ) -> LeanIxonConstantMeta { - let meta = decode_constant_meta(obj); - build_constant_meta(&meta) + let meta = obj.decode(); + LeanIxonConstantMeta::build(&meta) } /// Round-trip Ixon.Named (with real metadata). #[unsafe(no_mangle)] pub extern "C" fn rs_roundtrip_ixon_named(obj: LeanIxonNamed) -> LeanIxonNamed { - let named = decode_named(obj); - build_named(&named.addr, &named.meta) + let named = obj.decode(); + LeanIxonNamed::build(&named.addr, &named.meta) } diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs index 3b9b6261..4815a4b7 100644 --- a/src/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -8,14 +8,12 @@ use std::sync::Arc; use crate::ix::address::Address; use crate::ix::ixon::serialize::put_expr; use crate::ix::ixon::sharing::hash_expr; -use crate::ix::ixon::univ::{Univ as IxonUniv, put_univ}; +use crate::ix::ixon::univ::put_univ; use crate::lean::{ LeanIxAddress, LeanIxonConstant, LeanIxonExpr, LeanIxonRawEnv, LeanIxonUniv, }; use lean_ffi::object::LeanByteArray; -use crate::ffi::ixon::constant::{decode_ixon_address, decode_ixon_constant}; -use crate::ffi::ixon::expr::decode_ixon_expr; /// Check if Lean's computed hash matches Rust's computed hash. #[unsafe(no_mangle)] @@ -23,49 +21,19 @@ pub extern "C" fn rs_expr_hash_matches( expr_obj: LeanIxonExpr, expected_hash: LeanIxAddress, ) -> bool { - let expr = Arc::new(decode_ixon_expr(expr_obj)); + let expr = Arc::new(expr_obj.decode()); let hash = hash_expr(&expr); - let expected = decode_ixon_address(expected_hash); + let expected = expected_hash.decode(); Address::from_slice(hash.as_bytes()).is_ok_and(|h| h == expected) } -/// Decode a Lean `Ixon.Univ` to a Rust `IxonUniv`. 
-fn decode_ixon_univ(obj: LeanIxonUniv) -> Arc { - if obj.is_scalar() { - return IxonUniv::zero(); - } - let ctor = obj.as_ctor(); - match ctor.tag() { - 1 => { - let [inner] = ctor.objs::<1>(); - IxonUniv::succ(decode_ixon_univ(LeanIxonUniv::new(inner))) - }, - 2 => { - let [a, b] = ctor.objs::<2>(); - IxonUniv::max( - decode_ixon_univ(LeanIxonUniv::new(a)), - decode_ixon_univ(LeanIxonUniv::new(b)), - ) - }, - 3 => { - let [a, b] = ctor.objs::<2>(); - IxonUniv::imax( - decode_ixon_univ(LeanIxonUniv::new(a)), - decode_ixon_univ(LeanIxonUniv::new(b)), - ) - }, - 4 => IxonUniv::var(ctor.scalar_u64(0, 0)), - tag => panic!("Unknown Ixon.Univ tag: {tag}"), - } -} - /// Check if Lean's Ixon.Univ serialization matches Rust. #[unsafe(no_mangle)] pub extern "C" fn rs_eq_univ_serialization( univ_obj: LeanIxonUniv, bytes_obj: LeanByteArray, ) -> bool { - let univ = decode_ixon_univ(univ_obj); + let univ = univ_obj.decode(); let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_univ(&univ, &mut buf); @@ -78,7 +46,7 @@ pub extern "C" fn rs_eq_expr_serialization( expr_obj: LeanIxonExpr, bytes_obj: LeanByteArray, ) -> bool { - let expr = decode_ixon_expr(expr_obj); + let expr = expr_obj.decode(); let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); put_expr(&expr, &mut buf); @@ -91,7 +59,7 @@ pub extern "C" fn rs_eq_constant_serialization( constant_obj: LeanIxonConstant, bytes_obj: LeanByteArray, ) -> bool { - let constant = decode_ixon_constant(constant_obj); + let constant = constant_obj.decode(); let bytes_data = bytes_obj.as_bytes(); let mut buf = Vec::with_capacity(bytes_data.len()); constant.put(&mut buf); @@ -105,10 +73,9 @@ pub extern "C" fn rs_eq_env_serialization( raw_env_obj: LeanIxonRawEnv, bytes_obj: LeanByteArray, ) -> bool { - use crate::ffi::ixon::env::decode_raw_env; use crate::ix::ixon::env::Env; - let decoded = decode_raw_env(raw_env_obj); + let decoded = raw_env_obj.decode(); let 
bytes_data = bytes_obj.as_bytes(); // Deserialize Lean's bytes using Rust's deserializer @@ -145,12 +112,8 @@ pub extern "C" fn rs_eq_env_serialization( return false; } for rc in &decoded.comms { - let expected_comm = crate::ix::ixon::comm::Comm { - secret: rc.comm.secret.clone(), - payload: rc.comm.payload.clone(), - }; match rust_env.comms.get(&rc.addr) { - Some(c) if *c == expected_comm => {}, + Some(c) if *c == rc.comm => {}, _ => return false, } } diff --git a/src/ffi/ixon/sharing.rs b/src/ffi/ixon/sharing.rs index fdb259be..85e5ddd9 100644 --- a/src/ffi/ixon/sharing.rs +++ b/src/ffi/ixon/sharing.rs @@ -10,16 +10,12 @@ use crate::ix::ixon::sharing::{ use crate::lean::LeanIxonExpr; use lean_ffi::object::{LeanArray, LeanByteArray}; -use crate::ffi::ixon::expr::decode_ixon_expr_array; -use crate::ffi::ixon::expr::decode_ixon_expr; - /// FFI: Debug sharing analysis - print usage counts for subterms with usage >= 2. /// This helps diagnose why Lean and Rust make different sharing decisions. #[unsafe(no_mangle)] pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanArray) { - let arr = exprs_obj; let exprs: Vec> = - arr.map(|x| Arc::new(decode_ixon_expr(LeanIxonExpr::new(x)))); + exprs_obj.map(|x| Arc::new(LeanIxonExpr::new(x).decode())); println!("[Rust] Analyzing {} input expressions", exprs.len()); @@ -60,7 +56,7 @@ pub extern "C" fn rs_debug_sharing_analysis(exprs_obj: LeanArray) { /// Returns the number of shared items Rust would produce. 
#[unsafe(no_mangle)] extern "C" fn rs_analyze_sharing_count(exprs_obj: LeanArray) -> u64 { - let exprs = decode_ixon_expr_array(exprs_obj); + let exprs = LeanIxonExpr::decode_array(exprs_obj); let (info_map, _ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -77,7 +73,7 @@ extern "C" fn rs_run_sharing_analysis( out_sharing_vec: LeanByteArray, out_rewritten: LeanByteArray, ) -> u64 { - let exprs = decode_ixon_expr_array(exprs_obj); + let exprs = LeanIxonExpr::decode_array(exprs_obj); let (info_map, ptr_to_hash) = analyze_block(&exprs, false); let shared_hashes = decide_sharing(&info_map); @@ -116,10 +112,10 @@ extern "C" fn rs_compare_sharing_analysis( _lean_rewritten_obj: LeanArray, ) -> u64 { // Decode input expressions - let exprs = decode_ixon_expr_array(exprs_obj); + let exprs = LeanIxonExpr::decode_array(exprs_obj); // Decode Lean's sharing vector - let lean_sharing = decode_ixon_expr_array(lean_sharing_obj); + let lean_sharing = LeanIxonExpr::decode_array(lean_sharing_obj); // Run Rust's sharing analysis let (info_map, ptr_to_hash) = analyze_block(&exprs, false); diff --git a/src/ffi/ixon/univ.rs b/src/ffi/ixon/univ.rs index b4bad24e..074363ff 100644 --- a/src/ffi/ixon/univ.rs +++ b/src/ffi/ixon/univ.rs @@ -75,16 +75,6 @@ impl LeanIxonUniv { } } -/// Build an Array of Ixon.Univ (standalone wrapper). -pub fn build_ixon_univ_array(univs: &[Arc]) -> LeanArray { - LeanIxonUniv::build_array(univs) -} - -/// Decode Array Ixon.Univ (standalone wrapper). -pub fn decode_ixon_univ_array(obj: LeanArray) -> Vec> { - LeanIxonUniv::decode_array(obj) -} - // ============================================================================= // FFI Exports // ============================================================================= diff --git a/src/lean.rs b/src/lean.rs index 29aba13e..a9f72353 100644 --- a/src/lean.rs +++ b/src/lean.rs @@ -129,5 +129,36 @@ lean_ffi::lean_domain_type! 
{ LeanIxBlockCompareDetail; } -/// `Ix.Address = { hash : ByteArray }` — single-field struct, unboxed to `ByteArray`. -pub type LeanIxAddress = lean_ffi::object::LeanByteArray; +/// Lean `Address` object — newtype over `LeanByteArray`. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub struct LeanIxAddress(lean_ffi::object::LeanByteArray); + +impl std::ops::Deref for LeanIxAddress { + type Target = lean_ffi::object::LeanByteArray; + #[inline] + fn deref(&self) -> &lean_ffi::object::LeanByteArray { + &self.0 + } +} + +impl From for lean_ffi::object::LeanObject { + #[inline] + fn from(x: LeanIxAddress) -> Self { + x.0.into() + } +} + +impl From for LeanIxAddress { + #[inline] + fn from(x: lean_ffi::object::LeanByteArray) -> Self { + Self(x) + } +} + +impl LeanIxAddress { + #[inline] + pub fn new(obj: lean_ffi::object::LeanObject) -> Self { + Self(obj.as_byte_array()) + } +} From 73262b5a211cb29dd41c0cae0ae1597659092065 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Wed, 4 Mar 2026 16:48:03 -0500 Subject: [PATCH 25/27] Fmt --- src/ffi/aiur/protocol.rs | 4 +--- src/ffi/compile.rs | 36 ++++++++++++++++---------------- src/ffi/graph.rs | 4 +--- src/ffi/ix/constant.rs | 11 +++------- src/ffi/ix/data.rs | 14 +++---------- src/ffi/ix/expr.rs | 13 ++++++------ src/ffi/ix/name.rs | 5 +---- src/ffi/ixon/compare.rs | 9 ++++++-- src/ffi/ixon/env.rs | 18 +++++----------- src/ffi/ixon/expr.rs | 21 ++++--------------- src/ffi/ixon/meta.rs | 43 ++++++++++++--------------------------- src/ffi/ixon/serialize.rs | 1 - src/ffi/lean_env.rs | 11 +++------- 13 files changed, 64 insertions(+), 126 deletions(-) diff --git a/src/ffi/aiur/protocol.rs b/src/ffi/aiur/protocol.rs index 7083f4e4..f646bf3b 100644 --- a/src/ffi/aiur/protocol.rs +++ b/src/ffi/aiur/protocol.rs @@ -181,9 +181,7 @@ fn decode_fri_parameters(obj: LeanAiurFriParameters) -> FriParameters { } } -fn decode_io_buffer_map( - arr: LeanArray, -) -> FxHashMap, IOKeyInfo> { +fn 
decode_io_buffer_map(arr: LeanArray) -> FxHashMap, IOKeyInfo> { let mut map = FxHashMap::with_capacity_and_hasher(arr.len(), FxBuildHasher); for elt in arr.iter() { let pair = elt.as_ctor(); diff --git a/src/ffi/compile.rs b/src/ffi/compile.rs index 6737c225..b83b30ad 100644 --- a/src/ffi/compile.rs +++ b/src/ffi/compile.rs @@ -24,9 +24,9 @@ use crate::ix::ixon::{Comm, ConstantMeta}; use crate::lean::{ LeanIxBlockCompareDetail, LeanIxBlockCompareResult, LeanIxCompileError, LeanIxCompilePhases, LeanIxCondensedBlocks, LeanIxConstantInfo, - LeanIxDecompileError, LeanIxName, LeanIxRawEnvironment, - LeanIxSerializeError, LeanIxonRawBlob, LeanIxonRawComm, LeanIxonRawConst, - LeanIxonRawEnv, LeanIxonRawNameEntry, LeanIxonRawNamed, + LeanIxDecompileError, LeanIxName, LeanIxRawEnvironment, LeanIxSerializeError, + LeanIxonRawBlob, LeanIxonRawComm, LeanIxonRawConst, LeanIxonRawEnv, + LeanIxonRawNameEntry, LeanIxonRawNamed, }; use lean_ffi::nat::Nat; use lean_ffi::object::LeanIOResult; @@ -39,9 +39,9 @@ use dashmap::DashMap; use dashmap::DashSet; use crate::ffi::builder::LeanBuildCache; -use crate::lean::LeanIxAddress; use crate::ffi::ixon::env::decoded_to_ixon_env; use crate::ffi::lean_env::{GlobalCache, decode_env, decode_name}; +use crate::lean::LeanIxAddress; // ============================================================================= // Helper builders @@ -331,9 +331,7 @@ pub extern "C" fn rs_roundtrip_raw_env( /// FFI function to run all compilation phases and return combined results. 
#[unsafe(no_mangle)] -pub extern "C" fn rs_compile_phases( - env_consts_ptr: LeanList, -) -> LeanIOResult { +pub extern "C" fn rs_compile_phases(env_consts_ptr: LeanList) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = decode_env(env_consts_ptr); let env_len = rust_env.len(); @@ -1267,15 +1265,15 @@ impl LeanIxDecompileError { let idx = ctor.scalar_u64(2, 0); DecompileError::InvalidUnivVarIndex { idx, max, constant } }, - 5 => DecompileError::MissingAddress( - LeanIxAddress::new(ctor.get(0)).decode(), - ), + 5 => { + DecompileError::MissingAddress(LeanIxAddress::new(ctor.get(0)).decode()) + }, 6 => DecompileError::MissingMetadata( LeanIxAddress::new(ctor.get(0)).decode(), ), - 7 => DecompileError::BlobNotFound( - LeanIxAddress::new(ctor.get(0)).decode(), - ), + 7 => { + DecompileError::BlobNotFound(LeanIxAddress::new(ctor.get(0)).decode()) + }, 8 => { let addr = LeanIxAddress::new(ctor.get(0)).decode(); let expected = ctor.get(1).as_string().to_string(); @@ -1348,9 +1346,9 @@ impl LeanIxCompileError { let name = ctor.get(0).as_string().to_string(); CompileError::MissingConstant { name } }, - 1 => CompileError::MissingAddress( - LeanIxAddress::new(ctor.get(0)).decode(), - ), + 1 => { + CompileError::MissingAddress(LeanIxAddress::new(ctor.get(0)).decode()) + }, 2 => { let reason = ctor.get(0).as_string().to_string(); CompileError::InvalidMutualBlock { reason } @@ -1364,9 +1362,9 @@ impl LeanIxCompileError { let param = ctor.get(1).as_string().to_string(); CompileError::UnknownUnivParam { curr, param } }, - 5 => CompileError::Serialize( - LeanIxSerializeError::new(ctor.get(0)).decode(), - ), + 5 => { + CompileError::Serialize(LeanIxSerializeError::new(ctor.get(0)).decode()) + }, _ => unreachable!("Invalid CompileError tag: {}", ctor.tag()), } } diff --git a/src/ffi/graph.rs b/src/ffi/graph.rs index 6402d076..1a3a0d7e 100644 --- a/src/ffi/graph.rs +++ b/src/ffi/graph.rs @@ -97,9 +97,7 @@ impl LeanIxCondensedBlocks { /// FFI function to 
build a reference graph from a Lean environment. #[unsafe(no_mangle)] -pub extern "C" fn rs_build_ref_graph( - env_consts_ptr: LeanList, -) -> LeanIOResult { +pub extern "C" fn rs_build_ref_graph(env_consts_ptr: LeanList) -> LeanIOResult { ffi_io_guard(std::panic::AssertUnwindSafe(|| { let rust_env = decode_env(env_consts_ptr); let rust_env = Arc::new(rust_env); diff --git a/src/ffi/ix/constant.rs b/src/ffi/ix/constant.rs index 50489cad..49e2d9dc 100644 --- a/src/ffi/ix/constant.rs +++ b/src/ffi/ix/constant.rs @@ -152,10 +152,7 @@ impl LeanIxRecursorRule { impl LeanIxConstantInfo { /// Build a Ix.ConstantInfo from a Rust ConstantInfo. - pub fn build( - cache: &mut LeanBuildCache, - info: &ConstantInfo, - ) -> Self { + pub fn build(cache: &mut LeanBuildCache, info: &ConstantInfo) -> Self { let result = match info { // | axiomInfo (v : AxiomVal) -- tag 0 ConstantInfo::AxiomInfo(v) => { @@ -422,10 +419,8 @@ impl LeanIxConstantInfo { let k = inner.scalar_u8(7, 0) != 0; let is_unsafe = inner.scalar_u8(7, 1) != 0; - let rules: Vec = inner - .get(6) - .as_array() - .map(|x| LeanIxRecursorRule::new(x).decode()); + let rules: Vec = + inner.get(6).as_array().map(|x| LeanIxRecursorRule::new(x).decode()); ConstantInfo::RecInfo(RecursorVal { cnst: LeanIxConstantVal::new(inner.get(0)).decode(), diff --git a/src/ffi/ix/data.rs b/src/ffi/ix/data.rs index 24a36177..e0fa6bf0 100644 --- a/src/ffi/ix/data.rs +++ b/src/ffi/ix/data.rs @@ -124,10 +124,7 @@ impl LeanIxSourceInfo { impl LeanIxSyntaxPreresolved { /// Build a Ix.SyntaxPreresolved. - pub fn build( - cache: &mut LeanBuildCache, - sp: &SyntaxPreresolved, - ) -> Self { + pub fn build(cache: &mut LeanBuildCache, sp: &SyntaxPreresolved) -> Self { match sp { // | namespace (name : Name) -- tag 0 SyntaxPreresolved::Namespace(name) => { @@ -283,10 +280,7 @@ impl LeanIxSyntax { impl LeanIxDataValue { /// Build Ix.DataValue. 
- pub fn build( - cache: &mut LeanBuildCache, - dv: &DataValue, - ) -> Self { + pub fn build(cache: &mut LeanBuildCache, dv: &DataValue) -> Self { match dv { DataValue::OfString(s) => { let obj = LeanCtor::alloc(0, 1, 0); @@ -374,9 +368,7 @@ impl LeanIxDataValue { }, 5 => { // ofSyntax: 1 object field - DataValue::OfSyntax( - LeanIxSyntax::new(ctor.get(0)).decode().into(), - ) + DataValue::OfSyntax(LeanIxSyntax::new(ctor.get(0)).decode().into()) }, _ => panic!("Invalid DataValue tag: {}", ctor.tag()), } diff --git a/src/ffi/ix/expr.rs b/src/ffi/ix/expr.rs index fae10519..6e37989d 100644 --- a/src/ffi/ix/expr.rs +++ b/src/ffi/ix/expr.rs @@ -238,13 +238,12 @@ impl LeanIxExpr { }, 10 => { // mdata: data, expr, hash - let data: Vec<(Name, DataValue)> = - ctor.get(0).as_array().map(|obj| { - let pair = obj.as_ctor(); - let name = LeanIxName::new(pair.get(0)).decode(); - let dv = LeanIxDataValue::new(pair.get(1)).decode(); - (name, dv) - }); + let data: Vec<(Name, DataValue)> = ctor.get(0).as_array().map(|obj| { + let pair = obj.as_ctor(); + let name = LeanIxName::new(pair.get(0)).decode(); + let dv = LeanIxDataValue::new(pair.get(1)).decode(); + (name, dv) + }); let inner = Self::new(ctor.get(1)).decode(); Expr::mdata(data, inner) diff --git a/src/ffi/ix/name.rs b/src/ffi/ix/name.rs index e8f0ca07..dd181cf9 100644 --- a/src/ffi/ix/name.rs +++ b/src/ffi/ix/name.rs @@ -55,10 +55,7 @@ impl LeanIxName { } /// Build an Array of Names. 
- pub fn build_array( - cache: &mut LeanBuildCache, - names: &[Name], - ) -> LeanArray { + pub fn build_array(cache: &mut LeanBuildCache, names: &[Name]) -> LeanArray { let arr = LeanArray::alloc(names.len()); for (i, name) in names.iter().enumerate() { arr.set(i, Self::build(cache, name)); diff --git a/src/ffi/ixon/compare.rs b/src/ffi/ixon/compare.rs index bfdff714..e5def305 100644 --- a/src/ffi/ixon/compare.rs +++ b/src/ffi/ixon/compare.rs @@ -124,8 +124,13 @@ pub unsafe extern "C" fn rs_compare_block_v2( Some((bytes, sharing_len)) => (bytes, *sharing_len as u64), None => { // Block not found in Rust compilation - let result = - LeanIxBlockCompareResult::build(false, true, lean_data.len() as u64, 0, 0); + let result = LeanIxBlockCompareResult::build( + false, + true, + lean_data.len() as u64, + 0, + 0, + ); return LeanIxBlockCompareDetail::build(result, lean_sharing_len, 0); }, }; diff --git a/src/ffi/ixon/env.rs b/src/ffi/ixon/env.rs index 0fba98fb..143fb23c 100644 --- a/src/ffi/ixon/env.rs +++ b/src/ffi/ixon/env.rs @@ -48,10 +48,7 @@ impl LeanIxonRawConst { } /// Build from individual parts (used by compile.rs). - pub fn build_from_parts( - addr: &Address, - constant: &IxonConstant, - ) -> Self { + pub fn build_from_parts(addr: &Address, constant: &IxonConstant) -> Self { let ctor = LeanCtor::alloc(0, 2, 0); ctor.set(0, LeanIxAddress::build(addr)); ctor.set(1, LeanIxonConstant::build(constant)); @@ -82,10 +79,7 @@ impl LeanIxonRawNamed { } /// Build Ixon.RawNamed Lean object. 
- pub fn build( - cache: &mut LeanBuildCache, - rn: &DecodedRawNamed, - ) -> Self { + pub fn build(cache: &mut LeanBuildCache, rn: &DecodedRawNamed) -> Self { let ctor = LeanCtor::alloc(0, 3, 0); ctor.set(0, LeanIxName::build(cache, &rn.name)); ctor.set(1, LeanIxAddress::build(&rn.addr)); @@ -279,7 +273,8 @@ impl LeanIxonRawEnv { // Build names array let names_arr = LeanArray::alloc(env.names.len()); for (i, rn) in env.names.iter().enumerate() { - names_arr.set(i, LeanIxonRawNameEntry::build(&mut cache, &rn.addr, &rn.name)); + names_arr + .set(i, LeanIxonRawNameEntry::build(&mut cache, &rn.addr, &rn.name)); } // Build RawEnv structure @@ -346,10 +341,7 @@ pub fn ixon_env_to_decoded(env: &IxonEnv) -> DecodedRawEnv { let comms = env .comms .iter() - .map(|e| DecodedRawComm { - addr: e.key().clone(), - comm: e.value().clone(), - }) + .map(|e| DecodedRawComm { addr: e.key().clone(), comm: e.value().clone() }) .collect(); let names = env .names diff --git a/src/ffi/ixon/expr.rs b/src/ffi/ixon/expr.rs index c231c96a..31cd2293 100644 --- a/src/ffi/ixon/expr.rs +++ b/src/ffi/ixon/expr.rs @@ -157,11 +157,7 @@ impl LeanIxonExpr { let val_obj = Self::new(ctor.get(0)); let type_ref_idx = ctor.scalar_u64(1, 0); let field_idx = ctor.scalar_u64(1, 8); - IxonExpr::Prj( - type_ref_idx, - field_idx, - Arc::new(val_obj.decode()), - ) + IxonExpr::Prj(type_ref_idx, field_idx, Arc::new(val_obj.decode())) }, 5 => { let ref_idx = ctor.scalar_u64(0, 0); @@ -174,26 +170,17 @@ impl LeanIxonExpr { 7 => { let f_obj = Self::new(ctor.get(0)); let a_obj = Self::new(ctor.get(1)); - IxonExpr::App( - Arc::new(f_obj.decode()), - Arc::new(a_obj.decode()), - ) + IxonExpr::App(Arc::new(f_obj.decode()), Arc::new(a_obj.decode())) }, 8 => { let ty_obj = Self::new(ctor.get(0)); let body_obj = Self::new(ctor.get(1)); - IxonExpr::Lam( - Arc::new(ty_obj.decode()), - Arc::new(body_obj.decode()), - ) + IxonExpr::Lam(Arc::new(ty_obj.decode()), Arc::new(body_obj.decode())) }, 9 => { let ty_obj = 
Self::new(ctor.get(0)); let body_obj = Self::new(ctor.get(1)); - IxonExpr::All( - Arc::new(ty_obj.decode()), - Arc::new(body_obj.decode()), - ) + IxonExpr::All(Arc::new(ty_obj.decode()), Arc::new(body_obj.decode())) }, 10 => { let ty_obj = Self::new(ctor.get(0)); diff --git a/src/ffi/ixon/meta.rs b/src/ffi/ixon/meta.rs index 019bebe4..03e16f49 100644 --- a/src/ffi/ixon/meta.rs +++ b/src/ffi/ixon/meta.rs @@ -16,8 +16,8 @@ use crate::lean::{ }; use lean_ffi::object::{LeanArray, LeanCtor, LeanObject}; -use crate::lean::LeanIxBinderInfo; use crate::lean::LeanIxAddress; +use crate::lean::LeanIxBinderInfo; // ============================================================================= // KVMap Build/Decode (not domain types, kept as free functions) @@ -132,21 +132,15 @@ impl LeanIxonDataValue { pub fn decode(self) -> IxonDataValue { let ctor = self.as_ctor(); match ctor.tag() { - 0 => { - IxonDataValue::OfString(LeanIxAddress::new(ctor.get(0)).decode()) - }, + 0 => IxonDataValue::OfString(LeanIxAddress::new(ctor.get(0)).decode()), 1 => { let b = ctor.scalar_u8(0, 0) != 0; IxonDataValue::OfBool(b) }, - 2 => { - IxonDataValue::OfName(LeanIxAddress::new(ctor.get(0)).decode()) - }, + 2 => IxonDataValue::OfName(LeanIxAddress::new(ctor.get(0)).decode()), 3 => IxonDataValue::OfNat(LeanIxAddress::new(ctor.get(0)).decode()), 4 => IxonDataValue::OfInt(LeanIxAddress::new(ctor.get(0)).decode()), - 5 => { - IxonDataValue::OfSyntax(LeanIxAddress::new(ctor.get(0)).decode()) - }, + 5 => IxonDataValue::OfSyntax(LeanIxAddress::new(ctor.get(0)).decode()), tag => panic!("Invalid Ixon.DataValue tag: {}", tag), } } @@ -277,9 +271,7 @@ impl LeanIxonExprMetaData { 4 => { // ref: 1 obj field (name), 0 scalar - ExprMetaData::Ref { - name: LeanIxAddress::new(ctor.get(0)).decode(), - } + ExprMetaData::Ref { name: LeanIxAddress::new(ctor.get(0)).decode() } }, 5 => { @@ -321,9 +313,7 @@ impl LeanIxonExprMetaArena { /// Single-field struct is unboxed — obj IS the Array directly. 
pub fn decode(self) -> ExprMeta { let arr = self.as_array(); - ExprMeta { - nodes: arr.map(|x| LeanIxonExprMetaData::new(x).decode()), - } + ExprMeta { nodes: arr.map(|x| LeanIxonExprMetaData::new(x).decode()) } } } @@ -448,12 +438,10 @@ impl LeanIxonConstantMeta { // defn: 6 obj fields, 2× u64 scalar let name = LeanIxAddress::new(ctor.get(0)).decode(); let lvls = decode_address_array(ctor.get(1).as_array()); - let hints = - LeanIxReducibilityHints::new(ctor.get(2)).decode(); + let hints = LeanIxReducibilityHints::new(ctor.get(2)).decode(); let all = decode_address_array(ctor.get(3).as_array()); let ctx = decode_address_array(ctor.get(4).as_array()); - let arena = - LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let arena = LeanIxonExprMetaArena::new(ctor.get(5)).decode(); let type_root = ctor.scalar_u64(6, 0); let value_root = ctor.scalar_u64(6, 8); ConstantMeta::Def { @@ -472,8 +460,7 @@ impl LeanIxonConstantMeta { // axio: 3 obj fields, 1× u64 scalar let name = LeanIxAddress::new(ctor.get(0)).decode(); let lvls = decode_address_array(ctor.get(1).as_array()); - let arena = - LeanIxonExprMetaArena::new(ctor.get(2)).decode(); + let arena = LeanIxonExprMetaArena::new(ctor.get(2)).decode(); let type_root = ctor.scalar_u64(3, 0); ConstantMeta::Axio { name, lvls, arena, type_root } }, @@ -482,8 +469,7 @@ impl LeanIxonConstantMeta { // quot: 3 obj fields, 1× u64 scalar let name = LeanIxAddress::new(ctor.get(0)).decode(); let lvls = decode_address_array(ctor.get(1).as_array()); - let arena = - LeanIxonExprMetaArena::new(ctor.get(2)).decode(); + let arena = LeanIxonExprMetaArena::new(ctor.get(2)).decode(); let type_root = ctor.scalar_u64(3, 0); ConstantMeta::Quot { name, lvls, arena, type_root } }, @@ -495,8 +481,7 @@ impl LeanIxonConstantMeta { let ctors = decode_address_array(ctor.get(2).as_array()); let all = decode_address_array(ctor.get(3).as_array()); let ctx = decode_address_array(ctor.get(4).as_array()); - let arena = - 
LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let arena = LeanIxonExprMetaArena::new(ctor.get(5)).decode(); let type_root = ctor.scalar_u64(6, 0); ConstantMeta::Indc { name, lvls, ctors, all, ctx, arena, type_root } }, @@ -506,8 +491,7 @@ impl LeanIxonConstantMeta { let name = LeanIxAddress::new(ctor.get(0)).decode(); let lvls = decode_address_array(ctor.get(1).as_array()); let induct = LeanIxAddress::new(ctor.get(2)).decode(); - let arena = - LeanIxonExprMetaArena::new(ctor.get(3)).decode(); + let arena = LeanIxonExprMetaArena::new(ctor.get(3)).decode(); let type_root = ctor.scalar_u64(4, 0); ConstantMeta::Ctor { name, lvls, induct, arena, type_root } }, @@ -519,8 +503,7 @@ impl LeanIxonConstantMeta { let rules = decode_address_array(ctor.get(2).as_array()); let all = decode_address_array(ctor.get(3).as_array()); let ctx = decode_address_array(ctor.get(4).as_array()); - let arena = - LeanIxonExprMetaArena::new(ctor.get(5)).decode(); + let arena = LeanIxonExprMetaArena::new(ctor.get(5)).decode(); let rule_roots = decode_u64_array(ctor.get(6).as_array()); let type_root = ctor.scalar_u64(7, 0); ConstantMeta::Rec { diff --git a/src/ffi/ixon/serialize.rs b/src/ffi/ixon/serialize.rs index 4815a4b7..14a3683c 100644 --- a/src/ffi/ixon/serialize.rs +++ b/src/ffi/ixon/serialize.rs @@ -14,7 +14,6 @@ use crate::lean::{ }; use lean_ffi::object::LeanByteArray; - /// Check if Lean's computed hash matches Rust's computed hash. 
#[unsafe(no_mangle)] pub extern "C" fn rs_expr_hash_matches( diff --git a/src/ffi/lean_env.rs b/src/ffi/lean_env.rs index dc38df09..695b29c7 100644 --- a/src/ffi/lean_env.rs +++ b/src/ffi/lean_env.rs @@ -148,13 +148,11 @@ fn decode_level(obj: LeanObject, cache: &mut Cache<'_>) -> Level { Level::imax(u, v) }, 4 => { - let [name] = - ctor.objs::<1>().map(|o| decode_name(o, cache.global)); + let [name] = ctor.objs::<1>().map(|o| decode_name(o, cache.global)); Level::param(name) }, 5 => { - let [name] = - ctor.objs::<1>().map(|o| decode_name(o, cache.global)); + let [name] = ctor.objs::<1>().map(|o| decode_name(o, cache.global)); Level::mvar(name) }, _ => unreachable!(), @@ -410,10 +408,7 @@ fn decode_recursor_rule( RecursorRule { ctor: ctor_name, n_fields, rhs } } -fn decode_constant_val( - obj: LeanObject, - cache: &mut Cache<'_>, -) -> ConstantVal { +fn decode_constant_val(obj: LeanObject, cache: &mut Cache<'_>) -> ConstantVal { let ctor = obj.as_ctor(); let [name_obj, level_params, typ] = ctor.objs(); let name = decode_name(name_obj, cache.global); From d740c3fcd87dc7fcc14408dd348bee1f0b4a88a8 Mon Sep 17 00:00:00 2001 From: samuelburnham <45365069+samuelburnham@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:16:10 -0500 Subject: [PATCH 26/27] Update LSpec and fix compile test --- .github/workflows/ignored.yml | 18 ++++++++++++++++++ Ix/Meta.lean | 4 +++- README.md | 3 ++- lake-manifest.json | 4 ++-- lakefile.lean | 2 +- 5 files changed, 26 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/ignored.yml diff --git a/.github/workflows/ignored.yml b/.github/workflows/ignored.yml new file mode 100644 index 00000000..0bd3cfb6 --- /dev/null +++ b/.github/workflows/ignored.yml @@ -0,0 +1,18 @@ +name: Ignored expensive tests + +on: + push: + branches: main + workflow_dispatch: + +jobs: + test: + runs-on: warp-ubuntu-latest-x64-32x # Needs 128 GB RAM for Lean compilation + steps: + - uses: actions/checkout@v6 + - uses: 
actions-rust-lang/setup-rust-toolchain@v1 + - uses: leanprover/lean-action@v1 + with: + auto-config: false + test: true + test-args: "-- --ignored" diff --git a/Ix/Meta.lean b/Ix/Meta.lean index b0a83169..0c7ee565 100644 --- a/Ix/Meta.lean +++ b/Ix/Meta.lean @@ -23,7 +23,9 @@ def getFileEnv (path : FilePath) : IO Environment := do let source ← IO.FS.readFile path let inputCtx := Parser.mkInputContext source path.toString let (header, parserState, messages) ← Parser.parseHeader inputCtx - let (env, messages) ← processHeader header default messages inputCtx 0 + let (env, messages) ← processHeaderCore + (HeaderSyntax.startPos header) (HeaderSyntax.imports header) + (isModule := false) default messages inputCtx 0 if messages.hasErrors then throw $ IO.userError $ "\n\n".intercalate $ (← messages.toList.mapM (·.toString)).map (String.trimAscii · |>.toString) diff --git a/README.md b/README.md index 91c5efed..0f7af7e6 100644 --- a/README.md +++ b/README.md @@ -193,7 +193,8 @@ Compiler performance benchmarks are tracked at https://bencher.dev/console/proje - `lake test -- ` runs one or multiple primary test suites. 
Primary suites: `ffi`, `byte-array`, `ixon`, `claim`, `commit`, `canon`, `keccak`, `sharing`, `graph-unit`, `condense-unit` - `lake test -- --ignored` runs only the expensive test suites: `shard-map`, `rust-canon-roundtrip`, `serial-canon-roundtrip`, `parallel-canon-roundtrip`, `graph-cross`, `condense-cross`, `compile`, `decompile`, `rust-serialize`, `rust-decompile`, `commit-io`, `aiur`, `aiur-hashes`, `ixvm` - - Any `canon` or `compile` test will require significant RAM, beware of OOM + - Most tests require at least 32 GB RAM + - The `compile` and `decompile` tests require 128 GB RAM - `aiur` and `aiur-hashes` generate ZK proofs and use significant CPU - `lake test -- --ignored ` runs one or multiple expensive suites by name - `lake test -- --include-ignored` runs both primary and expensive test suites diff --git a/lake-manifest.json b/lake-manifest.json index fcd9c3a4..7af6c508 100644 --- a/lake-manifest.json +++ b/lake-manifest.json @@ -35,10 +35,10 @@ "type": "git", "subDir": null, "scope": "", - "rev": "41c8a9b2f08679212e075ff89fa33694a2536d64", + "rev": "928f27c7de8318455ba0be7461dbdf7096f4075a", "name": "LSpec", "manifestFile": "lake-manifest.json", - "inputRev": "41c8a9b2f08679212e075ff89fa33694a2536d64", + "inputRev": "928f27c7de8318455ba0be7461dbdf7096f4075a", "inherited": false, "configFile": "lakefile.toml"}], "name": "ix", diff --git a/lakefile.lean b/lakefile.lean index a1ffbcf9..16d37449 100644 --- a/lakefile.lean +++ b/lakefile.lean @@ -12,7 +12,7 @@ lean_exe ix where supportInterpreter := true require LSpec from git - "https://github.com/argumentcomputer/LSpec" @ "41c8a9b2f08679212e075ff89fa33694a2536d64" + "https://github.com/argumentcomputer/LSpec" @ "928f27c7de8318455ba0be7461dbdf7096f4075a" require Blake3 from git "https://github.com/argumentcomputer/Blake3.lean" @ "564e0ab364ebaa3b1153defe2f49c9fe58a2d77c" From 2df4dfd33d5546260b9887eaf838c7047e0fc998 Mon Sep 17 00:00:00 2001 From: samuelburnham 
<45365069+samuelburnham@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:24:25 -0500 Subject: [PATCH 27/27] Merge ignored.yml and valgrind.yml --- .github/workflows/ignored.yml | 33 +++++++++++++++++++++++++++++-- .github/workflows/valgrind.yml | 36 ---------------------------------- 2 files changed, 31 insertions(+), 38 deletions(-) delete mode 100644 .github/workflows/valgrind.yml diff --git a/.github/workflows/ignored.yml b/.github/workflows/ignored.yml index 0bd3cfb6..cb0af803 100644 --- a/.github/workflows/ignored.yml +++ b/.github/workflows/ignored.yml @@ -1,12 +1,19 @@ -name: Ignored expensive tests +name: Extended CI tests on: push: branches: main workflow_dispatch: +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: - test: + ignored-test: runs-on: warp-ubuntu-latest-x64-32x # Needs 128 GB RAM for Lean compilation steps: - uses: actions/checkout@v6 @@ -16,3 +23,25 @@ jobs: auto-config: false test: true test-args: "-- --ignored" + + valgrind: + runs-on: warp-ubuntu-latest-x64-16x + steps: + - uses: actions/checkout@v6 + - uses: actions-rust-lang/setup-rust-toolchain@v1 + - uses: leanprover/lean-action@v1 + with: + auto-config: false + build: true + build-args: "IxTests" + - name: Install valgrind + run: sudo apt-get update && sudo apt-get install -y valgrind + - name: Run tests under valgrind + run: | + valgrind \ + --leak-check=full \ + --show-leak-kinds=definite,possible \ + --errors-for-leak-kinds=definite \ + --track-origins=yes \ + --error-exitcode=1 \ + .lake/build/bin/IxTests -- --include-ignored aiur aiur-hashes ixvm diff --git a/.github/workflows/valgrind.yml b/.github/workflows/valgrind.yml deleted file mode 100644 index 3879f166..00000000 --- a/.github/workflows/valgrind.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Valgrind - -on: - push: - branches: main - workflow_dispatch: - -permissions: - contents: read - -concurrency: - group: ${{ 
github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - valgrind: - runs-on: warp-ubuntu-latest-x64-16x - steps: - - uses: actions/checkout@v6 - - uses: actions-rust-lang/setup-rust-toolchain@v1 - - uses: leanprover/lean-action@v1 - with: - auto-config: false - build: true - build-args: "IxTests" - - name: Install valgrind - run: sudo apt-get update && sudo apt-get install -y valgrind - - name: Run tests under valgrind - run: | - valgrind \ - --leak-check=full \ - --show-leak-kinds=definite,possible \ - --errors-for-leak-kinds=definite \ - --track-origins=yes \ - --error-exitcode=1 \ - .lake/build/bin/IxTests -- --include-ignored aiur aiur-hashes ixvm