diff --git a/AUTHORS b/AUTHORS index b9adf15..993fa57 100644 --- a/AUTHORS +++ b/AUTHORS @@ -5,3 +5,4 @@ # To see the full list of contributors, see the revision history in # source control. Google LLC +Manos Pitsidianakis diff --git a/Cargo.toml b/Cargo.toml index 7d1b654..88fa431 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ el1 = [] el2 = [] el3 = [] exceptions = [] +relocate = [] initial-pagetable = [] psci = ["dep:smccc"] diff --git a/README.md b/README.md index 9f4e1bb..c99a055 100644 --- a/README.md +++ b/README.md @@ -120,6 +120,35 @@ cache fills. Adds the `start_core` function to start another CPU core via a PSCI `CPU_ON` call. This adds a dependency on the `smccc` crate. +### `relocate` + +Relocates binary symbols to the memory offset where the binary was loaded. +This allows for building binaries that can be loaded at arbitrary locations by +the bootloader/hypervisor. + +To perform relocations, the linker must be instructed to generate a position +independent executable and to generate relocation entries for text relocations. + +These options can be passed to the linker by `rustc` in a number of ways, e.g. via +a `.cargo/config.toml` file: + +```toml +[target."aarch64-unknown-none"] +rustflags = [ + "-C", "relocation-model=pie", + "-C", "link-args=-z notext", +] +``` + +Also make sure your image's origin is set to zero in your linker script: + +```ld +MEMORY +{ + image : ORIGIN = 0x0, LENGTH = 2M +} +``` + ## License Licensed under either of diff --git a/image.ld b/image.ld index 857029b..cf42d18 100644 --- a/image.ld +++ b/image.ld @@ -53,6 +53,15 @@ SECTIONS data_end = .; } >image + /* + * Keep track of relocations + */ + .rela.dyn : ALIGN(8) { + __rela_start = .; + *(.rela .rela*) + __rela_end = .; + } >image + /* Everything beyond this point will not be included in the binary. 
*/ bin_end = .; diff --git a/src/entry.rs b/src/entry.rs index 1ef377f..781c86c 100644 --- a/src/entry.rs +++ b/src/entry.rs @@ -6,19 +6,82 @@ use core::arch::naked_asm; -/// This is a generic entry point for an image. It carries out the operations required to prepare the -/// loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above, -/// prepares the stack, enables floating point, and sets up the exception vector. It preserves x0-x3 -/// for the Rust entry point, as these may contain boot parameters. +use crate::rust_entry; + +/// This is a generic entry point for an image that calls [`entry_early_prepare`]. +/// +/// # Safety +/// +/// This function is marked unsafe because it should never be called by anyone. The linker is +/// responsible for setting it as the entry function. +#[cfg(not(feature = "relocate"))] +#[unsafe(naked)] +#[unsafe(link_section = ".init.entry")] +#[unsafe(export_name = "entry")] +unsafe extern "C" fn entry() -> ! { + naked_asm!( + "b {entry_early_prepare}", + entry_early_prepare = sym entry_early_prepare + ) +} + +/// This is a generic entry point for an image prefixed with an [AArch64 Linux kernel boot +/// header](https://docs.kernel.org/arch/arm64/booting.html) that calls [`entry_early_prepare`]. /// /// # Safety /// /// This function is marked unsafe because it should never be called by anyone. The linker is /// responsible for setting it as the entry function. +#[cfg(feature = "relocate")] #[unsafe(naked)] #[unsafe(link_section = ".init.entry")] #[unsafe(export_name = "entry")] unsafe extern "C" fn entry() -> ! 
{ + const HEADER_FLAG_ENDIANNESS: u64 = cfg!(target_endian = "big") as u64; + // 0 - Unspecified, 1 - 4K, 2 - 16K, 3 - 64K + const HEADER_FLAG_PAGE_SIZE: u64 = 1; + const HEADER_FLAG_PHYSICAL_PLACEMENT: u64 = 1; + const HEADER_FLAGS: u64 = HEADER_FLAG_ENDIANNESS + | (HEADER_FLAG_PAGE_SIZE << 1) + | (HEADER_FLAG_PHYSICAL_PLACEMENT << 3); + + naked_asm!( + // code0 + "b {entry_early_prepare}", + // code1 + "nop", + + // text_offset + ".quad 0x0", + // image_size + ".quad bin_end - entry", + // flags + ".quad {HEADER_FLAGS}", + // res2 + ".quad 0", + // res3 + ".quad 0", + // res4 + ".quad 0", + + // "ARM\x64" magic number + ".long 0x644d5241", + // res5 + ".long 0", + ".align 3", + entry_early_prepare = sym entry_early_prepare, + HEADER_FLAGS = const HEADER_FLAGS.to_le(), + ) +} + +/// Early entry point preparations. +/// +/// It carries out the operations required to prepare the loaded image to be run. Specifically, it +/// zeroes the bss section using registers x25 and above, prepares the stack, enables floating +/// point, and sets up the exception vector. It preserves x0-x3 for the Rust entry point, as these +/// may contain boot parameters. +#[unsafe(naked)] +unsafe extern "C" fn entry_early_prepare() -> ! { naked_asm!( ".macro adr_l, reg:req, sym:req", r"adrp \reg, \sym", @@ -42,9 +105,66 @@ unsafe extern "C" fn entry() -> ! { // Prepare the stack. "adr_l x30, boot_stack_end", "mov sp, x30", + // Perform final Rust entrypoint setup + "b {entry_prepare_image}", + ".purgem adr_l", + entry_prepare_image = sym entry_prepare_image + ) +} + +#[cfg(not(feature = "relocate"))] +#[unsafe(naked)] +unsafe extern "C" fn entry_prepare_image() -> ! { + naked_asm!( + // Call into Rust code. + "b {rust_entry}", + rust_entry = sym rust_entry + ) +} + +#[cfg(feature = "relocate")] +#[unsafe(naked)] +unsafe extern "C" fn entry_prepare_image() -> ! 
{ + naked_asm!( + ".macro adr_l, reg:req, sym:req", + r"adrp \reg, \sym", + r"add \reg, \reg, :lo12:\sym", + ".endm", + // Where the image was loaded + "adr_l x7, text_begin", + // Start relocating. + // let mut rela: *mut Elf64Rela = __rela_start; + "adr_l x9, __rela_start", + // let rela_end: *mut Elf64Rela = __rela_end; + "adr_l x8, __rela_end", + "b 1f", + "2:", + // rela = rela.wrapping_offset(1); + "add x9, x9, #24", + "1:", + // while rela < rela_end { + "cmp x9, x8", + "b.hs 3f", + // let r_type = unsafe { *rela }.r_info & 0xffff_ffff; + "ldr w10, [x9, #8]", + // if r_type != R_AARCH64_RELATIVE { continue }; + "cmp w10, #1027", + "b.ne 2b", + // let mut new_ptr = unsafe { *rela }.r_addend; + "ldr x10, [x9, #16]", + // let r_offset = unsafe { *rela }.r_offset; + "ldr x11, [x9]", + // new_ptr += offset; + "add x10, x10, x7", + // unsafe { *(offset + r_offset) } = new_ptr; + "str x10, [x7, x11]", + "b 2b", + "3:", + // End of relocations. // Call into Rust code. "b {rust_entry}", - rust_entry = sym crate::rust_entry, + ".purgem adr_l", + rust_entry = sym rust_entry, ) }