Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions gc.c
Original file line number Diff line number Diff line change
Expand Up @@ -2475,6 +2475,9 @@ rb_gc_before_updating_jit_code(void)
#if USE_YJIT
rb_yjit_mark_all_writeable();
#endif
#if USE_ZJIT
rb_zjit_mark_all_writable();
#endif
}

/*
Expand All @@ -2488,6 +2491,9 @@ rb_gc_after_updating_jit_code(void)
#if USE_YJIT
rb_yjit_mark_all_executable();
#endif
#if USE_ZJIT
rb_zjit_mark_all_executable();
#endif
}

static void
Expand Down
6 changes: 0 additions & 6 deletions prism_compile.c
Original file line number Diff line number Diff line change
Expand Up @@ -11362,12 +11362,6 @@ pm_parse_process(pm_parse_result_t *result, pm_node_t *node, VALUE *script_lines
pm_intern_constants_ctx_t intern_ctx = { .constants = scope_node->constants, .encoding = scope_node->encoding, .index = 0 };
pm_parser_constants_each(parser, pm_intern_constants_callback, &intern_ctx);

pm_constant_id_list_t *locals = &scope_node->locals;
pm_index_lookup_table_init_heap(&scope_node->index_lookup_table, (int) constants_size);
for (size_t index = 0; index < locals->size; index++) {
pm_index_lookup_table_insert(&scope_node->index_lookup_table, locals->ids[index], (int) index);
}

// If we got here, this is a success and we can return Qnil to indicate that
// no error should be raised.
result->parsed = true;
Expand Down
2 changes: 2 additions & 0 deletions zjit.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,8 @@ void rb_zjit_invalidate_no_ep_escape(const rb_iseq_t *iseq);
void rb_zjit_constant_state_changed(ID id);
void rb_zjit_iseq_mark(void *payload);
void rb_zjit_iseq_update_references(void *payload);
void rb_zjit_mark_all_writable(void);
void rb_zjit_mark_all_executable(void);
void rb_zjit_iseq_free(const rb_iseq_t *iseq);
void rb_zjit_before_ractor_spawn(void);
void rb_zjit_tracing_invalidate_all(void);
Expand Down
4 changes: 4 additions & 0 deletions zjit/src/asm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,10 @@ impl CodeBlock {
}

/// Make all the code in the region writable. Call this before a bulk write
/// session (e.g. updating GC references across the whole region), so each
/// individual patch does not need a per-page permission toggle.
pub fn mark_all_writable(&mut self) {
    self.mem_block.borrow_mut().mark_all_writable();
}

/// Make all the code in the region executable. Call this at the end of a write session.
pub fn mark_all_executable(&mut self) {
self.mem_block.borrow_mut().mark_all_executable();
}
Expand Down
30 changes: 24 additions & 6 deletions zjit/src/gc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,9 @@ fn iseq_version_update_references(mut version: IseqVersionRef) {
}
}

// Move objects baked in JIT code
// Move objects baked in JIT code.
// The code region is already writable because rb_zjit_mark_all_writable() was called
// before the GC update_references phase. We write directly to avoid per-page mprotect calls.
let cb = ZJITState::get_code_block();
for &offset in unsafe { version.as_ref() }.gc_offsets.iter() {
let value_ptr: *const u8 = offset.raw_ptr(cb);
Expand All @@ -170,13 +172,10 @@ fn iseq_version_update_references(mut version: IseqVersionRef) {

// Only write when the VALUE moves, to be copy-on-write friendly.
if new_addr != object {
for (byte_idx, &byte) in new_addr.as_u64().to_le_bytes().iter().enumerate() {
let byte_code_ptr = offset.add_bytes(byte_idx);
cb.write_mem(byte_code_ptr, byte).expect("patching existing code should be within bounds");
}
let value_ptr = value_ptr as *mut VALUE;
unsafe { value_ptr.write_unaligned(new_addr) };
}
}
cb.mark_all_executable();
}

/// Append a set of gc_offsets to the iseq's payload
Expand Down Expand Up @@ -211,6 +210,25 @@ fn ranges_overlap<T>(left: &Range<T>, right: &Range<T>) -> bool where T: Partial
left.start < right.end && right.start < left.end
}

/// GC callback for making all JIT code writable before updating references in bulk.
/// This avoids toggling W^X permissions per-page during GC compaction.
/// GC callback for making all JIT code writable before updating references in bulk.
/// This avoids toggling W^X permissions per-page during GC compaction.
#[unsafe(no_mangle)]
pub extern "C" fn rb_zjit_mark_all_writable() {
    // No-op when ZJIT was never initialized in this process.
    if ZJITState::has_instance() {
        ZJITState::get_code_block().mark_all_writable();
    }
}

/// GC callback for making all JIT code executable after updating references in bulk.
/// GC callback for making all JIT code executable after updating references in bulk.
#[unsafe(no_mangle)]
pub extern "C" fn rb_zjit_mark_all_executable() {
    // No-op when ZJIT was never initialized in this process.
    if ZJITState::has_instance() {
        ZJITState::get_code_block().mark_all_executable();
    }
}

/// Callback for marking GC objects inside [crate::invariants::Invariants].
#[unsafe(no_mangle)]
pub extern "C" fn rb_zjit_root_mark() {
Expand Down
16 changes: 16 additions & 0 deletions zjit/src/virtualmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,22 @@ impl<A: Allocator> VirtualMemory<A> {
memory_usage_bytes + self.page_size_bytes < memory_limit_bytes
}

/// Make all the code in the region writable. Call this before bulk writes (e.g. GC
/// reference updates). See [Self] for usual usage flow.
pub fn mark_all_writable(&mut self) {
    // Drop the write-page cache so the next ordinary write re-establishes
    // permissions for its page instead of assuming a stale state.
    self.current_write_page = None;

    // The allocator API takes a u32 length; panic on overflow rather than truncate.
    let num_bytes: u32 = self.mapped_region_bytes.try_into().unwrap();

    // Nothing is mapped yet when the region is empty, so there is nothing to protect.
    if num_bytes > 0 && !self.allocator.mark_writable(self.region_start.as_ptr(), num_bytes) {
        panic!("Cannot make JIT memory region writable");
    }
}

/// Make all the code in the region executable. Call this at the end of a write session.
/// See [Self] for usual usage flow.
pub fn mark_all_executable(&mut self) {
Expand Down