diff --git a/kernel/arch/aarch64/timer/timer.cpp b/kernel/arch/aarch64/timer/timer.cpp index 9ec01575..429f7ace 100644 --- a/kernel/arch/aarch64/timer/timer.cpp +++ b/kernel/arch/aarch64/timer/timer.cpp @@ -154,6 +154,8 @@ __PRIVILEGED_CODE bool on_interrupt() { if (t->timer_deadline > now) break; state.sleep_queue.pop_front(); t->timer_deadline = 0; + t->block_kind = sched::TASK_BLOCK_NONE; + t->blocked_wait_queue = nullptr; sched::wake(t); } @@ -188,6 +190,8 @@ __PRIVILEGED_CODE void schedule_sleep(sched::task* t, uint64_t deadline_ns) { timer_cpu_state& state = this_cpu(cpu_timer_state); sync::irq_state irq = sync::spin_lock_irqsave(state.lock); + t->block_kind = sched::TASK_BLOCK_TIMER; + t->blocked_wait_queue = nullptr; t->timer_deadline = deadline_ns; state.sleep_queue.insert_sorted(t, [](sched::task* a, sched::task* b) { @@ -202,4 +206,47 @@ __PRIVILEGED_CODE void schedule_sleep(sched::task* t, uint64_t deadline_ns) { sync::spin_unlock_irqrestore(state.lock, irq); } +/** + * @note Privilege: **required** + */ +__PRIVILEGED_CODE void cancel_sleep(sched::task* t) { + if (!t) { + return; + } + + uint32_t target_cpu = __atomic_load_n(&t->exec.cpu, __ATOMIC_ACQUIRE); + if (target_cpu >= MAX_CPUS) { + return; + } + + timer_cpu_state& state = per_cpu_on(cpu_timer_state, target_cpu); + bool should_wake = false; + + sync::irq_state irq = sync::spin_lock_irqsave(state.lock); + if (t->state == sched::TASK_STATE_BLOCKED && + t->block_kind == sched::TASK_BLOCK_TIMER && + t->timer_link.prev && t->timer_link.next) { + state.sleep_queue.remove(t); + t->timer_deadline = 0; + t->block_kind = sched::TASK_BLOCK_NONE; + t->blocked_wait_queue = nullptr; + should_wake = true; + + uint64_t next_event = state.next_tick_ns; + if (!state.sleep_queue.empty()) { + uint64_t front_deadline = state.sleep_queue.front()->timer_deadline; + if (front_deadline < next_event) { + next_event = front_deadline; + } + } + state.programmed_ns = next_event; + program_oneshot(next_event); + } + 
sync::spin_unlock_irqrestore(state.lock, irq); + + if (should_wake) { + sched::wake(t); + } +} + } // namespace timer diff --git a/kernel/arch/aarch64/trap/trap.cpp b/kernel/arch/aarch64/trap/trap.cpp index ac518f16..08fcfc98 100644 --- a/kernel/arch/aarch64/trap/trap.cpp +++ b/kernel/arch/aarch64/trap/trap.cpp @@ -3,6 +3,7 @@ #include "common/types.h" #include "common/logging.h" #include "debug/panic.h" +#include "sched/sched.h" #include "sched/task_exec_core.h" #include "percpu/percpu.h" #include "dynpriv/dynpriv.h" @@ -74,6 +75,7 @@ void stlx_aarch64_el0_irq_handler(aarch64::trap_frame* tf) { sched::on_tick(tf); } irq_task_core->flags &= ~sched::TASK_FLAG_IN_IRQ; + sched::maybe_terminate_current(); restore_post_trap_elevation_state(); return; } @@ -82,6 +84,7 @@ void stlx_aarch64_el0_irq_handler(aarch64::trap_frame* tf) { serial::on_rx_irq(); irq::eoi(irq_id); irq_task_core->flags &= ~sched::TASK_FLAG_IN_IRQ; + sched::maybe_terminate_current(); restore_post_trap_elevation_state(); return; } @@ -139,6 +142,7 @@ void stlx_aarch64_el1_irq_handler(aarch64::trap_frame* tf) { sched::on_tick(tf); } irq_task_core->flags &= ~sched::TASK_FLAG_IN_IRQ; + sched::maybe_terminate_current(); restore_post_trap_elevation_state(); return; } @@ -147,6 +151,7 @@ void stlx_aarch64_el1_irq_handler(aarch64::trap_frame* tf) { serial::on_rx_irq(); irq::eoi(irq_id); irq_task_core->flags &= ~sched::TASK_FLAG_IN_IRQ; + sched::maybe_terminate_current(); restore_post_trap_elevation_state(); return; } diff --git a/kernel/arch/x86_64/timer/timer.cpp b/kernel/arch/x86_64/timer/timer.cpp index e7e94531..b406022e 100644 --- a/kernel/arch/x86_64/timer/timer.cpp +++ b/kernel/arch/x86_64/timer/timer.cpp @@ -220,6 +220,8 @@ __PRIVILEGED_CODE bool on_interrupt() { if (t->timer_deadline > now) break; state.sleep_queue.pop_front(); t->timer_deadline = 0; + t->block_kind = sched::TASK_BLOCK_NONE; + t->blocked_wait_queue = nullptr; sched::wake(t); } @@ -254,6 +256,8 @@ __PRIVILEGED_CODE void 
schedule_sleep(sched::task* t, uint64_t deadline_ns) { timer_cpu_state& state = this_cpu(cpu_timer_state); sync::irq_state irq = sync::spin_lock_irqsave(state.lock); + t->block_kind = sched::TASK_BLOCK_TIMER; + t->blocked_wait_queue = nullptr; t->timer_deadline = deadline_ns; state.sleep_queue.insert_sorted(t, [](sched::task* a, sched::task* b) { @@ -268,4 +272,47 @@ __PRIVILEGED_CODE void schedule_sleep(sched::task* t, uint64_t deadline_ns) { sync::spin_unlock_irqrestore(state.lock, irq); } +/** + * @note Privilege: **required** + */ +__PRIVILEGED_CODE void cancel_sleep(sched::task* t) { + if (!t) { + return; + } + + uint32_t target_cpu = __atomic_load_n(&t->exec.cpu, __ATOMIC_ACQUIRE); + if (target_cpu >= MAX_CPUS) { + return; + } + + timer_cpu_state& state = per_cpu_on(cpu_timer_state, target_cpu); + bool should_wake = false; + + sync::irq_state irq = sync::spin_lock_irqsave(state.lock); + if (t->state == sched::TASK_STATE_BLOCKED && + t->block_kind == sched::TASK_BLOCK_TIMER && + t->timer_link.prev && t->timer_link.next) { + state.sleep_queue.remove(t); + t->timer_deadline = 0; + t->block_kind = sched::TASK_BLOCK_NONE; + t->blocked_wait_queue = nullptr; + should_wake = true; + + uint64_t next_event = state.next_tick_ns; + if (!state.sleep_queue.empty()) { + uint64_t front_deadline = state.sleep_queue.front()->timer_deadline; + if (front_deadline < next_event) { + next_event = front_deadline; + } + } + state.programmed_ns = next_event; + program_oneshot(next_event); + } + sync::spin_unlock_irqrestore(state.lock, irq); + + if (should_wake) { + sched::wake(t); + } +} + } // namespace timer diff --git a/kernel/arch/x86_64/trap/trap.cpp b/kernel/arch/x86_64/trap/trap.cpp index b3fb08ee..90cab922 100644 --- a/kernel/arch/x86_64/trap/trap.cpp +++ b/kernel/arch/x86_64/trap/trap.cpp @@ -4,6 +4,7 @@ #include "io/serial.h" #include "timer/timer.h" #include "debug/panic.h" +#include "sched/sched.h" #include "sched/task_exec_core.h" #include "percpu/percpu.h" #include 
"dynpriv/dynpriv.h" @@ -36,6 +37,7 @@ extern "C" __PRIVILEGED_CODE void stlx_x86_64_trap_handler(x86::trap_frame* tf) sched::on_yield(tf); // Clear the IRQ flag on the originally interrupted task, not the post-switch task. irq_task_core->flags &= ~sched::TASK_FLAG_IN_IRQ; + sched::maybe_terminate_current(); restore_post_trap_elevation_state(); return; } @@ -48,6 +50,7 @@ extern "C" __PRIVILEGED_CODE void stlx_x86_64_trap_handler(x86::trap_frame* tf) } // Clear IRQ state on the interrupted task to avoid stale IN_IRQ ownership. irq_task_core->flags &= ~sched::TASK_FLAG_IN_IRQ; + sched::maybe_terminate_current(); restore_post_trap_elevation_state(); return; } @@ -56,6 +59,7 @@ extern "C" __PRIVILEGED_CODE void stlx_x86_64_trap_handler(x86::trap_frame* tf) irq::eoi(0); serial::on_rx_irq(); irq_task_core->flags &= ~sched::TASK_FLAG_IN_IRQ; + sched::maybe_terminate_current(); restore_post_trap_elevation_state(); return; } diff --git a/kernel/resource/providers/proc_provider.cpp b/kernel/resource/providers/proc_provider.cpp index 38d5b5ea..58260a4c 100644 --- a/kernel/resource/providers/proc_provider.cpp +++ b/kernel/resource/providers/proc_provider.cpp @@ -9,6 +9,8 @@ namespace resource::proc_provider { +static uint32_t g_next_terminate_epoch = 1; + __PRIVILEGED_CODE void proc_resource::ref_destroy(proc_resource* self) { heap::kfree_delete(self); } @@ -27,6 +29,126 @@ __PRIVILEGED_CODE static ssize_t proc_write( return ERR_UNSUP; } +__PRIVILEGED_CODE static uint32_t allocate_terminate_epoch() { + uint32_t epoch = __atomic_fetch_add(&g_next_terminate_epoch, 1, __ATOMIC_ACQ_REL); + if (epoch == 0) { + epoch = __atomic_fetch_add(&g_next_terminate_epoch, 1, __ATOMIC_ACQ_REL); + } + return epoch; +} + +__PRIVILEGED_CODE static resource_object* acquire_process_handle_at( + sched::task* task, + uint32_t index +) { + if (!task || index >= resource::MAX_TASK_HANDLES) { + return nullptr; + } + + resource_object* obj = nullptr; + sync::irq_state irq = 
sync::spin_lock_irqsave(task->handles.lock); + const resource::handle_entry& entry = task->handles.entries[index]; + if (entry.used && + entry.type == resource::resource_type::PROCESS && + entry.obj) { + resource::resource_add_ref(entry.obj); + obj = entry.obj; + } + sync::spin_unlock_irqrestore(task->handles.lock, irq); + return obj; +} + +__PRIVILEGED_CODE static int32_t terminate_proc_resource_with_epoch( + proc_resource* pr, + int32_t exit_code, + uint32_t epoch +) { + if (!pr) { + return ERR_INVAL; + } + + sched::task* created_child = nullptr; + sched::task* target_child = nullptr; + sync::irq_state irq = sync::spin_lock_irqsave(pr->lock); + + if (!pr->child || pr->exited) { + pr->terminate_in_progress = false; + sync::spin_unlock_irqrestore(pr->lock, irq); + return OK; + } + + if (pr->terminate_in_progress) { + if (pr->terminate_epoch == epoch) { + // Cycle detected in the same recursive termination traversal. + // Avoid waiting here to prevent deadlock (A->B->A style graphs). + sync::spin_unlock_irqrestore(pr->lock, irq); + return OK; + } + + while (!pr->exited) { + irq = sync::wait(pr->wait_queue, pr->lock, irq); + } + sync::spin_unlock_irqrestore(pr->lock, irq); + return OK; + } + + pr->terminate_in_progress = true; + pr->terminate_epoch = epoch; + + if (pr->child->state == sched::TASK_STATE_CREATED) { + created_child = pr->child; + pr->exit_code = exit_code; + pr->exited = true; + pr->child = nullptr; + pr->terminate_in_progress = false; + sync::wake_all(pr->wait_queue); + sync::spin_unlock_irqrestore(pr->lock, irq); + + if (created_child->proc_res) { + (void)created_child->proc_res->release(); + created_child->proc_res = nullptr; + } + destroy_unstarted_task(created_child); + return OK; + } + + target_child = pr->child; + sync::spin_unlock_irqrestore(pr->lock, irq); + + for (uint32_t i = 0; i < resource::MAX_TASK_HANDLES; i++) { + resource_object* descendant_obj = acquire_process_handle_at(target_child, i); + if (!descendant_obj) { + continue; + } + + 
proc_resource* child_pr = get_proc_resource(descendant_obj); + if (child_pr) { + (void)terminate_proc_resource_with_epoch(child_pr, exit_code, epoch); + } + resource::resource_release(descendant_obj); + } + + irq = sync::spin_lock_irqsave(pr->lock); + if (!pr->exited && pr->child) { + sched::request_terminate(pr->child, exit_code); + } + + while (!pr->exited) { + irq = sync::wait(pr->wait_queue, pr->lock, irq); + } + pr->terminate_in_progress = false; + sync::spin_unlock_irqrestore(pr->lock, irq); + return OK; +} + +__PRIVILEGED_CODE int32_t terminate_proc_resource( + proc_resource* pr, + int32_t exit_code +) { + uint32_t epoch = allocate_terminate_epoch(); + return terminate_proc_resource_with_epoch(pr, exit_code, epoch); +} + __PRIVILEGED_CODE static void proc_close(resource_object* obj) { if (!obj || !obj->impl) { return; @@ -35,25 +157,17 @@ __PRIVILEGED_CODE static void proc_close(resource_object* obj) { auto* impl = static_cast(obj->impl); auto* pr = impl->proc.ptr(); + bool should_terminate = false; sync::irq_state irq = sync::spin_lock_irqsave(pr->lock); - if (pr->child && pr->child->state == sched::TASK_STATE_CREATED) { - auto* child = pr->child; - pr->child = nullptr; - sync::spin_unlock_irqrestore(pr->lock, irq); - - if (child->proc_res) { - (void)child->proc_res->release(); - child->proc_res = nullptr; - } - destroy_unstarted_task(child); + should_terminate = true; } else if (pr->child && !pr->exited && !pr->detached) { - uint32_t child_tid = pr->child->tid; - sync::spin_unlock_irqrestore(pr->lock, irq); - log::fatal("proc_close: parent exiting with running attached child tid=%u", - child_tid); - } else { - sync::spin_unlock_irqrestore(pr->lock, irq); + should_terminate = true; + } + sync::spin_unlock_irqrestore(pr->lock, irq); + + if (should_terminate) { + (void)terminate_proc_resource(pr, PROC_KILL_EXIT_CODE); } heap::kfree_delete(impl); @@ -84,8 +198,10 @@ __PRIVILEGED_CODE int32_t create_proc_resource( pr->child = child_task; pr->wait_queue.init(); 
pr->exit_code = 0; + pr->terminate_epoch = 0; pr->exited = false; pr->detached = false; + pr->terminate_in_progress = false; pr->add_ref(); // refcount 1 -> 2 (second ref for the child task) diff --git a/kernel/resource/providers/proc_provider.h b/kernel/resource/providers/proc_provider.h index 73c36106..819b6d4d 100644 --- a/kernel/resource/providers/proc_provider.h +++ b/kernel/resource/providers/proc_provider.h @@ -13,8 +13,10 @@ struct proc_resource : rc::ref_counted { sched::task* child; sync::wait_queue wait_queue; int32_t exit_code; + uint32_t terminate_epoch; bool exited; bool detached; + bool terminate_in_progress; /** * @brief Free a proc_resource when the last reference is released. @@ -27,6 +29,8 @@ struct proc_resource_impl { rc::strong_ref proc; }; +constexpr int32_t PROC_KILL_EXIT_CODE = 137; + /** * @brief Create a PROCESS resource wrapping a child task. * Sets child_task->proc_res and gives the child an owned ref on proc_resource. @@ -52,6 +56,18 @@ __PRIVILEGED_CODE int32_t create_proc_resource( */ __PRIVILEGED_CODE void destroy_unstarted_task(sched::task* t); +/** + * @brief Terminate a process resource and descendants recursively. + * For CREATED tasks, destroys the task immediately and marks the proc + * as exited. For started tasks, requests termination and blocks until + * the task has exited. 
+ * @note Privilege: **required** + */ +__PRIVILEGED_CODE int32_t terminate_proc_resource( + proc_resource* pr, + int32_t exit_code +); + } // namespace resource::proc_provider #endif // STELLUX_RESOURCE_PROVIDERS_PROC_PROVIDER_H diff --git a/kernel/sched/sched.cpp b/kernel/sched/sched.cpp index a35f297a..39dec3e1 100644 --- a/kernel/sched/sched.cpp +++ b/kernel/sched/sched.cpp @@ -12,6 +12,7 @@ #include "mm/paging.h" #include "common/logging.h" #include "sync/spinlock.h" +#include "sync/wait_queue.h" #include "smp/smp.h" #include "hw/cpu.h" #include "clock/clock.h" @@ -62,6 +63,35 @@ static void store_cleanup_stage(task* t, uint32_t stage) { __atomic_store_n(&t->cleanup_stage, stage, __ATOMIC_RELEASE); } +static void do_exit_current(int exit_code) { + sched::task* task = current(); + + if (task->proc_res) { + auto* pr = task->proc_res; + sync::irq_state irq = sync::spin_lock_irqsave(pr->lock); + if (!pr->detached) { + pr->exit_code = exit_code; + pr->exited = true; + pr->child = nullptr; + sync::wake_all(pr->wait_queue); + } else { + pr->child = nullptr; + } + sync::spin_unlock_irqrestore(pr->lock, irq); + task->proc_res = nullptr; + if (pr->release()) { + resource::proc_provider::proc_resource::ref_destroy(pr); + } + } + + store_cleanup_stage(task, TASK_CLEANUP_STAGE_EXIT_REQUESTED); + task->state = TASK_STATE_DEAD; + task->exit_code = exit_code; + task->block_kind = TASK_BLOCK_NONE; + task->blocked_wait_queue = nullptr; + task->terminate_requested = 0; +} + #ifdef DEBUG [[noreturn]] __PRIVILEGED_CODE static void panic_invalid_privilege_state( const char* site @@ -166,6 +196,62 @@ task* current() { return this_cpu(current_task); } +__PRIVILEGED_CODE bool termination_requested(task* t, int* out_exit_code) { + if (!t) { + return false; + } + + if (__atomic_load_n(&t->terminate_requested, __ATOMIC_ACQUIRE) == 0) { + return false; + } + + if (out_exit_code) { + *out_exit_code = __atomic_load_n(&t->terminate_exit_code, __ATOMIC_ACQUIRE); + } + return true; +} + 
+__PRIVILEGED_CODE void request_terminate(task* t, int exit_code) { + if (!t) { + return; + } + if (t->exec.flags & TASK_FLAG_KERNEL) { + return; + } + + __atomic_store_n(&t->terminate_exit_code, exit_code, __ATOMIC_RELEASE); + __atomic_store_n(&t->terminate_requested, 1, __ATOMIC_RELEASE); + + if (t == current()) { + return; + } + + uint32_t state = __atomic_load_n(&t->state, __ATOMIC_ACQUIRE); + if (state != TASK_STATE_BLOCKED) { + return; + } + + uint32_t block_kind = __atomic_load_n(&t->block_kind, __ATOMIC_ACQUIRE); + if (block_kind == TASK_BLOCK_WAIT_QUEUE) { + sync::wait_queue* wq = __atomic_load_n(&t->blocked_wait_queue, __ATOMIC_ACQUIRE); + if (wq) { + sync::cancel_wait(*wq, t); + } + } else if (block_kind == TASK_BLOCK_TIMER) { + timer::cancel_sleep(t); + } +} + +__PRIVILEGED_CODE void maybe_terminate_current() { + task* t = current(); + int exit_code = 0; + if (termination_requested(t, &exit_code)) { + do_exit_current(exit_code); + yield(); + __builtin_unreachable(); + } +} + /** * @note Privilege: **required** */ @@ -343,8 +429,11 @@ __PRIVILEGED_CODE void wake(task* t) { * @note Privilege: **required** */ __PRIVILEGED_CODE void sleep_ns(uint64_t ns) { + maybe_terminate_current(); + if (ns == 0) { yield(); + maybe_terminate_current(); return; } @@ -357,6 +446,7 @@ __PRIVILEGED_CODE void sleep_ns(uint64_t ns) { self->state = TASK_STATE_BLOCKED; timer::schedule_sleep(self, deadline); yield(); + maybe_terminate_current(); } __PRIVILEGED_CODE void sleep_us(uint64_t us) { @@ -369,29 +459,7 @@ __PRIVILEGED_CODE void sleep_ms(uint64_t ms) { [[noreturn]] void exit(int exit_code) { RUN_ELEVATED({ - sched::task* task = current(); - - if (task->proc_res) { - auto* pr = task->proc_res; - sync::irq_state irq = sync::spin_lock_irqsave(pr->lock); - if (!pr->detached) { - pr->exit_code = exit_code; - pr->exited = true; - pr->child = nullptr; - sync::wake_all(pr->wait_queue); - } else { - pr->child = nullptr; - } - sync::spin_unlock_irqrestore(pr->lock, irq); - 
task->proc_res = nullptr; - if (pr->release()) { - resource::proc_provider::proc_resource::ref_destroy(pr); - } - } - - store_cleanup_stage(task, TASK_CLEANUP_STAGE_EXIT_REQUESTED); - task->state = TASK_STATE_DEAD; - task->exit_code = exit_code; + do_exit_current(exit_code); }); yield(); __builtin_unreachable(); @@ -457,6 +525,10 @@ __PRIVILEGED_CODE task* create_kernel_task( t->wait_link = {}; t->timer_link = {}; t->timer_deadline = 0; + t->block_kind = TASK_BLOCK_NONE; + t->terminate_requested = 0; + t->terminate_exit_code = 0; + t->blocked_wait_queue = nullptr; string::memcpy(t->name, name, string::strnlen(name, TASK_NAME_MAX - 1)); t->name[string::strnlen(name, TASK_NAME_MAX - 1)] = '\0'; t->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE; @@ -653,6 +725,10 @@ __PRIVILEGED_CODE task* create_user_task( t->wait_link = {}; t->timer_link = {}; t->timer_deadline = 0; + t->block_kind = TASK_BLOCK_NONE; + t->terminate_requested = 0; + t->terminate_exit_code = 0; + t->blocked_wait_queue = nullptr; string::memcpy(t->name, name, string::strnlen(name, TASK_NAME_MAX - 1)); t->name[string::strnlen(name, TASK_NAME_MAX - 1)] = '\0'; t->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE; @@ -704,6 +780,10 @@ __PRIVILEGED_CODE int32_t init() { idle->wait_link = {}; idle->timer_link = {}; idle->timer_deadline = 0; + idle->block_kind = TASK_BLOCK_NONE; + idle->terminate_requested = 0; + idle->terminate_exit_code = 0; + idle->blocked_wait_queue = nullptr; string::memcpy(idle->name, "idle", 4); idle->name[4] = '\0'; idle->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE; @@ -766,6 +846,10 @@ __PRIVILEGED_CODE int32_t init_ap(uint32_t cpu_id, uintptr_t task_stack_top, idle->sys_stack_base = 0; idle->tid = __atomic_fetch_add(&g_next_tid, 1, __ATOMIC_RELAXED); idle->state = TASK_STATE_RUNNING; + idle->block_kind = TASK_BLOCK_NONE; + idle->terminate_requested = 0; + idle->terminate_exit_code = 0; + idle->blocked_wait_queue = nullptr; string::memcpy(idle->name, "idle", 4); idle->name[4] = '\0'; 
idle->cleanup_stage = TASK_CLEANUP_STAGE_ACTIVE; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ecd0dc8a..e51db69c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -102,6 +102,29 @@ void yield(); */ [[noreturn]] void exit(int exit_code); +/** + * @brief Request termination of a task from another context. + * Sets a pending-terminate flag and wakes blocked tasks so they can + * converge to sched::exit() at the next safe point. + * @note Privilege: **required** + */ +__PRIVILEGED_CODE void request_terminate(task* t, int exit_code); + +/** + * @brief Query whether termination has been requested for a task. + * If out_exit_code is non-null and a request exists, writes the + * requested exit code. + * @note Privilege: **required** + */ +__PRIVILEGED_CODE bool termination_requested(task* t, int* out_exit_code = nullptr); + +/** + * @brief Terminate current task if a pending request exists. + * Safe-point helper for trap/syscall/blocking return paths. + * @note Privilege: **required** + */ +__PRIVILEGED_CODE void maybe_terminate_current(); + /** * @brief Get the current task on this CPU. 
*/ diff --git a/kernel/sched/task.h b/kernel/sched/task.h index e8b0c966..831006d5 100644 --- a/kernel/sched/task.h +++ b/kernel/sched/task.h @@ -8,6 +8,7 @@ namespace resource::proc_provider { struct proc_resource; } namespace fs { class node; } +namespace sync { struct wait_queue; } namespace sched { @@ -19,6 +20,10 @@ constexpr uint32_t TASK_STATE_RUNNING = 2; // executing on a CPU constexpr uint32_t TASK_STATE_BLOCKED = 3; // on a wait queue constexpr uint32_t TASK_STATE_DEAD = 4; // terminated +constexpr uint32_t TASK_BLOCK_NONE = 0; +constexpr uint32_t TASK_BLOCK_WAIT_QUEUE = 1; +constexpr uint32_t TASK_BLOCK_TIMER = 2; + constexpr uint32_t TASK_CLEANUP_STAGE_ACTIVE = 0; constexpr uint32_t TASK_CLEANUP_STAGE_EXIT_REQUESTED = 1; constexpr uint32_t TASK_CLEANUP_STAGE_SCHEDULER_DETACHED = 2; @@ -48,6 +53,10 @@ struct task { list::node wait_link; list::node timer_link; uint64_t timer_deadline; + uint32_t block_kind; + uint32_t terminate_requested; + int32_t terminate_exit_code; + sync::wait_queue* blocked_wait_queue; char name[TASK_NAME_MAX]; task_tlb_sync_ticket tlb_sync_ticket; rc::reaper::dead_node reaper_node; diff --git a/kernel/sync/wait_queue.cpp b/kernel/sync/wait_queue.cpp index 97d10d25..7d0902d9 100644 --- a/kernel/sync/wait_queue.cpp +++ b/kernel/sync/wait_queue.cpp @@ -17,14 +17,26 @@ irq_state wait(wait_queue& wq, spinlock& lock, irq_state saved) { } } + int terminate_exit = 0; + if (sched::termination_requested(self, &terminate_exit)) { + spin_unlock_irqrestore(lock, saved); + sched::maybe_terminate_current(); + for (;;) { + cpu::halt(); + } + } + spin_lock(wq.lock); self->state = sched::TASK_STATE_BLOCKED; + self->block_kind = sched::TASK_BLOCK_WAIT_QUEUE; + self->blocked_wait_queue = &wq; wq.waiters.push_back(self); spin_unlock(wq.lock); spin_unlock_irqrestore(lock, saved); sched::yield(); + sched::maybe_terminate_current(); return spin_lock_irqsave(lock); } @@ -35,6 +47,10 @@ irq_state wait(wait_queue& wq, spinlock& lock, irq_state saved) { 
__PRIVILEGED_CODE void wake_one(wait_queue& wq) { irq_state irq = spin_lock_irqsave(wq.lock); sched::task* t = wq.waiters.pop_front(); + if (t) { + t->block_kind = sched::TASK_BLOCK_NONE; + t->blocked_wait_queue = nullptr; + } spin_unlock_irqrestore(wq.lock, irq); if (t) { @@ -51,6 +67,8 @@ __PRIVILEGED_CODE void wake_all(wait_queue& wq) { irq_state irq = spin_lock_irqsave(wq.lock); while (sched::task* t = wq.waiters.pop_front()) { + t->block_kind = sched::TASK_BLOCK_NONE; + t->blocked_wait_queue = nullptr; batch.push_back(t); } spin_unlock_irqrestore(wq.lock, irq); @@ -60,4 +78,30 @@ __PRIVILEGED_CODE void wake_all(wait_queue& wq) { } } +/** + * @note Privilege: **required** + */ +__PRIVILEGED_CODE void cancel_wait(wait_queue& wq, sched::task* task) { + if (!task) { + return; + } + + bool should_wake = false; + irq_state irq = spin_lock_irqsave(wq.lock); + if (task->state == sched::TASK_STATE_BLOCKED && + task->block_kind == sched::TASK_BLOCK_WAIT_QUEUE && + task->blocked_wait_queue == &wq && + task->wait_link.prev && task->wait_link.next) { + wq.waiters.remove(task); + task->block_kind = sched::TASK_BLOCK_NONE; + task->blocked_wait_queue = nullptr; + should_wake = true; + } + spin_unlock_irqrestore(wq.lock, irq); + + if (should_wake) { + sched::wake(task); + } +} + } // namespace sync diff --git a/kernel/sync/wait_queue.h b/kernel/sync/wait_queue.h index 55935d88..ba626dff 100644 --- a/kernel/sync/wait_queue.h +++ b/kernel/sync/wait_queue.h @@ -56,6 +56,14 @@ __PRIVILEGED_CODE void wake_one(wait_queue& wq); */ __PRIVILEGED_CODE void wake_all(wait_queue& wq); +/** + * Remove a specific blocked task from this wait queue and wake it. + * No-op if the task is not currently queued on this wait queue. + * Safe from IRQ context. 
+ * @note Privilege: **required** + */ +__PRIVILEGED_CODE void cancel_wait(wait_queue& wq, sched::task* task); + } // namespace sync #endif // STELLUX_SYNC_WAIT_QUEUE_H diff --git a/kernel/syscall/handlers/sys_proc.cpp b/kernel/syscall/handlers/sys_proc.cpp index c3f02cf7..c106f773 100644 --- a/kernel/syscall/handlers/sys_proc.cpp +++ b/kernel/syscall/handlers/sys_proc.cpp @@ -290,6 +290,42 @@ DEFINE_SYSCALL1(proc_detach, u_handle) { return 0; } +DEFINE_SYSCALL1(proc_kill, u_handle) { + int32_t handle = static_cast(u_handle); + + sched::task* caller = sched::current(); + resource::resource_object* obj = nullptr; + int32_t rc = resource::get_handle_object( + &caller->handles, handle, 0, &obj); + if (rc != resource::HANDLE_OK) { + return syscall::EBADF; + } + + if (obj->type != resource::resource_type::PROCESS) { + resource::resource_release(obj); + return syscall::EBADF; + } + + auto* pr = resource::proc_provider::get_proc_resource(obj); + if (!pr) { + resource::resource_release(obj); + return syscall::EINVAL; + } + + int32_t kill_rc = resource::proc_provider::terminate_proc_resource( + pr, resource::proc_provider::PROC_KILL_EXIT_CODE); + resource::resource_release(obj); + + if (kill_rc != resource::OK) { + if (kill_rc == resource::ERR_INVAL) { + return syscall::EINVAL; + } + return syscall::EIO; + } + + return 0; +} + DEFINE_SYSCALL2(proc_info, u_handle, u_info_ptr) { if (u_info_ptr == 0) { return syscall::EFAULT; diff --git a/kernel/syscall/handlers/sys_proc.h b/kernel/syscall/handlers/sys_proc.h index 1544db9e..2849358c 100644 --- a/kernel/syscall/handlers/sys_proc.h +++ b/kernel/syscall/handlers/sys_proc.h @@ -9,5 +9,6 @@ DECLARE_SYSCALL(proc_wait); DECLARE_SYSCALL(proc_detach); DECLARE_SYSCALL(proc_info); DECLARE_SYSCALL(proc_set_handle); +DECLARE_SYSCALL(proc_kill); #endif // STELLUX_SYSCALL_HANDLERS_SYS_PROC_H diff --git a/kernel/syscall/syscall.cpp b/kernel/syscall/syscall.cpp index dfc1155d..53c0370d 100644 --- a/kernel/syscall/syscall.cpp +++ 
b/kernel/syscall/syscall.cpp @@ -1,5 +1,6 @@ #include "syscall/syscall.h" #include "syscall/syscall_table.h" +#include "sched/sched.h" #include "sched/task_exec_core.h" #include "dynpriv/dynpriv.h" #include "percpu/percpu.h" @@ -35,6 +36,8 @@ extern "C" __PRIVILEGED_CODE int64_t stlx_syscall_handler( result = syscall::ENOSYS; } + sched::maybe_terminate_current(); + // Return-boundary restore: dynamic runtime elevation follows the selected // task mode once syscall handling and switch teardown are complete. restore_post_syscall_elevation_state(); diff --git a/kernel/syscall/syscall.h b/kernel/syscall/syscall.h index b56b7d09..6ef50782 100644 --- a/kernel/syscall/syscall.h +++ b/kernel/syscall/syscall.h @@ -19,6 +19,7 @@ constexpr uint64_t SYS_PROC_WAIT = 1012; constexpr uint64_t SYS_PROC_DETACH = 1013; constexpr uint64_t SYS_PROC_INFO = 1014; constexpr uint64_t SYS_PROC_SET_HANDLE = 1015; +constexpr uint64_t SYS_PROC_KILL = 1016; // PTY constexpr uint64_t SYS_PTY_CREATE = 1020; diff --git a/kernel/syscall/syscall_table.cpp b/kernel/syscall/syscall_table.cpp index b2d61598..338db239 100644 --- a/kernel/syscall/syscall_table.cpp +++ b/kernel/syscall/syscall_table.cpp @@ -71,6 +71,7 @@ __PRIVILEGED_CODE void init_syscall_table() { REGISTER_SYSCALL(SYS_PROC_DETACH, proc_detach); REGISTER_SYSCALL(SYS_PROC_INFO, proc_info); REGISTER_SYSCALL(SYS_PROC_SET_HANDLE, proc_set_handle); + REGISTER_SYSCALL(SYS_PROC_KILL, proc_kill); REGISTER_SYSCALL(SYS_PTY_CREATE, pty_create); diff --git a/kernel/tests/sync/wait_queue.test.cpp b/kernel/tests/sync/wait_queue.test.cpp index 5ef80707..06f3f1b2 100644 --- a/kernel/tests/sync/wait_queue.test.cpp +++ b/kernel/tests/sync/wait_queue.test.cpp @@ -407,3 +407,75 @@ TEST(wait_queue, wake_with_condition_recheck) { ASSERT_TRUE(spin_wait_ge(&g_recheck_done_count, 2)); EXPECT_EQ(__atomic_load_n(&g_recheck_done_count, __ATOMIC_ACQUIRE), 2u); } + +// --- cancel_wait_wakes_specific_task --- +// Two tasks block on the same queue. 
cancel_wait() is used to remove and +// wake exactly one waiter, verifying targeted cancellation semantics. + +constexpr uint32_t CANCEL_WAIT_TASKS = 2; + +static sync::wait_queue g_cancel_wq; +static sync::spinlock g_cancel_lock; +static volatile uint32_t g_cancel_go[CANCEL_WAIT_TASKS]; +static volatile uint32_t g_cancel_ready[CANCEL_WAIT_TASKS]; +static volatile uint32_t g_cancel_done[CANCEL_WAIT_TASKS]; +static sched::task* g_cancel_tasks[CANCEL_WAIT_TASKS]; + +static void cancel_wait_worker_fn(void* arg) { + uint32_t idx = static_cast(reinterpret_cast(arg)); + RUN_ELEVATED({ + sync::irq_state irq = sync::spin_lock_irqsave(g_cancel_lock); + __atomic_store_n(&g_cancel_ready[idx], 1, __ATOMIC_RELEASE); + while (!__atomic_load_n(&g_cancel_go[idx], __ATOMIC_ACQUIRE)) { + irq = sync::wait(g_cancel_wq, g_cancel_lock, irq); + } + sync::spin_unlock_irqrestore(g_cancel_lock, irq); + }); + __atomic_store_n(&g_cancel_done[idx], 1, __ATOMIC_RELEASE); + sched::exit(0); +} + +TEST(wait_queue, cancel_wait_wakes_specific_task) { + g_cancel_wq.init(); + g_cancel_lock = sync::SPINLOCK_INIT; + for (uint32_t i = 0; i < CANCEL_WAIT_TASKS; i++) { + g_cancel_go[i] = 0; + g_cancel_ready[i] = 0; + g_cancel_done[i] = 0; + g_cancel_tasks[i] = nullptr; + } + + RUN_ELEVATED({ + for (uint32_t i = 0; i < CANCEL_WAIT_TASKS; i++) { + g_cancel_tasks[i] = sched::create_kernel_task( + cancel_wait_worker_fn, + reinterpret_cast(static_cast(i)), + "wq_cancel"); + ASSERT_NOT_NULL(g_cancel_tasks[i]); + sched::enqueue(g_cancel_tasks[i]); + } + }); + + ASSERT_TRUE(spin_wait(&g_cancel_ready[0])); + ASSERT_TRUE(spin_wait(&g_cancel_ready[1])); + + RUN_ELEVATED({ + sync::irq_state irq = sync::spin_lock_irqsave(g_cancel_lock); + __atomic_store_n(&g_cancel_go[0], 1, __ATOMIC_RELEASE); + sync::spin_unlock_irqrestore(g_cancel_lock, irq); + sync::cancel_wait(g_cancel_wq, g_cancel_tasks[0]); + }); + + ASSERT_TRUE(spin_wait(&g_cancel_done[0])); + brief_delay(); + EXPECT_EQ(__atomic_load_n(&g_cancel_done[1], 
__ATOMIC_ACQUIRE), 0u); + + RUN_ELEVATED({ + sync::irq_state irq = sync::spin_lock_irqsave(g_cancel_lock); + __atomic_store_n(&g_cancel_go[1], 1, __ATOMIC_RELEASE); + sync::spin_unlock_irqrestore(g_cancel_lock, irq); + sync::wake_all(g_cancel_wq); + }); + + ASSERT_TRUE(spin_wait(&g_cancel_done[1])); +} diff --git a/kernel/timer/timer.h b/kernel/timer/timer.h index 6c3c3f48..fb88acea 100644 --- a/kernel/timer/timer.h +++ b/kernel/timer/timer.h @@ -56,6 +56,14 @@ __PRIVILEGED_CODE bool on_interrupt(); */ __PRIVILEGED_CODE void schedule_sleep(sched::task* t, uint64_t deadline_ns); +/** + * @brief Cancel a blocked sleep for the given task and wake it. + * No-op if task is not currently sleeping in a timer queue. + * Safe from IRQ context and remote CPUs. + * @note Privilege: **required** + */ +__PRIVILEGED_CODE void cancel_sleep(sched::task* t); + } // namespace timer #endif // STELLUX_TIMER_TIMER_H diff --git a/userland/apps/Makefile b/userland/apps/Makefile index a76215ff..43d7c4f0 100644 --- a/userland/apps/Makefile +++ b/userland/apps/Makefile @@ -2,7 +2,7 @@ # Stellux Userland - Applications # -APP_DIRS := init hello shell ls cat rm stat touch sleep true false clear ptytest date clockbench +APP_DIRS := init hello shell ls cat rm stat touch sleep true false clear ptytest date clockbench proctest APP_COUNT := $(words $(APP_DIRS)) all: diff --git a/userland/apps/proctest/Makefile b/userland/apps/proctest/Makefile new file mode 100644 index 00000000..b3e1f161 --- /dev/null +++ b/userland/apps/proctest/Makefile @@ -0,0 +1,2 @@ +APP_NAME := proctest +include ../../mk/app.mk diff --git a/userland/apps/proctest/src/proctest.c b/userland/apps/proctest/src/proctest.c new file mode 100644 index 00000000..b6b76ce6 --- /dev/null +++ b/userland/apps/proctest/src/proctest.c @@ -0,0 +1,288 @@ +#define _POSIX_C_SOURCE 199309L +#include +#include +#include +#include +#include +#include + +#define PROC_KILL_EXPECTED_EXIT 137 + +static int parse_i32(const char* s, int* out) { + if 
(!s || !out) return -1; + char* end = NULL; + long v = strtol(s, &end, 10); + if (end == s || (end && *end != '\0')) return -1; + if (v < -2147483648L || v > 2147483647L) return -1; + *out = (int)v; + return 0; +} + +static void sleep_ms(int ms) { + if (ms <= 0) return; + struct timespec ts = { + .tv_sec = ms / 1000, + .tv_nsec = (long)(ms % 1000) * 1000000L + }; + nanosleep(&ts, NULL); +} + +static int run_chain(int depth, int tick_ms) { + if (depth > 0) { + char depth_buf[16]; + char tick_buf[16]; + snprintf(depth_buf, sizeof(depth_buf), "%d", depth - 1); + snprintf(tick_buf, sizeof(tick_buf), "%d", tick_ms); + + const char* child_argv[] = { "chain", depth_buf, tick_buf, NULL }; + int child = proc_create("/initrd/bin/proctest", child_argv); + if (child < 0) { + printf("proctest(chain): proc_create failed errno=%d\r\n", errno); + return 20; + } + if (proc_start(child) < 0) { + printf("proctest(chain): proc_start failed errno=%d\r\n", errno); + return 21; + } + printf("proctest(chain): spawned depth=%d handle=%d\r\n", depth - 1, child); + } + + while (1) { + sleep_ms(tick_ms); + } +} + +static int run_kill_recursive_test(void) { + const char* child_argv[] = { "chain", "2", "50", NULL }; + int child = proc_create("/initrd/bin/proctest", child_argv); + if (child < 0) { + printf("proctest(kill): proc_create failed errno=%d\r\n", errno); + return 30; + } + if (proc_start(child) < 0) { + printf("proctest(kill): proc_start failed errno=%d\r\n", errno); + return 31; + } + + sleep_ms(150); + + if (proc_kill(child) < 0) { + printf("proctest(kill): proc_kill failed errno=%d\r\n", errno); + return 32; + } + + int exit_code = -1; + if (proc_wait(child, &exit_code) < 0) { + printf("proctest(kill): proc_wait failed errno=%d\r\n", errno); + return 33; + } + + printf("proctest(kill): child exit=%d expected=%d\r\n", + exit_code, PROC_KILL_EXPECTED_EXIT); + if (exit_code != PROC_KILL_EXPECTED_EXIT) { + printf("proctest(kill): FAIL\r\n"); + return 34; + } + + printf("proctest(kill): 
PASS\r\n"); + return 0; +} + +static int run_orphan_exit_test(void) { + const char* child_argv[] = { "chain", "1", "50", NULL }; + int child = proc_create("/initrd/bin/proctest", child_argv); + if (child < 0) { + printf("proctest(orphan-exit): proc_create failed errno=%d\r\n", errno); + return 40; + } + if (proc_start(child) < 0) { + printf("proctest(orphan-exit): proc_start failed errno=%d\r\n", errno); + return 41; + } + + printf("proctest(orphan-exit): started child handle=%d and exiting without wait\r\n", child); + return 0; +} + +static int run_detach_test(void) { + const char* child_argv[] = { "sleep", "800", NULL }; + int child = proc_create("/initrd/bin/proctest", child_argv); + if (child < 0) { + printf("proctest(detach): proc_create failed errno=%d\r\n", errno); + return 50; + } + if (proc_start(child) < 0) { + printf("proctest(detach): proc_start failed errno=%d\r\n", errno); + return 51; + } + if (proc_detach(child) < 0) { + printf("proctest(detach): proc_detach failed errno=%d\r\n", errno); + return 52; + } + + printf("proctest(detach): detached child successfully\r\n"); + sleep_ms(50); + return 0; +} + +static int run_kill_created_test(void) { + const char* child_argv[] = { "sleep", "5000", NULL }; + int child = proc_create("/initrd/bin/proctest", child_argv); + if (child < 0) { + printf("proctest(kill-created): proc_create failed errno=%d\r\n", errno); + return 60; + } + + if (proc_kill(child) < 0) { + printf("proctest(kill-created): proc_kill failed errno=%d\r\n", errno); + return 61; + } + + int exit_code = -1; + if (proc_wait(child, &exit_code) < 0) { + printf("proctest(kill-created): proc_wait failed errno=%d\r\n", errno); + return 62; + } + + printf("proctest(kill-created): child exit=%d expected=%d\r\n", + exit_code, PROC_KILL_EXPECTED_EXIT); + if (exit_code != PROC_KILL_EXPECTED_EXIT) { + printf("proctest(kill-created): FAIL\r\n"); + return 63; + } + + printf("proctest(kill-created): PASS\r\n"); + return 0; +} + +static int 
run_kill_invalid_test(void) { + errno = 0; + int rc = proc_kill(-1); + if (rc == 0) { + printf("proctest(kill-invalid): expected failure but succeeded\r\n"); + return 70; + } + if (errno == 0) { + printf("proctest(kill-invalid): expected errno to be set\r\n"); + return 71; + } + printf("proctest(kill-invalid): PASS errno=%d\r\n", errno); + return 0; +} + +static int run_kill_cycle_test(void) { + const char* sleep_argv[] = { "sleep", "5000", NULL }; + + int a = proc_create("/initrd/bin/proctest", sleep_argv); + if (a < 0) { + printf("proctest(kill-cycle): create A failed errno=%d\r\n", errno); + return 80; + } + + int b = proc_create("/initrd/bin/proctest", sleep_argv); + if (b < 0) { + printf("proctest(kill-cycle): create B failed errno=%d\r\n", errno); + return 81; + } + + if (proc_set_handle(a, 10, b) < 0) { + printf("proctest(kill-cycle): set A->B failed errno=%d\r\n", errno); + return 82; + } + if (proc_set_handle(b, 10, a) < 0) { + printf("proctest(kill-cycle): set B->A failed errno=%d\r\n", errno); + return 83; + } + + if (proc_start(a) < 0 || proc_start(b) < 0) { + printf("proctest(kill-cycle): start failed errno=%d\r\n", errno); + return 84; + } + + sleep_ms(100); + + if (proc_kill(a) < 0) { + printf("proctest(kill-cycle): proc_kill(A) failed errno=%d\r\n", errno); + return 85; + } + + int exit_a = -1; + int exit_b = -1; + if (proc_wait(a, &exit_a) < 0) { + printf("proctest(kill-cycle): wait A failed errno=%d\r\n", errno); + return 86; + } + if (proc_wait(b, &exit_b) < 0) { + printf("proctest(kill-cycle): wait B failed errno=%d\r\n", errno); + return 87; + } + + printf("proctest(kill-cycle): exitA=%d exitB=%d expected=%d\r\n", + exit_a, exit_b, PROC_KILL_EXPECTED_EXIT); + if (exit_a != PROC_KILL_EXPECTED_EXIT || exit_b != PROC_KILL_EXPECTED_EXIT) { + printf("proctest(kill-cycle): FAIL\r\n"); + return 88; + } + + printf("proctest(kill-cycle): PASS\r\n"); + return 0; +} + +int main(int argc, char** argv) { + setvbuf(stdout, NULL, _IONBF, 0); + + if (argc <= 
1) { + return run_kill_recursive_test(); + } + + if (strcmp(argv[1], "chain") == 0) { + if (argc < 4) { + printf("usage: proctest chain <depth> <tick_ms>\r\n"); + return 2; + } + int depth = 0; + int tick_ms = 0; + if (parse_i32(argv[2], &depth) < 0 || parse_i32(argv[3], &tick_ms) < 0) { + printf("proctest(chain): invalid numeric args\r\n"); + return 3; + } + if (depth < 0) depth = 0; + if (tick_ms <= 0) tick_ms = 50; + return run_chain(depth, tick_ms); + } + + if (strcmp(argv[1], "sleep") == 0) { + if (argc < 3) return 4; + int ms = 0; + if (parse_i32(argv[2], &ms) < 0) return 5; + sleep_ms(ms); + return 0; + } + + if (strcmp(argv[1], "kill-recursive") == 0) { + return run_kill_recursive_test(); + } + + if (strcmp(argv[1], "orphan-exit") == 0) { + return run_orphan_exit_test(); + } + + if (strcmp(argv[1], "detach") == 0) { + return run_detach_test(); + } + + if (strcmp(argv[1], "kill-created") == 0) { + return run_kill_created_test(); + } + + if (strcmp(argv[1], "kill-invalid") == 0) { + return run_kill_invalid_test(); + } + + if (strcmp(argv[1], "kill-cycle") == 0) { + return run_kill_cycle_test(); + } + + printf("usage: proctest [kill-recursive|kill-created|kill-invalid|kill-cycle|orphan-exit|detach|chain <depth> <tick_ms>|sleep <ms>]\r\n"); + return 1; +} diff --git a/userland/lib/libstlx/include/stlx/proc.h b/userland/lib/libstlx/include/stlx/proc.h index 9f6fd89e..20db1b29 100644 --- a/userland/lib/libstlx/include/stlx/proc.h +++ b/userland/lib/libstlx/include/stlx/proc.h @@ -56,4 +56,13 @@ int proc_info(int handle, process_info* info); */ int proc_set_handle(int proc_handle, int slot, int resource_handle); +/** + * Request termination of a process and all of its descendants reachable + * through PROCESS handles. This call blocks until the target process has + * exited. The process handle remains valid so callers may still query + * proc_info/proc_wait afterwards. + * Returns 0 on success, -1 on failure with errno set.
+ */ +int proc_kill(int handle); + #endif /* STLX_PROC_H */ diff --git a/userland/lib/libstlx/include/stlx/syscall_nums.h b/userland/lib/libstlx/include/stlx/syscall_nums.h index 89f0e72b..3b84beb0 100644 --- a/userland/lib/libstlx/include/stlx/syscall_nums.h +++ b/userland/lib/libstlx/include/stlx/syscall_nums.h @@ -7,6 +7,7 @@ #define SYS_PROC_DETACH 1013 #define SYS_PROC_INFO 1014 #define SYS_PROC_SET_HANDLE 1015 +#define SYS_PROC_KILL 1016 #define SYS_PTY_CREATE 1020 #endif /* STLX_SYSCALL_NUMS_H */ diff --git a/userland/lib/libstlx/src/proc.c b/userland/lib/libstlx/src/proc.c index 32c4d183..a4ee873b 100644 --- a/userland/lib/libstlx/src/proc.c +++ b/userland/lib/libstlx/src/proc.c @@ -39,3 +39,7 @@ int proc_info(int handle, process_info* info) { int proc_set_handle(int proc_handle, int slot, int resource_handle) { return (int)syscall(SYS_PROC_SET_HANDLE, proc_handle, slot, resource_handle); } + +int proc_kill(int handle) { + return (int)syscall(SYS_PROC_KILL, handle); +}