Implement stack unwinding for cancelled tasks #769

Open · wants to merge 16 commits into base: theseus_main
14 changes: 13 additions & 1 deletion Cargo.lock

(Generated file; diff not rendered.)

2 changes: 1 addition & 1 deletion applications/test_task_cancel/Cargo.toml
@@ -9,4 +9,4 @@ edition = "2021"
log = "0.4"
spawn = { path = "../../kernel/spawn" }
spin = "0.9"
-task = { path = "../../kernel/task" }
+task_cancel = { path = "../../kernel/task_cancel" }
21 changes: 17 additions & 4 deletions applications/test_task_cancel/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,21 @@
-// TODO: Properly implement Task::kill so the test passes.
+// TODO: Test that the other thread is successfully cancelled in the following
+// scenarios:
+//
+// 1. In lsda_generator, in which case it should trigger the first criterion of
+// unwind::can_unwind.
+//
+// 2. At the call lsda_generator instruction, in which case it should trigger
+// the second criterion of unwind::can_unwind.
+//
+// 3. At the jmp (loop) instruction, in which case it should continue to the
+// next (call) instruction and then unwind.

#![no_std]

extern crate alloc;

use alloc::{string::String, sync::Arc, vec::Vec};
-use core::sync::atomic::{AtomicBool, Ordering::Relaxed};
+use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed};
use spin::Mutex;

pub fn main(_: Vec<String>) -> isize {
@@ -16,8 +26,7 @@ pub fn main(_: Vec<String>) -> isize {

while !lock.is_locked() {}

-task.kill(task::KillReason::Requested)
-    .expect("failed to abort task");
+task.cancel();

log::debug!("waiting for lock to be unlocked");

@@ -51,4 +60,8 @@ fn lsda_generator() {
if FALSE.load(Relaxed) {
panic!();
}

+// Spend more time in lsda_generator to increase likelihood of scenario 1.
+static __COUNTER: AtomicUsize = AtomicUsize::new(0);
+__COUNTER.fetch_add(1, Relaxed);
}
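The three scenarios map onto the shape of the spawned task's compiled body, essentially a call followed by a jmp back to the top of the loop. A sketch of that shape (hypothetical; the real test also synchronizes through a lock):

    fn cancellable_loop() {
        loop {
            lsda_generator();
            // Scenario 1: the cancellation interrupt lands inside lsda_generator.
            // Scenario 2: it lands on the call instruction above.
            // Scenario 3: it lands on the back-edge jmp that closes this loop,
            // in which case execution continues to the next call, then unwinds.
        }
    }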
3 changes: 3 additions & 0 deletions kernel/exceptions_full/Cargo.toml
@@ -52,5 +52,8 @@ path = "../debug_info"
[dependencies.signal_handler]
path = "../signal_handler"

+[dependencies.task_cancel]
+path = "../task_cancel"

[lib]
crate-type = ["rlib"]
12 changes: 2 additions & 10 deletions kernel/exceptions_full/src/lib.rs
@@ -29,7 +29,7 @@ pub fn init(idt_ref: &'static LockedIdt) {

// SET UP FIXED EXCEPTION HANDLERS
idt.divide_error.set_handler_fn(divide_error_handler);
-idt.debug.set_handler_fn(debug_handler);
+idt.debug.set_handler_fn(task_cancel::interrupt_handler);
idt.non_maskable_interrupt.set_handler_fn(nmi_handler);
idt.breakpoint.set_handler_fn(breakpoint_handler);
idt.overflow.set_handler_fn(overflow_handler);
@@ -257,15 +257,7 @@ extern "x86-interrupt" fn divide_error_handler(stack_frame: InterruptStackFrame)
kill_and_halt(0x0, &stack_frame, None, true)
}

-/// exception 0x01
-extern "x86-interrupt" fn debug_handler(stack_frame: InterruptStackFrame) {
-    println_both!("\nEXCEPTION: DEBUG EXCEPTION\n{:#X?}", stack_frame);
-    // don't halt here, this isn't a fatal/permanent failure, just a brief pause.
-}
-
-/// Exception 0x02 is a Non-Maskable Interrupt (NMI).
-///
-/// Theseus uses this for TLB Shootdown IPIs and sampling interrupts.
+/// exception 0x02, also used for TLB Shootdown IPIs and sampling interrupts.
///
/// # Important Note
/// Acquiring ANY locks in this function, even irq-safe ones, could cause a deadlock
5 changes: 4 additions & 1 deletion kernel/interrupts/src/aarch64/mod.rs
@@ -64,7 +64,10 @@ struct EsrEL1(InMemoryRegister<u64, ESR_EL1::Register>);
macro_rules! interrupt_handler {
($name:ident, $x86_64_eoi_param:expr, $stack_frame:ident, $code:block) => {
extern "C" fn $name($stack_frame: &$crate::InterruptStackFrame) -> $crate::EoiBehaviour $code
-    }
+    };
+    ($name:ident, $x86_64_eoi_param:expr, mut $stack_frame:ident, $code:block) => {
+        extern "C" fn $name(mut $stack_frame: &$crate::InterruptStackFrame) -> $crate::EoiBehaviour $code
+    };
}

/// The exception context as it is stored on the stack on exception entry.
10 changes: 9 additions & 1 deletion kernel/interrupts/src/x86_64/mod.rs
@@ -44,7 +44,15 @@ macro_rules! interrupt_handler {
$crate::eoi($x86_64_eoi_param);
}
}
-    }
+    };
+    ($name:ident, $x86_64_eoi_param:expr, mut $stack_frame:ident, $code:block) => {
+        extern "x86-interrupt" fn $name(mut sf: $crate::InterruptStackFrame) {
+            let mut $stack_frame = &mut sf;
+            if let $crate::EoiBehaviour::HandlerDidNotSendEoi = $code {
+                $crate::eoi($x86_64_eoi_param);
+            }
+        }
+    };
}


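The new macro arm gives a handler body a mutable reference to the saved stack frame, which the scheduler change below relies on. A hypothetical invocation on x86_64 (handler name illustrative, not from this PR):

    interrupt_handler!(example_handler, None, mut stack_frame, {
        // Here `stack_frame` is a `&mut InterruptStackFrame`, so the body can
        // rewrite saved CPU state, e.g. arm single-stepping before returning.
        task_cancel::set_trap_flag(stack_frame);
        EoiBehaviour::HandlerSentEoi
    });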
1 change: 1 addition & 0 deletions kernel/scheduler/Cargo.toml
@@ -9,6 +9,7 @@ edition = "2018"
log = "0.4.8"
cfg-if = "1.0.0"

+task_cancel = { path = "../task_cancel" }
cpu = { path = "../cpu" }
interrupts = { path = "../interrupts" }
sleep = { path = "../sleep" }
9 changes: 8 additions & 1 deletion kernel/scheduler/src/lib.rs
@@ -61,7 +61,7 @@ pub fn init() -> Result<(), &'static str> {
}

// Architecture-independent timer interrupt handler for preemptive scheduling.
-interrupt_handler!(timer_tick_handler, None, _stack_frame, {
+interrupt_handler!(timer_tick_handler, None, mut stack_frame, {
#[cfg(target_arch = "aarch64")]
interrupts::schedule_next_timer_tick();

@@ -89,6 +89,13 @@ interrupt_handler!(timer_tick_handler, None, _stack_frame, {

schedule();

+if let Some(current_task) = task::get_my_current_task() {
+    if current_task.is_cancelled() {
+        // Trigger a debug interrupt on the next instruction, which will
+        // invoke task_cancel::interrupt_handler.
+        task_cancel::set_trap_flag(stack_frame);
+    }
+}

EoiBehaviour::HandlerSentEoi
});

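For context, set_trap_flag sets bit 8 of the saved RFLAGS, the x86 trap flag (TF). A sketch of the constant involved (an architectural fact, not code from this PR):

    // Bit 8 of RFLAGS is the trap flag (TF); 1 << 8 == 0x100, the value
    // OR-ed in by task_cancel::set_trap_flag.
    const RFLAGS_TRAP_FLAG: u64 = 1 << 8;
    // With TF set in the frame the timer handler returns to, the CPU raises
    // a debug exception (IDT vector 0x1) after one more instruction of the
    // resumed task, invoking task_cancel::interrupt_handler.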
11 changes: 10 additions & 1 deletion kernel/task/src/lib.rs
@@ -133,6 +133,7 @@ struct TaskRefInner {
///
/// This is not public because it permits interior mutability.
joinable: AtomicBool,
+pub cancel_requested: AtomicBool,
}

impl TaskRef {
@@ -160,6 +161,7 @@ impl TaskRef {
exit_value_mailbox,
// A new task is joinable until its `JoinableTaskRef` is dropped.
joinable: AtomicBool::new(true),
+cancel_requested: AtomicBool::new(false),
}));

// Add the new TaskRef to the global task list.
@@ -215,6 +217,14 @@ impl TaskRef {
self.internal_exit(ExitValue::Killed(reason))
}

+pub fn cancel(&self) {
+    self.0.cancel_requested.store(true, Ordering::Relaxed);
+}
+
+pub fn is_cancelled(&self) -> bool {
+    self.0.cancel_requested.load(Ordering::Relaxed)
+}

/// The internal routine that actually exits or kills a Task.
fn internal_exit(&self, val: ExitValue) -> Result<(), &'static str> {
if self.has_exited() {
@@ -687,7 +697,6 @@ pub fn task_switch(
cpu_id: CpuId,
preemption_guard: PreemptionGuard,
) -> (bool, PreemptionGuard) {

// We use the `with_current_task_and_value()` closure here in order to ensure that
// the borrowed reference to the current task is guaranteed to be dropped
// *before* the actual context switch operation occurs.
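Unlike kill, cancel only requests cancellation; the unwinding itself happens later via the scheduler and the debug exception. A hypothetical usage sketch (spawn API assumed from applications/test_task_cancel):

    let task = spawn::new_task_builder(worker, ()).spawn()?;
    task.cancel();                // Relaxed store; acted on at a later timer tick
    assert!(task.is_cancelled()); // the request itself is immediately visible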
12 changes: 12 additions & 0 deletions kernel/task_cancel/Cargo.toml
@@ -0,0 +1,12 @@
[package]
name = "task_cancel"
version = "0.1.0"
authors = ["Klim Tsoutsman <[email protected]>"]
description = "Task cancellation"
edition = "2021"

[dependencies]
task = { path = "../task" }
unwind = { path = "../unwind" }
x86_64 = "0.14"
log = "0.4"
34 changes: 34 additions & 0 deletions kernel/task_cancel/src/lib.rs
@@ -0,0 +1,34 @@
// TODO: Move this into the task crate. That hasn't been done yet because it
// would create a circular dependency: unwind depends on task.

#![feature(abi_x86_interrupt, try_blocks)]
#![no_std]

use task::KillReason;
use x86_64::structures::idt::InterruptStackFrame;

pub fn set_trap_flag(stack_frame: &mut InterruptStackFrame) {
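// 0x100 is bit 8 of RFLAGS, the trap flag (TF): with it set, the CPU
// single-steps, raising a debug exception after the next instruction.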
unsafe { stack_frame.as_mut() }.update(|stack_frame| stack_frame.cpu_flags |= 0x100);
}

pub extern "x86-interrupt" fn interrupt_handler(mut stack_frame: InterruptStackFrame) {
let instruction_pointer = stack_frame.instruction_pointer.as_u64();
let stack_pointer = stack_frame.stack_pointer.as_u64();

if unwind::can_unwind(instruction_pointer) {
log::info!("unwinding a cancelled task");
unwind::start_remote_unwinding(
KillReason::Requested,
0,
stack_pointer,
instruction_pointer,
)
.expect("failed to unwind");
} else {
log::debug!("couldn't unwind at {instruction_pointer:0x?}; resetting trap flag");
// The trap flag is reset after every debug interrupt. Since we can't unwind at
// this instruction, we reset the flag to check again at the next instruction.
set_trap_flag(&mut stack_frame);
// FIXME: What happens if a LAPIC timer interrupt triggers here?
}
}
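Taken together, the cancellation path this PR adds works as follows (a comment-only summary of the diffs above):

    // 1. TaskRef::cancel stores cancel_requested = true (kernel/task).
    // 2. The next timer tick on the task's CPU observes is_cancelled() and
    //    calls task_cancel::set_trap_flag on the interrupted frame
    //    (kernel/scheduler).
    // 3. After one instruction of the resumed task, the CPU takes the debug
    //    exception, now routed to task_cancel::interrupt_handler
    //    (kernel/exceptions_full).
    // 4. The handler unwinds via unwind::start_remote_unwinding when
    //    unwind::can_unwind(rip) holds; otherwise it re-arms the trap flag
    //    and retries one instruction later.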