Skip to content
This repository was archived by the owner on Mar 24, 2022. It is now read-only.

Commit b3e289d

Browse files
acfoltzer authored and iximeow committed
improve forced unwinding in cases with termination and yield
- adds a `catch_unwind` around each hostcall to make sure we decrement `hostcall_count` when unwinding - handles `State::Yielded` instances by setting a pending termination flag, and then resuming
1 parent d571c56 commit b3e289d

3 files changed

Lines changed: 120 additions & 86 deletions

File tree

lucet-runtime/lucet-runtime-internals/src/hostcall_macros.rs

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,10 +59,9 @@ macro_rules! lucet_hostcalls {
5959
// let res = std::panic::catch_unwind(move || {
6060
#[allow(unused_imports)]
6161
use lucet_runtime_internals::vmctx::VmctxInternal;
62-
$crate::vmctx::Vmctx::from_raw(vmctx_raw).instance_mut().begin_hostcall();
63-
let res = hostcall_impl(&mut $crate::vmctx::Vmctx::from_raw(vmctx_raw), $( $arg ),*);
64-
65-
$crate::vmctx::Vmctx::from_raw(vmctx_raw).instance_mut().end_hostcall();
62+
let res = $crate::vmctx::Vmctx::from_raw(vmctx_raw).instance_mut().in_hostcall(|| {
63+
hostcall_impl(&mut $crate::vmctx::Vmctx::from_raw(vmctx_raw), $( $arg ),*)
64+
});
6665
res
6766
// });
6867
// match res {

lucet-runtime/lucet-runtime-internals/src/instance.rs

Lines changed: 114 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ use std::cell::{BorrowError, BorrowMutError, Ref, RefCell, RefMut, UnsafeCell};
2323
use std::marker::PhantomData;
2424
use std::mem;
2525
use std::ops::{Deref, DerefMut};
26+
use std::panic::{catch_unwind, resume_unwind, UnwindSafe};
2627
use std::ptr::{self, NonNull};
2728
use std::sync::Arc;
2829

@@ -258,6 +259,8 @@ pub struct Instance {
258259

259260
hostcall_count: u64,
260261

262+
pub(crate) pending_termination: Option<TerminationDetails>,
263+
261264
/// `_padding` must be the last member of the structure.
262265
/// This marks where the padding starts to make the structure exactly 4096 bytes long.
263266
/// It is also used to compute the size of the structure up to that point, i.e. without padding.
@@ -273,6 +276,10 @@ pub struct Instance {
273276
/// this Instance exists in, *while* the Instance destructor is executing.
274277
impl Drop for Instance {
275278
fn drop(&mut self) {
279+
// Initiate a forced unwind if any hostcall frames remain on the stack
280+
if self.hostcall_count > 0 {
281+
self.force_unwind().expect("forced unwinding succeeds");
282+
}
276283
// Reset magic to indicate this instance
277284
// is no longer valid
278285
self.magic = 0;
@@ -541,7 +548,7 @@ impl Instance {
541548
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
542549
pub fn reset(&mut self) -> Result<(), Error> {
543550
if self.hostcall_count > 0 {
544-
self.force_unwind().unwrap();
551+
self.force_unwind()?;
545552
}
546553
self.alloc.reset_heap(self.module.as_ref())?;
547554
let globals = unsafe { self.alloc.globals_mut() };
@@ -559,6 +566,9 @@ impl Instance {
559566
}
560567

561568
self.state = State::Ready;
569+
self.resumed_val = None;
570+
self.hostcall_count = 0;
571+
self.pending_termination = None;
562572

563573
self.run_start()?;
564574

@@ -734,12 +744,17 @@ impl Instance {
734744
res
735745
}
736746

737-
pub fn begin_hostcall(&mut self) {
747+
pub fn in_hostcall<F: FnOnce() -> R + UnwindSafe, R>(&mut self, f: F) -> R {
738748
self.hostcall_count += 1;
739-
}
740-
741-
pub fn end_hostcall(&mut self) {
749+
let res = match catch_unwind(f) {
750+
Ok(res) => res,
751+
Err(e) => {
752+
self.hostcall_count -= 1;
753+
resume_unwind(e)
754+
}
755+
};
742756
self.hostcall_count -= 1;
757+
res
743758
}
744759

745760
#[inline]
@@ -772,6 +787,7 @@ impl Instance {
772787
entrypoint: None,
773788
resumed_val: None,
774789
hostcall_count: 0,
790+
pending_termination: None,
775791
_padding: (),
776792
};
777793
inst.set_globals_ptr(globals_ptr);
@@ -1049,90 +1065,106 @@ impl Instance {
10491065

10501066
// Force a guest to unwind the stack from the specified guest address
10511067
fn force_unwind(&mut self) -> Result<(), Error> {
1052-
// if we should unwind by returning into the guest to cause a fault, do so with the redzone
1053-
// available in case the guest was at or close to overflowing.
1054-
self.with_redzone_stack(|inst| {
1055-
#[unwind(allowed)]
1056-
extern "C" fn initiate_unwind() {
1057-
panic!(TerminationDetails::ForcedUnwind);
1068+
match &self.state {
1069+
State::Yielded { .. } => {
1070+
self.pending_termination = Some(TerminationDetails::ForcedUnwind);
1071+
match self.with_redzone_stack(|inst| inst.swap_and_return()) {
1072+
Err(Error::RuntimeTerminated(TerminationDetails::ForcedUnwind)) => Ok(()),
1073+
Err(e) => lucet_bail!(
1074+
"resume with forced unwind returned with an unexpected error: {}",
1075+
e
1076+
),
1077+
Ok(_) => lucet_bail!("resume with forced unwind returned normally"),
1078+
}
10581079
}
1080+
State::Faulted { .. } => {
1081+
// if we should unwind by returning into the guest to cause a fault, do so with the redzone
1082+
// available in case the guest was at or close to overflowing.
1083+
self.with_redzone_stack(|inst| {
1084+
#[unwind(allowed)]
1085+
extern "C" fn initiate_unwind() {
1086+
panic!(TerminationDetails::ForcedUnwind);
1087+
}
10591088

1060-
let guest_addr = inst
1061-
.ctx
1062-
.stop_addr
1063-
.expect("guest that stopped in guest code has an address it stopped at");
1064-
1065-
// set up the faulting instruction pointer as the return address for `initiate_unwind`;
1066-
// extremely unsafe, doesn't handle any edge cases yet
1067-
//
1068-
// TODO(Andy) if the last address is obtained through the signal handler, for a signal
1069-
// received exactly when we have just executed a `call` to a guest function, we
1070-
// actually want to not push it (or push it +1?) lest we try to unwind with a return
1071-
// address == start of function, where the system unwinder will unwind for the function
1072-
// at address-1, (probably) fail to find the function, and `abort()`.
1073-
//
1074-
// if `rip` == the start of some guest function, we can probably just discard it and
1075-
// use the return address instead.
1076-
inst.push(guest_addr as u64)
1077-
.expect("stack has available space");
1078-
1079-
// The logic for this conditional can be a bit unintuitive: we _require_ that the stack
1080-
// is aligned to 8 bytes, but not 16 bytes, when pushing `initiate_unwind`.
1081-
//
1082-
// A diagram of the required layout may help:
1083-
// `XXXXX0`: ------------------ <-- call frame start -- SysV ABI requires 16-byte alignment
1084-
// `XXXXX8`: | return address |
1085-
// `XXXX..`: | ..locals etc.. |
1086-
// `XXXX..`: | ..as needed... |
1087-
//
1088-
// Now ensure we _have_ an ABI-conformant call fame like above, by handling the case that
1089-
// could lead to an unaligned stack - the guest stack pointer currently being unaligned.
1090-
// Among other errors, a misaligned stack will result in compiler-generated xmm accesses to
1091-
// fault.
1092-
//
1093-
// Eg, we would have a stack like:
1094-
// `XXXXX8`: ------------------ <-- guest stack end, call frame start
1095-
// `XXXXX0`: | unwind_stub |
1096-
// `XXXX..`: | ..locals etc.. |
1097-
// `XXXX..`: | ..as needed... |
1098-
//
1099-
// So, instead, push a new return address to construct a new call frame at the right
1100-
// offset. `unwind_stub` has CFA directives so the unwinder can connect from
1101-
// `initiate_unwind` to guest/host frames to unwind. The unwinder, thankfully, has no
1102-
// preferences about alignment of frames being unwound.
1103-
//
1104-
// And we end up with a guest stack like this:
1105-
// `XXXXX8`: ------------------ <-- guest stack end
1106-
// `XXXXX0`: | guest ret addr | <-- guest return address to unwind through
1107-
// `XXXXX0`: ------------------ <-- call frame start -- SysV ABI requires 16-byte alignment
1108-
// `XXXXX8`: | unwind_stub |
1109-
// `XXXX..`: | ..locals etc.. |
1110-
// `XXXX..`: | ..as needed... |
1111-
if inst.ctx.gpr.rsp % 16 == 0 {
1112-
// extremely unsafe, doesn't handle any stack exhaustion edge cases yet
1113-
inst.push(crate::context::unwind_stub as u64)
1114-
.expect("stack has available space");
1115-
}
1089+
let guest_addr = inst
1090+
.ctx
1091+
.stop_addr
1092+
.expect("guest that stopped in guest code has an address it stopped at");
1093+
1094+
// set up the faulting instruction pointer as the return address for `initiate_unwind`;
1095+
// extremely unsafe, doesn't handle any edge cases yet
1096+
//
1097+
// TODO(Andy) if the last address is obtained through the signal handler, for a signal
1098+
// received exactly when we have just executed a `call` to a guest function, we
1099+
// actually want to not push it (or push it +1?) lest we try to unwind with a return
1100+
// address == start of function, where the system unwinder will unwind for the function
1101+
// at address-1, (probably) fail to find the function, and `abort()`.
1102+
//
1103+
// if `rip` == the start of some guest function, we can probably just discard it and
1104+
// use the return address instead.
1105+
inst.push(guest_addr as u64)
1106+
.expect("stack has available space");
1107+
1108+
// The logic for this conditional can be a bit unintuitive: we _require_ that the stack
1109+
// is aligned to 8 bytes, but not 16 bytes, when pushing `initiate_unwind`.
1110+
//
1111+
// A diagram of the required layout may help:
1112+
// `XXXXX0`: ------------------ <-- call frame start -- SysV ABI requires 16-byte alignment
1113+
// `XXXXX8`: | return address |
1114+
// `XXXX..`: | ..locals etc.. |
1115+
// `XXXX..`: | ..as needed... |
1116+
//
1117+
// Now ensure we _have_ an ABI-conformant call frame like above, by handling the case that
1118+
// could lead to an unaligned stack - the guest stack pointer currently being unaligned.
1119+
// Among other errors, a misaligned stack will result in compiler-generated xmm accesses to
1120+
// fault.
1121+
//
1122+
// Eg, we would have a stack like:
1123+
// `XXXXX8`: ------------------ <-- guest stack end, call frame start
1124+
// `XXXXX0`: | unwind_stub |
1125+
// `XXXX..`: | ..locals etc.. |
1126+
// `XXXX..`: | ..as needed... |
1127+
//
1128+
// So, instead, push a new return address to construct a new call frame at the right
1129+
// offset. `unwind_stub` has CFA directives so the unwinder can connect from
1130+
// `initiate_unwind` to guest/host frames to unwind. The unwinder, thankfully, has no
1131+
// preferences about alignment of frames being unwound.
1132+
//
1133+
// And we end up with a guest stack like this:
1134+
// `XXXXX8`: ------------------ <-- guest stack end
1135+
// `XXXXX0`: | guest ret addr | <-- guest return address to unwind through
1136+
// `XXXXX0`: ------------------ <-- call frame start -- SysV ABI requires 16-byte alignment
1137+
// `XXXXX8`: | unwind_stub |
1138+
// `XXXX..`: | ..locals etc.. |
1139+
// `XXXX..`: | ..as needed... |
1140+
if inst.ctx.gpr.rsp % 16 == 0 {
1141+
// extremely unsafe, doesn't handle any stack exhaustion edge cases yet
1142+
inst.push(crate::context::unwind_stub as u64)
1143+
.expect("stack has available space");
1144+
}
11161145

1117-
assert!(inst.ctx.gpr.rsp % 16 == 8);
1118-
// extremely unsafe, doesn't handle any stack exhaustion edge cases yet
1119-
inst.push(initiate_unwind as u64)
1120-
.expect("stack has available space");
1146+
assert!(inst.ctx.gpr.rsp % 16 == 8);
1147+
// extremely unsafe, doesn't handle any stack exhaustion edge cases yet
1148+
inst.push(initiate_unwind as u64)
1149+
.expect("stack has available space");
11211150

1122-
inst.state = State::Ready;
1151+
inst.state = State::Ready;
11231152

1124-
match inst.swap_and_return() {
1125-
Ok(_) => panic!("forced unwinding shouldn't return normally"),
1126-
Err(Error::RuntimeTerminated(TerminationDetails::ForcedUnwind)) => (),
1127-
Err(e) => panic!("unexpected error: {}", e),
1128-
}
1153+
match inst.swap_and_return() {
1154+
Ok(_) => panic!("forced unwinding shouldn't return normally"),
1155+
Err(Error::RuntimeTerminated(TerminationDetails::ForcedUnwind)) => (),
1156+
Err(e) => panic!("unexpected error: {}", e),
1157+
}
11291158

1130-
// we've unwound the stack, so we know there are no longer any host frames.
1131-
inst.hostcall_count = 0;
1132-
inst.ctx.stop_addr = None;
1159+
// we've unwound the stack, so we know there are no longer any host frames.
1160+
// inst.hostcall_count = 0;
1161+
inst.ctx.stop_addr = None;
11331162

1134-
Ok(())
1135-
})
1163+
Ok(())
1164+
})
1165+
}
1166+
st => panic!("should never do forced unwinding in this state: {}", st),
1167+
}
11361168
}
11371169
}
11381170

lucet-runtime/lucet-runtime-internals/src/vmctx.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -376,6 +376,9 @@ impl Vmctx {
376376
};
377377

378378
HOST_CTX.with(|host_ctx| unsafe { Context::swap(&mut inst.ctx, &mut *host_ctx.get()) });
379+
if let Some(td) = inst.pending_termination.take() {
380+
panic!(td);
381+
}
379382
}
380383

381384
/// Take and return the value passed to

0 commit comments

Comments
 (0)